cmd/makemac: add support for multiple swarming servers
This adds an extra layer to the image configuration, specifying which
swarming host a given configuration set belongs to.
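
For example, the production configuration in config.go now groups image
configs by swarming server:

    var prodImageConfig = map[*swarmingConfig][]imageConfig{
        publicSwarming: {
            // imageConfig entries for this host...
        },
    }

Each lease records which swarming host it belongs to in its MacService
project name ("makemac/SWARMING_HOST" rather than "makemac"), so that
new leases that have not yet connected to swarming can still be counted
toward the correct host.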
Change-Id: Ie439a9bf3295777495c3d985b840463e60a46e4e
Reviewed-on: https://go-review.googlesource.com/c/build/+/570600
Reviewed-by: Michael Knyszek <mknyszek@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Auto-Submit: Michael Pratt <mpratt@google.com>
diff --git a/cmd/makemac/config.go b/cmd/makemac/config.go
index 8a72ffb..742a090 100644
--- a/cmd/makemac/config.go
+++ b/cmd/makemac/config.go
@@ -5,10 +5,28 @@
package main
import (
+ "cmp"
"fmt"
"log"
+ "slices"
+
+ "go.chromium.org/luci/swarming/client/swarming"
)
+// swarmingConfig describes a swarming server.
+type swarmingConfig struct {
+ Host string // Swarming host URL
+ Pool string // Pool containing MacService bots
+
+ client swarming.Client
+}
+
+// Standard public swarming host.
+var publicSwarming = &swarmingConfig{
+ Host: "chromium-swarm.appspot.com",
+ Pool: "luci.golang.shared-workers",
+}
+
// imageConfig describes how many instances of a specific image type should
// exist.
type imageConfig struct {
@@ -19,7 +37,7 @@
MinCount int // minimum instance count to maintain
}
-// Production image configuration.
+// Production image configuration for each swarming host.
//
// After changing an image here, makemac will automatically destroy instances
// with the old image. Changing hostname, cert, or key will _not_ automatically
@@ -28,41 +46,43 @@
// TODO(prattmic): rather than storing secrets in secret manager, makemac could
// use genbotcert to generate valid certificate/key pairs on the fly, unique to
// each lease, which could then have unique hostnames.
-var prodImageConfig = []imageConfig{
- {
- Hostname: "darwin-amd64-10_15",
- Cert: "secret:symbolic-datum-552/darwin-amd64-10_15-cert",
- Key: "secret:symbolic-datum-552/darwin-amd64-10_15-key",
- Image: "4aaca93eedef29a20715259ee1a5a5f4309528dc1ef8a0ab2a0dafa08286ca57",
- MinCount: 5, // release branches only
- },
- {
- Hostname: "darwin-amd64-11",
- Cert: "secret:symbolic-datum-552/darwin-amd64-11-cert",
- Key: "secret:symbolic-datum-552/darwin-amd64-11-key",
- Image: "f0cc898922b37726f6d5ad7b260e92b0443c6289b535cb0a32fd2955abe8adcc",
- MinCount: 10,
- },
- {
- Hostname: "darwin-amd64-12",
- Cert: "secret:symbolic-datum-552/darwin-amd64-12-cert",
- Key: "secret:symbolic-datum-552/darwin-amd64-12-key",
- Image: "0a45171fb12a7efc3e7c5170b3292e592822dfc63c15aca0d093d94621097b8d",
- MinCount: 10,
- },
- {
- Hostname: "darwin-amd64-13",
- Cert: "secret:symbolic-datum-552/darwin-amd64-13-cert",
- Key: "secret:symbolic-datum-552/darwin-amd64-13-key",
- Image: "f1bda73984f0725f2fa147d277ef87498bdec170030e1c477ee3576b820f1fb6",
- MinCount: 10,
- },
- {
- Hostname: "darwin-amd64-14",
- Cert: "secret:symbolic-datum-552/darwin-amd64-14-cert",
- Key: "secret:symbolic-datum-552/darwin-amd64-14-key",
- Image: "ad1a56b7fec85ead9992b04444c4b5aef81becf38f85529976646f14a9ce5410",
- MinCount: 10,
+var prodImageConfig = map[*swarmingConfig][]imageConfig{
+ publicSwarming: {
+ {
+ Hostname: "darwin-amd64-10_15",
+ Cert: "secret:symbolic-datum-552/darwin-amd64-10_15-cert",
+ Key: "secret:symbolic-datum-552/darwin-amd64-10_15-key",
+ Image: "57b56e0a86984934370bf00058b2bd708031d256104167a3bbbc5ff5aaaf6939",
+ MinCount: 5, // release branches only
+ },
+ {
+ Hostname: "darwin-amd64-11",
+ Cert: "secret:symbolic-datum-552/darwin-amd64-11-cert",
+ Key: "secret:symbolic-datum-552/darwin-amd64-11-key",
+ Image: "3279e7f8aef8a1d02ba0897de44e5306f94c8cacec3c8c662a897b810879f655",
+ MinCount: 10,
+ },
+ {
+ Hostname: "darwin-amd64-12",
+ Cert: "secret:symbolic-datum-552/darwin-amd64-12-cert",
+ Key: "secret:symbolic-datum-552/darwin-amd64-12-key",
+ Image: "959a409833522fcba0be62c0c818d68b29d4e1be28d3cbf43dbbc81cb3e3fdeb",
+ MinCount: 10,
+ },
+ {
+ Hostname: "darwin-amd64-13",
+ Cert: "secret:symbolic-datum-552/darwin-amd64-13-cert",
+ Key: "secret:symbolic-datum-552/darwin-amd64-13-key",
+ Image: "30efbbd26e846da8158a7252d47b3adca15b30270668a95620ace3502cdcaa36",
+ MinCount: 10,
+ },
+ {
+ Hostname: "darwin-amd64-14",
+ Cert: "secret:symbolic-datum-552/darwin-amd64-14-cert",
+ Key: "secret:symbolic-datum-552/darwin-amd64-14-key",
+ Image: "3ec96f33cf17c85bd6d1bbf122c327bc9e5c62620c3ef9ff63e2db4feebdd8da",
+ MinCount: 10,
+ },
},
}
@@ -79,13 +99,27 @@
return m
}
-func init() {
- // Panic if prodImageConfig contains duplicates.
- imageConfigMap(prodImageConfig)
+// sortedSwarmingConfigs returns the swarming configs in c, sorted by host.
+func sortedSwarmingConfigs(c map[*swarmingConfig][]imageConfig) []*swarmingConfig {
+ scs := make([]*swarmingConfig, 0, len(c))
+ for sc := range c {
+ scs = append(scs, sc)
+ }
+ slices.SortFunc(scs, func(a, b *swarmingConfig) int {
+ return cmp.Compare(a.Host, b.Host)
+ })
+ return scs
}
-func logImageConfig(cc []imageConfig) {
- log.Printf("Image configuration:")
+func init() {
+ // Panic if prodImageConfig contains duplicates.
+ for _, c := range prodImageConfig {
+ imageConfigMap(c)
+ }
+}
+
+func logImageConfig(sc *swarmingConfig, cc []imageConfig) {
+ log.Printf("%s image configuration:", sc.Host)
for _, c := range cc {
log.Printf("\t%s: image=%s\tcount=%d", c.Hostname, c.Image, c.MinCount)
}
diff --git a/cmd/makemac/main.go b/cmd/makemac/main.go
index 8ff5a05..df11eb6 100644
--- a/cmd/makemac/main.go
+++ b/cmd/makemac/main.go
@@ -25,6 +25,7 @@
"log"
"regexp"
"sort"
+ "strings"
"time"
"go.chromium.org/luci/swarming/client/swarming"
@@ -51,16 +52,16 @@
)
const (
- swarmingHost = "chromium-swarm.appspot.com"
- swarmingPool = "luci.golang.shared-workers"
-)
-
-const (
macServiceCustomer = "golang"
- // Leases managed by makemac have ProjectName "makemac". Leases without
- // this project will not be touched.
- managedProject = "makemac"
+ // Leases managed by makemac have ProjectName "makemac/SWARMING_HOST",
+ // indicating both that the lease is managed by makemac and which
+ // swarming host it belongs to. Leases without this project prefix
+ // will not be touched.
+ //
+ // Note that we track the swarming host directly in the lease project
+ // name because new leases may not have yet connected to the swarming
+ // server, but we still need to know which host to count them towards.
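+ // For example, a lease for the public host has ProjectName
+ // "makemac/chromium-swarm.appspot.com".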
+ managedProjectPrefix = "makemac"
)
func main() {
@@ -88,18 +89,22 @@
return fmt.Errorf("error creating authenticated client: %w", err)
}
- sc, err := swarming.NewClient(ctx, swarming.ClientOptions{
- ServiceURL: "https://" + swarmingHost,
- AuthenticatedClient: ac,
- })
- if err != nil {
- return fmt.Errorf("error creating swarming client: %w", err)
+ // Initialize each swarming client.
+ for sc, ic := range prodImageConfig {
+ c, err := swarming.NewClient(ctx, swarming.ClientOptions{
+ ServiceURL: "https://" + sc.Host,
+ AuthenticatedClient: ac,
+ })
+ if err != nil {
+ return fmt.Errorf("error creating swarming client for %s: %w", sc.Host, err)
+ }
+ sc.client = c
+
+ logImageConfig(sc, ic)
}
- logImageConfig(prodImageConfig)
-
// Always run once at startup.
- runOnce(ctx, sc, mc)
+ runOnce(ctx, prodImageConfig, mc)
if *period == 0 {
// User only wants a single check. We're done.
@@ -108,14 +113,14 @@
t := time.NewTicker(*period)
for range t.C {
- runOnce(ctx, sc, mc)
+ runOnce(ctx, prodImageConfig, mc)
}
return nil
}
-func runOnce(ctx context.Context, sc swarming.Client, mc macServiceClient) {
- bots, err := swarmingBots(ctx, sc)
+func runOnce(ctx context.Context, config map[*swarmingConfig][]imageConfig, mc macServiceClient) {
+ bots, err := swarmingBots(ctx, config)
if err != nil {
log.Printf("Error looking up swarming bots: %v", err)
return
@@ -134,12 +139,28 @@
handleMissingBots(mc, bots, leases)
handleDeadBots(mc, bots, leases)
renewLeases(mc, leases)
- handleObsoleteLeases(mc, prodImageConfig, leases)
- addNewLeases(mc, prodImageConfig, leases)
+ handleObsoleteLeases(mc, config, leases)
+ addNewLeases(mc, config, leases)
+}
+
+// leaseSwarmingHost returns the swarming host a managed lease belongs to.
+//
+// Returns "" if this isn't a managed lease.
+func leaseSwarmingHost(l macservice.Lease) string {
+ prefix, host, ok := strings.Cut(l.VMResourceNamespace.ProjectName, "/")
+ if !ok {
+ // Malformed project name, must not be managed.
+ return ""
+ }
+ if prefix != managedProjectPrefix {
+ // Some other prefix. Not managed.
+ return ""
+ }
+ return host
}
func leaseIsManaged(l macservice.Lease) bool {
- return l.VMResourceNamespace.ProjectName == managedProject
+ return leaseSwarmingHost(l) != ""
}
func logSummary(bots map[string]*spb.BotInfo, leases map[string]macservice.Instance) {
@@ -181,14 +202,14 @@
for _, k := range keys {
inst := leases[k]
- managed := false
- if leaseIsManaged(inst.Lease) {
- managed = true
+ swarming := leaseSwarmingHost(inst.Lease)
+ if swarming == "" {
+ swarming = "<unmanaged>"
}
image := inst.InstanceSpecification.DiskSelection.ImageHashes.BootSHA256
- log.Printf("\t%s: managed=%t\timage=%s", k, managed, image)
+ log.Printf("\t%s: image=%s\tswarming=%s", k, image, swarming)
}
}
@@ -198,34 +219,37 @@
// swarmingBots returns the set of bots backed by MacService, as seen by swarming.
// The map key is the MacService lease ID.
// Bots may be dead.
-func swarmingBots(ctx context.Context, sc swarming.Client) (map[string]*spb.BotInfo, error) {
- dimensions := []*spb.StringPair{
- {
- Key: "pool",
- Value: swarmingPool,
- },
- {
- Key: "os",
- Value: "Mac",
- },
- }
- bb, err := sc.ListBots(ctx, dimensions)
- if err != nil {
- return nil, fmt.Errorf("error listing bots: %w", err)
- }
-
+func swarmingBots(ctx context.Context, config map[*swarmingConfig][]imageConfig) (map[string]*spb.BotInfo, error) {
m := make(map[string]*spb.BotInfo)
- for _, b := range bb {
- id := b.GetBotId()
- match := botIDRe.FindStringSubmatch(id)
- if match == nil {
- log.Printf("Swarming bot %s is not a MacService bot, skipping...", id)
- continue
+ scs := sortedSwarmingConfigs(config)
+ for _, sc := range scs {
+ dimensions := []*spb.StringPair{
+ {
+ Key: "pool",
+ Value: sc.Pool,
+ },
+ {
+ Key: "os",
+ Value: "Mac",
+ },
+ }
+ bb, err := sc.client.ListBots(ctx, dimensions)
+ if err != nil {
+ return nil, fmt.Errorf("error listing bots: %w", err)
}
- lease := match[1]
- m[lease] = b
+ for _, b := range bb {
+ id := b.GetBotId()
+ match := botIDRe.FindStringSubmatch(id)
+ if match == nil {
+ log.Printf("Swarming bot %s is not a MacService bot, skipping...", id)
+ continue
+ }
+
+ lease := match[1]
+ m[lease] = b
+ }
}
return m, nil
@@ -397,10 +421,14 @@
// handleObsoleteLeases vacates any makemac-managed leases with images that are
// not requested by imageConfigs. This typically occurs when updating makemac
// to roll out a new image version.
-func handleObsoleteLeases(mc macServiceClient, config []imageConfig, leases map[string]macservice.Instance) {
+func handleObsoleteLeases(mc macServiceClient, config map[*swarmingConfig][]imageConfig, leases map[string]macservice.Instance) {
log.Printf("Checking for leases with obsolete images...")
- configMap := imageConfigMap(config)
+ // swarming host -> image sha -> image config
+ swarmingImages := make(map[string]map[string]*imageConfig)
+ for sc, ic := range config {
+ swarmingImages[sc.Host] = imageConfigMap(ic)
+ }
var ids []string
for id := range leases {
@@ -413,13 +441,20 @@
for _, id := range ids {
lease := leases[id]
- if !leaseIsManaged(lease.Lease) {
+ swarming := leaseSwarmingHost(lease.Lease)
+ if swarming == "" {
log.Printf("Lease %s is not managed by makemac; skipping image check", id)
continue
}
+ images, ok := swarmingImages[swarming]
+ if !ok {
+ log.Printf("Lease %s belongs to unknown swarming host %s; skipping image check", id, swarming)
+ continue
+ }
+
image := lease.InstanceSpecification.DiskSelection.ImageHashes.BootSHA256
- if _, ok := configMap[image]; ok {
+ if _, ok := images[image]; ok {
continue
}
@@ -434,12 +469,12 @@
}
}
-func makeLeaseRequest(c *imageConfig) (macservice.LeaseRequest, error) {
- cert, err := secret.DefaultResolver.ResolveSecret(c.Cert)
+func makeLeaseRequest(sc *swarmingConfig, ic *imageConfig) (macservice.LeaseRequest, error) {
+ cert, err := secret.DefaultResolver.ResolveSecret(ic.Cert)
if err != nil {
return macservice.LeaseRequest{}, fmt.Errorf("error resolving certificate secret: %w", err)
}
- key, err := secret.DefaultResolver.ResolveSecret(c.Key)
+ key, err := secret.DefaultResolver.ResolveSecret(ic.Key)
if err != nil {
return macservice.LeaseRequest{}, fmt.Errorf("error resolving key secret: %w", err)
}
@@ -447,24 +482,24 @@
return macservice.LeaseRequest{
VMResourceNamespace: macservice.Namespace{
CustomerName: macServiceCustomer,
- ProjectName: managedProject,
+ ProjectName: managedProjectPrefix + "/" + sc.Host,
},
InstanceSpecification: macservice.InstanceSpecification{
Profile: macservice.V1_MEDIUM_VM,
AccessLevel: macservice.GOLANG_OSS,
DiskSelection: macservice.DiskSelection{
ImageHashes: macservice.ImageHashes{
- BootSHA256: c.Image,
+ BootSHA256: ic.Image,
},
},
Metadata: []macservice.MetadataEntry{
{
Key: "golang.swarming",
- Value: swarmingHost,
+ Value: sc.Host,
},
{
Key: "golang.hostname",
- Value: c.Hostname,
+ Value: ic.Hostname,
},
{
Key: "golang.cert",
@@ -482,59 +517,75 @@
// addNewLeases adds new MacService leases as needed to ensure that there are
// at least MinCount makemac-managed leases of each configured image type.
-func addNewLeases(mc macServiceClient, config []imageConfig, leases map[string]macservice.Instance) {
+func addNewLeases(mc macServiceClient, config map[*swarmingConfig][]imageConfig, leases map[string]macservice.Instance) {
log.Printf("Checking if new leases are required...")
- configMap := imageConfigMap(config)
-
- imageCount := make(map[string]int)
-
+ // Count images per swarming host. Each host gets a different
+ // configuration. Map of swarming host -> image sha -> count.
+ swarmingImageCount := make(map[string]map[string]int)
for _, lease := range leases {
- if !leaseIsManaged(lease.Lease) {
+ swarming := leaseSwarmingHost(lease.Lease)
+ if swarming == "" {
// Don't count leases we don't manage.
continue
}
+ if _, ok := swarmingImageCount[swarming]; !ok {
+ swarmingImageCount[swarming] = make(map[string]int)
+ }
image := lease.InstanceSpecification.DiskSelection.ImageHashes.BootSHA256
- imageCount[image]++
+ swarmingImageCount[swarming][image]++
}
- var images []string
- for image := range configMap {
- images = append(images, image)
+ // Iterate through configs in swarming order, then image order.
+ swarmingOrder := sortedSwarmingConfigs(config)
+ imageMap := make([]map[string]*imageConfig, 0, len(swarmingOrder))
+ imageOrder := make([][]string, 0, len(swarmingOrder))
+ for _, sc := range swarmingOrder {
+ m := imageConfigMap(config[sc])
+ order := make([]string, 0, len(m))
+ for image := range m {
+ order = append(order, image)
+ }
+ sort.Strings(order)
+ imageMap = append(imageMap, m)
+ imageOrder = append(imageOrder, order)
}
- sort.Strings(images)
log.Printf("Current image lease count:")
- for _, image := range images {
- config := configMap[image]
- gotCount := imageCount[config.Image]
- log.Printf("\t%s: have %d leases\twant %d leases", config.Image, gotCount, config.MinCount)
+ for i, sc := range swarmingOrder {
+ for _, image := range imageOrder[i] {
+ config := imageMap[i][image]
+ gotCount := swarmingImageCount[sc.Host][config.Image]
+ log.Printf("\tHost %s: image %s: have %d leases\twant %d leases", sc.Host, config.Image, gotCount, config.MinCount)
+ }
}
- for _, image := range images {
- config := configMap[image]
- gotCount := imageCount[config.Image]
- need := config.MinCount - gotCount
- if need <= 0 {
- continue
- }
-
- log.Printf("Image %s: creating %d new leases", config.Image, need)
- req, err := makeLeaseRequest(config)
- if err != nil {
- log.Printf("Image %s: creating lease request: error %v", config.Image, err)
- continue
- }
-
- for i := 0; i < need; i++ {
- log.Printf("Image %s: creating lease %d...", config.Image, i)
- resp, err := mc.Lease(req)
- if err != nil {
- log.Printf("Image %s: creating lease %d: error %v", config.Image, i, err)
+ for i, sc := range swarmingOrder {
+ for _, image := range imageOrder[i] {
+ config := imageMap[i][image]
+ gotCount := swarmingImageCount[sc.Host][config.Image]
+ need := config.MinCount - gotCount
+ if need <= 0 {
continue
}
- log.Printf("Image %s: created lease %s", config.Image, resp.PendingLease.LeaseID)
+
+ log.Printf("Host %s: image %s: creating %d new leases", sc.Host, config.Image, need)
+ req, err := makeLeaseRequest(sc, config)
+ if err != nil {
+ log.Printf("Host %s: image %s: creating lease request: error %v", sc.Host, config.Image, err)
+ continue
+ }
+
+ for i := 0; i < need; i++ {
+ log.Printf("Host %s: image %s: creating lease %d...", sc.Host, config.Image, i)
+ resp, err := mc.Lease(req)
+ if err != nil {
+ log.Printf("Host %s: image %s: creating lease %d: error %v", sc.Host, config.Image, i, err)
+ continue
+ }
+ log.Printf("Host %s: image %s: created lease %s", sc.Host, config.Image, resp.PendingLease.LeaseID)
+ }
}
}
}
diff --git a/cmd/makemac/main_test.go b/cmd/makemac/main_test.go
index e3fca49..9124c32 100644
--- a/cmd/makemac/main_test.go
+++ b/cmd/makemac/main_test.go
@@ -47,6 +47,8 @@
}
func TestHandleMissingBots(t *testing.T) {
+ const project = managedProjectPrefix + "/swarming.example.com"
+
// Test leases:
// * "healthy" connected to LUCI, and is healthy.
// * "dead" connected to LUCI, but later died.
@@ -63,21 +65,21 @@
"healthy": {
Lease: macservice.Lease{
LeaseID: "healthy",
- VMResourceNamespace: macservice.Namespace{ProjectName: managedProject},
+ VMResourceNamespace: macservice.Namespace{ProjectName: project},
Expires: time.Now().Add(createExpirationDuration - 2*time.Hour),
},
},
"newBooting": {
Lease: macservice.Lease{
LeaseID: "newBooting",
- VMResourceNamespace: macservice.Namespace{ProjectName: managedProject},
+ VMResourceNamespace: macservice.Namespace{ProjectName: project},
Expires: time.Now().Add(createExpirationDuration - 5*time.Minute),
},
},
"neverBooted": {
Lease: macservice.Lease{
LeaseID: "neverBooted",
- VMResourceNamespace: macservice.Namespace{ProjectName: managedProject},
+ VMResourceNamespace: macservice.Namespace{ProjectName: project},
Expires: time.Now().Add(createExpirationDuration - 2*time.Hour),
},
},
@@ -105,6 +107,8 @@
}
func TestHandleDeadBots(t *testing.T) {
+ const project = managedProjectPrefix + "/swarming.example.com"
+
// Test leases:
// * "healthy" connected to LUCI, and is healthy.
// * "dead" connected to LUCI, but later died, and the lease is gone from MacService.
@@ -122,14 +126,14 @@
"healthy": {
Lease: macservice.Lease{
LeaseID: "healthy",
- VMResourceNamespace: macservice.Namespace{ProjectName: managedProject},
+ VMResourceNamespace: macservice.Namespace{ProjectName: project},
Expires: time.Now().Add(createExpirationDuration - 2*time.Hour),
},
},
"deadLeasePresent": {
Lease: macservice.Lease{
LeaseID: "deadLeasePresent",
- VMResourceNamespace: macservice.Namespace{ProjectName: managedProject},
+ VMResourceNamespace: macservice.Namespace{ProjectName: project},
// Lease created 5 minutes ago. Doesn't matter;
// new lease checks don't apply here. See
// comment in handleDeadBots.
@@ -146,7 +150,7 @@
"neverBooted": {
Lease: macservice.Lease{
LeaseID: "neverBooted",
- VMResourceNamespace: macservice.Namespace{ProjectName: managedProject},
+ VMResourceNamespace: macservice.Namespace{ProjectName: project},
Expires: time.Now().Add(createExpirationDuration - 2*time.Hour),
},
},
@@ -167,6 +171,8 @@
}
func TestRenewLeases(t *testing.T) {
+ const project = managedProjectPrefix + "/swarming.example.com"
+
// Test leases:
// * "new" was created <1hr ago.
// * "standard" was created >1hr ago.
@@ -177,14 +183,14 @@
"new": {
Lease: macservice.Lease{
LeaseID: "new",
- VMResourceNamespace: macservice.Namespace{ProjectName: managedProject},
+ VMResourceNamespace: macservice.Namespace{ProjectName: project},
Expires: time.Now().Add(createExpirationDuration - 5*time.Minute),
},
},
"standard": {
Lease: macservice.Lease{
LeaseID: "standard",
- VMResourceNamespace: macservice.Namespace{ProjectName: managedProject},
+ VMResourceNamespace: macservice.Namespace{ProjectName: project},
Expires: time.Now().Add(renewExpirationDuration - 5*time.Minute),
},
},
@@ -208,26 +214,49 @@
}
func TestHandleObsoleteLeases(t *testing.T) {
+ swarming1 := &swarmingConfig{
+ Host: "swarming1.example.com",
+ Pool: "example.pool",
+ }
+ project1 := managedProjectPrefix + "/" + swarming1.Host
+ swarming2 := &swarmingConfig{
+ Host: "swarming2.example.com",
+ Pool: "example.pool",
+ }
+ project2 := managedProjectPrefix + "/" + swarming2.Host
+
// Test leases:
// * "active" uses image "active-image"
// * "obsolete" uses image "obsolete-image"
+ // * "obsolete-on-swarming2" uses image "obsolete-image" on "swarming2" (as configured)
// * "unmanaged" uses image "obsolete-image", but is not managed by makemac.
//
- // handleObsoleteLeases should vacate "obsolute" and none of the others.
- config := []imageConfig{
- {
- Hostname: "active",
- Cert: "dummy-cert",
- Key: "dummy-key",
- Image: "active-image",
- MinCount: 1,
+ // handleObsoleteLeases should vacate "obsolete" and none of the others.
+ config := map[*swarmingConfig][]imageConfig{
+ swarming1: {
+ {
+ Hostname: "active",
+ Cert: "dummy-cert",
+ Key: "dummy-key",
+ Image: "active-image",
+ MinCount: 1,
+ },
+ },
+ swarming2: {
+ {
+ Hostname: "obsolete-on-swarming2",
+ Cert: "dummy-cert",
+ Key: "dummy-key",
+ Image: "obsolete-image",
+ MinCount: 1,
+ },
},
}
leases := map[string]macservice.Instance{
"active": {
Lease: macservice.Lease{
LeaseID: "active",
- VMResourceNamespace: macservice.Namespace{ProjectName: managedProject},
+ VMResourceNamespace: macservice.Namespace{ProjectName: project1},
},
InstanceSpecification: macservice.InstanceSpecification{
DiskSelection: macservice.DiskSelection{
@@ -240,7 +269,20 @@
"obsolete": {
Lease: macservice.Lease{
LeaseID: "obsolete",
- VMResourceNamespace: macservice.Namespace{ProjectName: managedProject},
+ VMResourceNamespace: macservice.Namespace{ProjectName: project1},
+ },
+ InstanceSpecification: macservice.InstanceSpecification{
+ DiskSelection: macservice.DiskSelection{
+ ImageHashes: macservice.ImageHashes{
+ BootSHA256: "obsolete-image",
+ },
+ },
+ },
+ },
+ "obsolete-on-swarming2": {
+ Lease: macservice.Lease{
+ LeaseID: "obsolete-on-swarming2",
+ VMResourceNamespace: macservice.Namespace{ProjectName: project2},
},
InstanceSpecification: macservice.InstanceSpecification{
DiskSelection: macservice.DiskSelection{
@@ -276,37 +318,63 @@
}
func TestAddNewLeases(t *testing.T) {
+ swarming1 := &swarmingConfig{
+ Host: "swarming1.example.com",
+ Pool: "example.pool",
+ }
+ project1 := managedProjectPrefix + "/" + swarming1.Host
+ swarming2 := &swarmingConfig{
+ Host: "swarming2.example.com",
+ Pool: "example.pool",
+ }
+
// Test leases:
- // * "image-a-1" uses image "image-a"
+ // * "image-a-1" uses image "image-a" on "swarming1"
// * "unmanaged" uses image "image-a", but is not managed by makemac.
//
// Test images:
- // * "image-a" wants 2 instances.
- // * "image-b" wants 2 instances.
+ // * On "swarming1":
+ // * "image-a" wants 2 instances.
+ // * "image-b" wants 2 instances.
+ // * On "swarming2":
+ // * "image-a" wants 1 instances.
//
- // addNewLeases should create 1 "image-a" instance (ignoring
- // "unmanaged") and 2 "image-b" instances.
- config := []imageConfig{
- {
- Hostname: "a",
- Cert: "dummy-cert",
- Key: "dummy-key",
- Image: "image-a",
- MinCount: 2,
+ // addNewLeases should create:
+ // * 1 "image-a" instance on "swarming1"
+ // * 1 "image-a" instance on "swarming2"
+ // * 2 "image-b" instances on "swarming1"
+ config := map[*swarmingConfig][]imageConfig{
+ swarming1: {
+ {
+ Hostname: "a",
+ Cert: "dummy-cert",
+ Key: "dummy-key",
+ Image: "image-a",
+ MinCount: 2,
+ },
+ {
+ Hostname: "b",
+ Cert: "dummy-cert",
+ Key: "dummy-key",
+ Image: "image-b",
+ MinCount: 2,
+ },
},
- {
- Hostname: "b",
- Cert: "dummy-cert",
- Key: "dummy-key",
- Image: "image-b",
- MinCount: 2,
+ swarming2: {
+ {
+ Hostname: "a",
+ Cert: "dummy-cert",
+ Key: "dummy-key",
+ Image: "image-a",
+ MinCount: 1,
+ },
},
}
leases := map[string]macservice.Instance{
"image-a-1": {
Lease: macservice.Lease{
LeaseID: "image-a-1",
- VMResourceNamespace: macservice.Namespace{ProjectName: managedProject},
+ VMResourceNamespace: macservice.Namespace{ProjectName: project1},
},
InstanceSpecification: macservice.InstanceSpecification{
DiskSelection: macservice.DiskSelection{
@@ -334,17 +402,21 @@
var mc recordMacServiceClient
addNewLeases(&mc, config, leases)
- leaseA, err := makeLeaseRequest(&config[0])
+ leaseASwarm1, err := makeLeaseRequest(swarming1, &config[swarming1][0])
if err != nil {
- t.Fatalf("makeLeaseRequest(a) got err %v want nil", err)
+ t.Fatalf("makeLeaseRequest(a, swarm1) got err %v want nil", err)
}
- leaseB, err := makeLeaseRequest(&config[1])
+ leaseBSwarm1, err := makeLeaseRequest(swarming1, &config[swarming1][1])
if err != nil {
- t.Fatalf("makeLeaseRequest(b) got err %v want nil", err)
+ t.Fatalf("makeLeaseRequest(b, swarm1) got err %v want nil", err)
+ }
+ leaseASwarm2, err := makeLeaseRequest(swarming2, &config[swarming2][0])
+ if err != nil {
+ t.Fatalf("makeLeaseRequest(a, swarm2) got err %v want nil", err)
}
got := mc.lease
- want := []macservice.LeaseRequest{leaseA, leaseB, leaseB}
+ want := []macservice.LeaseRequest{leaseASwarm1, leaseBSwarm1, leaseBSwarm1, leaseASwarm2}
if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("Lease request mismatch (-want +got):\n%s", diff)
}