internal/coordinator/pool: add ledger
This change adds a ledger to the coordinator pool package. The ledger
keeps a record of the available EC2 instance types and their
attributes. It also keeps a record of resource usage for created EC2
instances in order to ensure that we do not exceed the resource limits
for the account.
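
The intended flow, sketched below purely for illustration (the helper
createInstance and the names used here are assumed, standing in for the
EC2 pool's real creation path), is to reserve vCPUs before an instance
is created, record the EC2 instance ID once creation succeeds, and
release the reservation if creation fails or when the instance goes
away:

    // reserveAndCreate is an illustrative sketch, not part of this change.
    func reserveAndCreate(ctx context.Context, l *ledger, name, vmType string) error {
        // Block until enough vCPU quota is free for this instance type.
        if err := l.ReserveResources(ctx, name, vmType); err != nil {
            return err
        }
        instID, err := createInstance(ctx, name, vmType) // assumed helper
        if err != nil {
            l.Remove(name) // creation failed: release the reserved vCPUs
            return err
        }
        // Record the instance ID; this marks the instance as actually created.
        return l.UpdateReservation(name, instID)
    }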
Updates golang/go#36841
Change-Id: I6a0afdb31c8e3a83634e7c1fc8b2b733b7a50c01
Reviewed-on: https://go-review.googlesource.com/c/build/+/247906
Run-TryBot: Carlos Amedee <carlos@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Dmitri Shuralyov <dmitshur@golang.org>
diff --git a/internal/coordinator/pool/ledger.go b/internal/coordinator/pool/ledger.go
new file mode 100644
index 0000000..105145c
--- /dev/null
+++ b/internal/coordinator/pool/ledger.go
@@ -0,0 +1,256 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.13
+// +build linux darwin
+
+package pool
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "sort"
+ "sync"
+ "time"
+
+ "golang.org/x/build/internal/cloud"
+)
+
+// entry contains the resource usage of an instance as well as
+// identifying information.
+type entry struct {
+ createdAt time.Time
+ instanceID string
+ instanceName string
+ vCPUCount int64
+}
+
+// ledger contains a record of the instances and their resource
+// consumption. Before an instance is created, a call to the ledger
+// will ensure that there are available resources for the new instance.
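+// The expected call sequence is ReserveResources before creating an instance,
+// UpdateReservation once the instance has been created, and Remove when the
+// instance is destroyed.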
+type ledger struct {
+ mu sync.RWMutex
+ // cpuLimit is the limit of how many on-demand vCPUs can be created on EC2.
+ cpuLimit int64
+ // cpuUsed is the current count of vCPUs reserved for on-demand instances.
+ cpuUsed int64
+ // entries contains a mapping of instance name to entries for each instance
+ // that has resources allocated to it.
+ entries map[string]*entry
+ // types contains a mapping of instance type name to instance type for each
+ // ARM64 EC2 instance type.
+ types map[string]*cloud.InstanceType
+}
+
+// newLedger creates a new ledger.
+func newLedger() *ledger {
+ return &ledger{
+ entries: make(map[string]*entry),
+ types: make(map[string]*cloud.InstanceType),
+ }
+}
+
+// ReserveResources attempts to reserve the resources required to create an instance of the
+// requested VM type. It retries the reservation until it succeeds or until the context is
+// canceled or its deadline is reached.
+func (l *ledger) ReserveResources(ctx context.Context, instName, vmType string) error {
+ instType, err := l.PrepareReservationRequest(instName, vmType)
+ if err != nil {
+ return err
+ }
+
+ t := time.NewTicker(2 * time.Second)
+ defer t.Stop()
+ for {
+ if l.allocateCPU(instType.CPU, instName) {
+ return nil
+ }
+ select {
+ case <-t.C:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+}
+
+// PrepareReservationRequest ensures that all the preconditions necessary for a reservation request
+// are met. If the conditions are met, the instance type for the requested VM type is returned;
+// if not, an error is returned.
+func (l *ledger) PrepareReservationRequest(instName, vmType string) (*cloud.InstanceType, error) {
+ l.mu.RLock()
+ defer l.mu.RUnlock()
+
+ instType, ok := l.types[vmType]
+ if !ok {
+ return nil, fmt.Errorf("unknown EC2 vm type: %s", vmType)
+ }
+ _, ok = l.entries[instName]
+ if ok {
+ return nil, fmt.Errorf("quota has already been allocated for %s of type %s", instName, vmType)
+ }
+ return instType, nil
+}
+
+// releaseResources releases the resources allocated to the instance with the given name. An error
+// is returned if no entry is found for the instance.
+// Lock l.mu must be held by the caller.
+func (l *ledger) releaseResources(instName string) error {
+ e, ok := l.entries[instName]
+ if !ok {
+ return fmt.Errorf("instance not found for releasing quota: %s", instName)
+ }
+ l.deallocateCPU(e.vCPUCount)
+ return nil
+}
+
+// allocateCPU attempts to allocate numCPU vCPUs for the named instance while staying within
+// the CPU quota. If enough CPU is available, the amount of used CPU is increased by the
+// requested amount, an entry for the instance is added to the entries map (or the existing
+// entry is updated), and true is returned. If there is not enough CPU available, false is
+// returned.
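+// For example, with cpuLimit=20 and cpuUsed=16, a request for 8 vCPUs is
+// rejected (16+8 > 20) and cpuUsed stays at 16, while a request for 4 vCPUs
+// succeeds and raises cpuUsed to 20.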
+func (l *ledger) allocateCPU(numCPU int64, instName string) bool {
+ // should never happen
+ if numCPU <= 0 {
+ log.Printf("invalid allocation requested: %d", numCPU)
+ return false
+ }
+
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ if numCPU+l.cpuUsed > l.cpuLimit {
+ return false
+ }
+ l.cpuUsed += numCPU
+ e, ok := l.entries[instName]
+ if ok {
+ e.vCPUCount = numCPU
+ } else {
+ l.entries[instName] = &entry{
+ instanceName: instName,
+ vCPUCount: numCPU,
+ }
+ }
+ return true
+}
+
+// deallocateCPU releases the CPU allocated to an instance associated with an entry. When an instance
+// is deleted, the CPU allocated to it should no longer count against the CPU quota reserved for
+// all on-demand instances. If an invalid CPU count is passed in, the used CPU count is not lowered.
+// Lock l.mu must be held by the caller.
+func (l *ledger) deallocateCPU(numCPU int64) {
+ if numCPU <= 0 {
+ log.Printf("invalid deallocation requested: %d", numCPU)
+ return
+ }
+ if l.cpuUsed-numCPU < 0 {
+ log.Printf("attempting to deallocate more cpu than used: %d of %d", numCPU, l.cpuUsed)
+ return
+ }
+ l.cpuUsed -= numCPU
+}
+
+// UpdateReservation updates the entry for an instance with that instance's EC2 instance ID and
+// records its creation time. If an entry for the instance does not exist, an error is returned;
+// another mechanism should be used to manage untracked instances. Updating the reservation acts
+// as a signal that the instance has actually been created, since the instance ID is known.
+func (l *ledger) UpdateReservation(instName, instID string) error {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ e, ok := l.entries[instName]
+ if !ok {
+ return fmt.Errorf("unable to update reservation: instance not found %s", instName)
+ }
+ e.createdAt = time.Now()
+ e.instanceID = instID
+ return nil
+}
+
+// Remove releases any reserved resources for an instance and deletes the associated entry.
+// An error is returned if an entry does not exist for the instance.
+func (l *ledger) Remove(instName string) error {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ if err := l.releaseResources(instName); err != nil {
+ return fmt.Errorf("unable to remove instance: %w", err)
+ }
+ delete(l.entries, instName)
+ return nil
+}
+
+// InstanceID retrieves the instance ID for an instance by looking up the instance name.
+// If an instance is not found, an empty string is returned.
+func (l *ledger) InstanceID(instName string) string {
+ l.mu.RLock()
+ defer l.mu.RUnlock()
+
+ e, ok := l.entries[instName]
+ if !ok {
+ return ""
+ }
+ return e.instanceID
+}
+
+// SetCPULimit sets the vCPU limit used to determine if a CPU allocation would
+// cross the threshold for available CPU for on-demand instances.
+func (l *ledger) SetCPULimit(numCPU int64) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ l.cpuLimit = numCPU
+}
+
+// UpdateInstanceTypes updates the map used to look up, by instance type name, the
+// resources required for an instance of that type.
+func (l *ledger) UpdateInstanceTypes(types []*cloud.InstanceType) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ for _, it := range types {
+ l.types[it.Type] = it
+ }
+}
+
+// resources contains the current limit and usage of instance related resources.
+type resources struct {
+ // InstCount is the count of how many on-demand instances are tracked in the ledger.
+ InstCount int64
+ // CPUUsed is the count of vCPUs for on-demand instances that are currently allocated in the ledger.
+ CPUUsed int64
+ // CPULimit is the maximum number of vCPUs that can be allocated for on-demand instances.
+ CPULimit int64
+}
+
+// Resources retrieves the resource usage and limits for the instances
+// tracked in the ledger.
+func (l *ledger) Resources() *resources {
+ l.mu.RLock()
+ defer l.mu.RUnlock()
+
+ return &resources{
+ InstCount: int64(len(l.entries)),
+ CPUUsed: l.cpuUsed,
+ CPULimit: l.cpuLimit,
+ }
+}
+
+// ResourceTime returns a ResourceTime entry for each active instance.
+// The returned slice is sorted by creation time.
+func (l *ledger) ResourceTime() []ResourceTime {
+ l.mu.RLock()
+ defer l.mu.RUnlock()
+
+ ret := make([]ResourceTime, 0, len(l.entries))
+ for name, data := range l.entries {
+ ret = append(ret, ResourceTime{
+ Name: name,
+ Creation: data.createdAt,
+ })
+ }
+ sort.Sort(ByCreationTime(ret))
+ return ret
+}
diff --git a/internal/coordinator/pool/ledger_test.go b/internal/coordinator/pool/ledger_test.go
new file mode 100644
index 0000000..3349b71
--- /dev/null
+++ b/internal/coordinator/pool/ledger_test.go
@@ -0,0 +1,539 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.13
+// +build linux darwin
+
+package pool
+
+import (
+ "context"
+ "sort"
+ "testing"
+ "time"
+
+ "golang.org/x/build/internal/cloud"
+)
+
+func canceledContext() context.Context {
+ ctx, cancel := context.WithCancel(context.Background())
+ cancel()
+ return ctx
+}
+
+func TestLedgerReserveResources(t *testing.T) {
+ testCases := []struct {
+ desc string
+ ctx context.Context
+ instName string
+ vmType string
+ instTypes []*cloud.InstanceType
+ cpuLimit int64
+ cpuUsed int64
+ wantErr bool
+ }{
+ {
+ desc: "success",
+ ctx: context.Background(),
+ instName: "small-instance",
+ vmType: "aa.small",
+ instTypes: []*cloud.InstanceType{
+ &cloud.InstanceType{
+ Type: "aa.small",
+ CPU: 5,
+ },
+ },
+ cpuLimit: 20,
+ cpuUsed: 5,
+ wantErr: false,
+ },
+ {
+ desc: "cancelled-context",
+ ctx: canceledContext(),
+ instName: "small-instance",
+ vmType: "aa.small",
+ instTypes: []*cloud.InstanceType{
+ &cloud.InstanceType{
+ Type: "aa.small",
+ CPU: 5,
+ },
+ },
+ cpuLimit: 20,
+ cpuUsed: 20,
+ wantErr: true,
+ },
+ {
+ desc: "unknown-instance-type",
+ ctx: context.Background(),
+ instName: "small-instance",
+ vmType: "aa.small",
+ instTypes: []*cloud.InstanceType{},
+ cpuLimit: 20,
+ cpuUsed: 5,
+ wantErr: true,
+ },
+ {
+ desc: "instance-already-exists",
+ ctx: context.Background(),
+ instName: "large-instance",
+ vmType: "aa.small",
+ instTypes: []*cloud.InstanceType{
+ &cloud.InstanceType{
+ Type: "aa.small",
+ CPU: 5,
+ },
+ },
+ cpuLimit: 20,
+ cpuUsed: 5,
+ wantErr: true,
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.desc, func(t *testing.T) {
+ l := &ledger{
+ cpuLimit: tc.cpuLimit,
+ cpuUsed: tc.cpuUsed,
+ entries: map[string]*entry{
+ "large-instance": &entry{},
+ },
+ types: make(map[string]*cloud.InstanceType),
+ }
+ l.UpdateInstanceTypes(tc.instTypes)
+ gotErr := l.ReserveResources(tc.ctx, tc.instName, tc.vmType)
+ if (gotErr != nil) != tc.wantErr {
+ t.Errorf("ledger.reserveResources(%+v, %s, %s) = %s; want error %t", tc.ctx, tc.instName, tc.vmType, gotErr, tc.wantErr)
+ }
+ })
+ }
+}
+
+func TestLedgerReleaseResources(t *testing.T) {
+ testCases := []struct {
+ desc string
+ instName string
+ entry *entry
+ cpuUsed int64
+ wantCPUUsed int64
+ wantErr bool
+ }{
+ {
+ desc: "success",
+ instName: "inst-x",
+ entry: &entry{
+ instanceName: "inst-x",
+ vCPUCount: 10,
+ },
+ cpuUsed: 20,
+ wantCPUUsed: 10,
+ wantErr: false,
+ },
+ {
+ desc: "entry-not-found",
+ instName: "inst-x",
+ entry: &entry{
+ instanceName: "inst-w",
+ vCPUCount: 10,
+ },
+ cpuUsed: 20,
+ wantCPUUsed: 20,
+ wantErr: true,
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.desc, func(t *testing.T) {
+ l := &ledger{
+ cpuUsed: tc.cpuUsed,
+ entries: map[string]*entry{
+ tc.entry.instanceName: tc.entry,
+ },
+ }
+ gotErr := l.releaseResources(tc.instName)
+ if (gotErr != nil) != tc.wantErr {
+ t.Errorf("ledger.releaseResources(%s) = %s; want error %t", tc.instName, gotErr, tc.wantErr)
+ }
+ if l.cpuUsed != tc.wantCPUUsed {
+ t.Errorf("ledger.cpuUsed = %d; wanted %d", l.cpuUsed, tc.wantCPUUsed)
+ }
+ })
+ }
+}
+
+func TestLedgerAllocateCPU(t *testing.T) {
+ testCases := []struct {
+ desc string
+ numCPU int64
+ cpuLimit int64
+ cpuUsed int64
+ instName string
+ wantReserve bool
+ wantCPUUsed int64
+ }{
+ {
+ desc: "reservation-success",
+ numCPU: 10,
+ cpuLimit: 10,
+ cpuUsed: 0,
+ instName: "chacha",
+ wantReserve: true,
+ wantCPUUsed: 10,
+ },
+ {
+ desc: "failed-to-reserve",
+ numCPU: 10,
+ cpuLimit: 5,
+ cpuUsed: 0,
+ instName: "pasa",
+ wantReserve: false,
+ wantCPUUsed: 0,
+ },
+ {
+ desc: "invalid-cpu-count",
+ numCPU: 0,
+ cpuLimit: 50,
+ cpuUsed: 20,
+ instName: "double",
+ wantReserve: false,
+ wantCPUUsed: 20,
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.desc, func(t *testing.T) {
+ l := &ledger{
+ entries: make(map[string]*entry),
+ cpuLimit: tc.cpuLimit,
+ cpuUsed: tc.cpuUsed,
+ }
+ gotReserve := l.allocateCPU(tc.numCPU, tc.instName)
+ if gotReserve != tc.wantReserve {
+ t.Errorf("ledger.allocateCPU(%d) = %v, want %v", tc.numCPU, gotReserve, tc.wantReserve)
+ }
+ if l.cpuUsed != tc.wantCPUUsed {
+ t.Errorf("ledger.cpuUsed = %d; want %d", l.cpuUsed, tc.wantCPUUsed)
+ }
+ if _, ok := l.entries[tc.instName]; tc.wantReserve && !ok {
+ t.Fatalf("ledger.entries[%s] = nil; want it to exist", tc.instName)
+ }
+ if e, _ := l.entries[tc.instName]; tc.wantReserve && e.vCPUCount != tc.numCPU {
+ t.Fatalf("ledger.entries[%s].vCPUCount = %d; want %d", tc.instName, e.vCPUCount, tc.numCPU)
+ }
+ })
+ }
+}
+
+func TestLedgerDeallocateCPU(t *testing.T) {
+ testCases := []struct {
+ desc string
+ numCPU int64
+ cpuUsed int64
+ wantCPUUsed int64
+ }{
+ {
+ desc: "release-success",
+ numCPU: 10,
+ cpuUsed: 10,
+ wantCPUUsed: 0,
+ },
+ {
+ desc: "failed-to-release",
+ numCPU: 10,
+ cpuUsed: 5,
+ wantCPUUsed: 5,
+ },
+ {
+ desc: "invalid-cpu-count",
+ numCPU: 0,
+ cpuUsed: 0,
+ wantCPUUsed: 0,
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.desc, func(t *testing.T) {
+ l := &ledger{
+ cpuUsed: tc.cpuUsed,
+ }
+ l.deallocateCPU(tc.numCPU)
+ if l.cpuUsed != tc.wantCPUUsed {
+ t.Errorf("ledger.cpuUsed = %d; want %d", l.cpuUsed, tc.wantCPUUsed)
+ }
+ })
+ }
+}
+
+func TestLedgerUpdateReservation(t *testing.T) {
+ testCases := []struct {
+ desc string
+ instName string
+ instID string
+ entry *entry
+ wantErr bool
+ }{
+ {
+ desc: "success",
+ instName: "inst-x",
+ instID: "id-foo-x",
+ entry: &entry{
+ instanceName: "inst-x",
+ },
+ wantErr: false,
+ },
+ {
+ desc: "entry-not-found",
+ instName: "inst-x",
+ instID: "id-foo-x",
+ entry: &entry{
+ instanceName: "inst-w",
+ },
+ wantErr: true,
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.desc, func(t *testing.T) {
+ l := &ledger{
+ entries: map[string]*entry{
+ tc.entry.instanceName: tc.entry,
+ },
+ }
+ if gotErr := l.UpdateReservation(tc.instName, tc.instID); (gotErr != nil) != tc.wantErr {
+ t.Errorf("ledger.updateReservation(%s, %s) = %s; want error %t", tc.instName, tc.instID, gotErr, tc.wantErr)
+ }
+ e, ok := l.entries[tc.instName]
+ if !tc.wantErr && !ok {
+ t.Fatalf("ledger.entries[%s] does not exist", tc.instName)
+ }
+ if !tc.wantErr && e.createdAt.IsZero() {
+ t.Errorf("ledger.entries[%s].createdAt = %s; time not set", tc.instName, e.createdAt)
+ }
+ })
+ }
+}
+
+func TestLedgerRemove(t *testing.T) {
+ testCases := []struct {
+ desc string
+ instName string
+ entry *entry
+ cpuUsed int64
+ wantCPUUsed int64
+ wantErr bool
+ }{
+ {
+ desc: "success",
+ instName: "inst-x",
+ entry: &entry{
+ instanceName: "inst-x",
+ vCPUCount: 10,
+ },
+ cpuUsed: 100,
+ wantCPUUsed: 90,
+ wantErr: false,
+ },
+ {
+ desc: "entry-does-not-exist",
+ instName: "inst-x",
+ entry: &entry{
+ instanceName: "inst-w",
+ vCPUCount: 10,
+ },
+ cpuUsed: 100,
+ wantCPUUsed: 100,
+ wantErr: true,
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.desc, func(t *testing.T) {
+ l := &ledger{
+ cpuUsed: tc.cpuUsed,
+ entries: map[string]*entry{
+ tc.entry.instanceName: tc.entry,
+ },
+ }
+ if gotErr := l.Remove(tc.instName); (gotErr != nil) != tc.wantErr {
+ t.Errorf("ledger.remove(%s) = %s; want error %t", tc.instName, gotErr, tc.wantErr)
+ }
+ if gotE, ok := l.entries[tc.instName]; ok {
+ t.Errorf("ledger.entries[%s] = %+v; want it not to exist", tc.instName, gotE)
+ }
+ if l.cpuUsed != tc.wantCPUUsed {
+ t.Errorf("ledger.cpuUsed = %d; want %d", l.cpuUsed, tc.wantCPUUsed)
+ }
+ })
+ }
+}
+
+func TestLedgerInstanceID(t *testing.T) {
+ testCases := []struct {
+ desc string
+ instName string
+ entry *entry
+ wantID string
+ }{
+ {
+ desc: "success",
+ instName: "inst-x",
+ entry: &entry{
+ instanceName: "inst-x",
+ instanceID: "id-foo-x",
+ },
+ wantID: "id-foo-x",
+ },
+ {
+ desc: "entry-does-not-exist",
+ instName: "inst-x",
+ entry: &entry{
+ instanceName: "inst-w",
+ instanceID: "id-foo-w",
+ },
+ wantID: "",
+ },
+ }
+ for _, tc := range testCases {
+ t.Run(tc.desc, func(t *testing.T) {
+ l := &ledger{
+ entries: map[string]*entry{
+ tc.entry.instanceName: tc.entry,
+ },
+ }
+ if gotID := l.InstanceID(tc.instName); gotID != tc.wantID {
+ t.Errorf("ledger.InstanceID(%s) = %q; want %q", tc.instName, gotID, tc.wantID)
+ }
+ })
+ }
+}
+
+func TestLedgerSetCPULimit(t *testing.T) {
+ l := &ledger{}
+ var want int64 = 300
+ l.SetCPULimit(300)
+ if l.cpuLimit != want {
+ t.Errorf("ledger.cpuLimit = %d; want %d", l.cpuLimit, want)
+ }
+}
+
+func TestLedgerUpdateInstanceTypes(t *testing.T) {
+ testCases := []struct {
+ desc string
+ types []*cloud.InstanceType
+ }{
+ {"no-type", []*cloud.InstanceType{}},
+ {"single-type", []*cloud.InstanceType{&cloud.InstanceType{"x", 15}}},
+ }
+ for _, tc := range testCases {
+ t.Run(tc.desc, func(t *testing.T) {
+ l := newLedger()
+ l.UpdateInstanceTypes(tc.types)
+ for _, it := range tc.types {
+ if gotV, ok := l.types[it.Type]; !ok || gotV != it {
+ t.Errorf("ledger.types[%s] = %v; want %v", it.Type, gotV, it)
+ }
+ }
+ if len(l.types) != len(tc.types) {
+ t.Errorf("len(ledger.types) = %d; want %d", len(l.types), len(tc.types))
+ }
+ })
+ }
+}
+
+func TestLedgerResources(t *testing.T) {
+ testCases := []struct {
+ desc string
+ entries map[string]*entry
+ cpuCount int64
+ cpuLimit int64
+ wantInstCount int64
+ }{
+ {"no-instances", map[string]*entry{}, 2, 3, 0},
+ {"single-instance", map[string]*entry{"x": &entry{}}, 2, 3, 1},
+ }
+ for _, tc := range testCases {
+ t.Run(tc.desc, func(t *testing.T) {
+ l := &ledger{
+ entries: tc.entries,
+ cpuLimit: tc.cpuLimit,
+ cpuUsed: tc.cpuCount,
+ }
+ gotR := l.Resources()
+ if gotR.InstCount != tc.wantInstCount {
+ t.Errorf("ledger.instCount = %d; want %d", gotR.InstCount, tc.wantInstCount)
+ }
+ if gotR.CPUUsed != tc.cpuCount {
+ t.Errorf("ledger.cpuCount = %d; want %d", gotR.CPUUsed, tc.cpuCount)
+ }
+ if gotR.CPULimit != tc.cpuLimit {
+ t.Errorf("ledger.cpuLimit = %d; want %d", gotR.CPULimit, tc.cpuLimit)
+ }
+ })
+ }
+}
+
+func TestLedgerResourceTime(t *testing.T) {
+ ct := time.Now()
+
+ testCases := []struct {
+ desc string
+ entries map[string]*entry
+ }{
+ {"no-instances", map[string]*entry{}},
+ {"single-instance", map[string]*entry{
+ "inst-x": &entry{
+ createdAt: ct,
+ instanceID: "id-x",
+ instanceName: "inst-x",
+ vCPUCount: 1,
+ },
+ }},
+ {"multiple-instances", map[string]*entry{
+ "inst-z": &entry{
+ createdAt: ct.Add(2 * time.Second),
+ instanceID: "id-z",
+ instanceName: "inst-z",
+ vCPUCount: 1,
+ },
+ "inst-y": &entry{
+ createdAt: ct.Add(time.Second),
+ instanceID: "id-y",
+ instanceName: "inst-y",
+ vCPUCount: 1,
+ },
+ "inst-x": &entry{
+ createdAt: ct,
+ instanceID: "id-x",
+ instanceName: "inst-x",
+ vCPUCount: 1,
+ },
+ }},
+ }
+ for _, tc := range testCases {
+ t.Run(tc.desc, func(t *testing.T) {
+ l := &ledger{
+ entries: tc.entries,
+ }
+ gotRT := l.ResourceTime()
+ if !sort.SliceIsSorted(gotRT, func(i, j int) bool { return gotRT[i].Creation.Before(gotRT[j].Creation) }) {
+ t.Errorf("resource time is not sorted")
+ }
+ if len(l.entries) != len(gotRT) {
+ t.Errorf("len(ledger.ResourceTime()) = %d; want %d", len(gotRT), len(l.entries))
+ }
+ for _, rt := range gotRT {
+ delete(l.entries, rt.Name)
+ }
+ if len(l.entries) != 0 {
+ t.Errorf("ledger.ResourceTime() is missing entries: %+v", l.entries)
+ }
+ })
+ }
+}