From fa395ea093784bdfc3aa3bc792d66bc97b8161ce Mon Sep 17 00:00:00 2001 From: mblos Date: Thu, 26 Mar 2026 13:52:14 +0100 Subject: [PATCH 1/9] Commitment API endpoints report ram,cores,instances, but CRs are managed only for ram --- .../reservations/commitments/api_info.go | 88 +++++++++++++------ .../reservations/commitments/api_info_test.go | 74 ++++++++++------ .../commitments/api_report_capacity_test.go | 32 +++++-- .../commitments/api_report_usage_test.go | 37 ++++++-- .../reservations/commitments/capacity.go | 38 ++++++-- .../reservations/commitments/state.go | 54 +++++++++--- .../reservations/commitments/state_test.go | 4 +- .../reservations/commitments/usage.go | 88 ++++++++++++++----- .../reservations/commitments/usage_test.go | 5 +- 9 files changed, 308 insertions(+), 112 deletions(-) diff --git a/internal/scheduling/reservations/commitments/api_info.go b/internal/scheduling/reservations/commitments/api_info.go index 9b2b8eb4a..150fee5a9 100644 --- a/internal/scheduling/reservations/commitments/api_info.go +++ b/internal/scheduling/reservations/commitments/api_info.go @@ -92,6 +92,10 @@ type resourceAttributes struct { } // buildServiceInfo constructs the ServiceInfo response with metadata for all flavor groups. 
+// For each flavor group that accepts commitments, three resources are registered: +// - _ram: RAM resource (unit = multiples of smallest flavor RAM, HandlesCommitments=true) +// - _cores: CPU cores resource (unit = 1, HandlesCommitments=false) +// - _instances: Instance count resource (unit = 1, HandlesCommitments=false) func (api *HTTPAPI) buildServiceInfo(ctx context.Context, logger logr.Logger) (liquid.ServiceInfo, error) { // Get all flavor groups from Knowledge CRDs knowledge := &reservations.FlavorGroupKnowledgeClient{Client: api.client} @@ -107,22 +111,19 @@ func (api *HTTPAPI) buildServiceInfo(ctx context.Context, logger logr.Logger) (l // Build resources map resources := make(map[liquid.ResourceName]liquid.ResourceInfo) for groupName, groupData := range flavorGroups { - resourceName := liquid.ResourceName(ResourceNameFromFlavorGroup(groupName)) + // Only handle commitments for groups with a fixed RAM/core ratio + handlesCommitments := FlavorGroupAcceptsCommitments(&groupData) + if !handlesCommitments { + continue // Skip groups that don't accept commitments + } flavorNames := make([]string, 0, len(groupData.Flavors)) for _, flavor := range groupData.Flavors { flavorNames = append(flavorNames, flavor.Name) } - displayName := fmt.Sprintf( - "multiples of %d MiB (usable by: %s)", - groupData.SmallestFlavor.MemoryMB, - strings.Join(flavorNames, ", "), - ) - - // Only handle commitments for groups with a fixed RAM/core ratio - handlesCommitments := FlavorGroupAcceptsCommitments(&groupData) + flavorListStr := strings.Join(flavorNames, ", ") - // Build attributes JSON with ratio info + // Build attributes JSON with ratio info (shared across all resource types) attrs := resourceAttributes{ RamCoreRatio: groupData.RamCoreRatio, RamCoreRatioMin: groupData.RamCoreRatioMin, @@ -130,44 +131,79 @@ func (api *HTTPAPI) buildServiceInfo(ctx context.Context, logger logr.Logger) (l } attrsJSON, err := json.Marshal(attrs) if err != nil { - logger.Error(err, "failed to 
marshal resource attributes", "resourceName", resourceName) + logger.Error(err, "failed to marshal resource attributes", "flavorGroup", groupName) attrsJSON = nil } - // Build unit from smallest flavor memory (e.g., "131072 MiB" for 128 GiB) // Validate memory is positive to avoid panic in MultiplyBy (which panics on factor=0) if groupData.SmallestFlavor.MemoryMB == 0 { return liquid.ServiceInfo{}, fmt.Errorf("%w: flavor group %q has invalid smallest flavor with memoryMB=0", errInternalServiceInfo, groupName) } - unit, err := liquid.UnitMebibytes.MultiplyBy(groupData.SmallestFlavor.MemoryMB) + + // === 1. RAM Resource === + ramResourceName := liquid.ResourceName(ResourceNameRAM(groupName)) + ramUnit, err := liquid.UnitMebibytes.MultiplyBy(groupData.SmallestFlavor.MemoryMB) if err != nil { // Note: This error only occurs on uint64 overflow, which is unrealistic for memory values return liquid.ServiceInfo{}, fmt.Errorf("%w: failed to create unit for flavor group %q: %w", errInternalServiceInfo, groupName, err) } - - resources[resourceName] = liquid.ResourceInfo{ - DisplayName: displayName, - Unit: unit, // Non-standard unit: multiples of smallest flavor RAM + resources[ramResourceName] = liquid.ResourceInfo{ + DisplayName: fmt.Sprintf( + "multiples of %d MiB (usable by: %s)", + groupData.SmallestFlavor.MemoryMB, + flavorListStr, + ), + Unit: ramUnit, // Non-standard unit: multiples of smallest flavor RAM Topology: liquid.AZAwareTopology, // Commitments are per-AZ NeedsResourceDemand: false, // Capacity planning out of scope for now - HasCapacity: handlesCommitments, // We report capacity via /commitments/v1/report-capacity only for groups that accept commitments + HasCapacity: true, // We report capacity via /commitments/v1/report-capacity HasQuota: false, // No quota enforcement as of now - HandlesCommitments: handlesCommitments, // Only for groups with fixed RAM/core ratio + HandlesCommitments: true, // RAM is the primary commitment resource Attributes: attrsJSON, 
} - logger.V(1).Info("registered flavor group resource", - "resourceName", resourceName, + // === 2. Cores Resource === + coresResourceName := liquid.ResourceName(ResourceNameCores(groupName)) + resources[coresResourceName] = liquid.ResourceInfo{ + DisplayName: fmt.Sprintf( + "CPU cores (usable by: %s)", + flavorListStr, + ), + Unit: liquid.UnitNone, // Unit = 1 (count of cores) + Topology: liquid.AZAwareTopology, // Same topology as RAM + NeedsResourceDemand: false, + HasCapacity: true, // We report capacity (as 0 for now) + HasQuota: false, // No quota enforcement + HandlesCommitments: false, // Cores are derived from RAM commitments + Attributes: attrsJSON, // Same attributes (ratio info) + } + + // === 3. Instances Resource === + instancesResourceName := liquid.ResourceName(ResourceNameInstances(groupName)) + resources[instancesResourceName] = liquid.ResourceInfo{ + DisplayName: fmt.Sprintf( + "instances (usable by: %s)", + flavorListStr, + ), + Unit: liquid.UnitNone, // Unit = 1 (count of instances) + Topology: liquid.AZAwareTopology, // Same topology as RAM + NeedsResourceDemand: false, + HasCapacity: true, // We report capacity (as 0 for now) + HasQuota: false, // No quota enforcement + HandlesCommitments: false, // Instances are derived from RAM commitments + Attributes: attrsJSON, // Same attributes + } + + logger.V(1).Info("registered flavor group resources", "flavorGroup", groupName, - "displayName", displayName, + "ramResource", ramResourceName, + "coresResource", coresResourceName, + "instancesResource", instancesResourceName, "smallestFlavor", groupData.SmallestFlavor.Name, "smallestRamMB", groupData.SmallestFlavor.MemoryMB, - "handlesCommitments", handlesCommitments, - "ramCoreRatio", groupData.RamCoreRatio, - "ramCoreRatioMin", groupData.RamCoreRatioMin, - "ramCoreRatioMax", groupData.RamCoreRatioMax) + "ramCoreRatio", groupData.RamCoreRatio) } // Get last content changed from flavor group knowledge and treat it as version diff --git 
a/internal/scheduling/reservations/commitments/api_info_test.go b/internal/scheduling/reservations/commitments/api_info_test.go index 256709957..efcb2d790 100644 --- a/internal/scheduling/reservations/commitments/api_info_test.go +++ b/internal/scheduling/reservations/commitments/api_info_test.go @@ -138,8 +138,11 @@ func TestHandleInfo_InvalidFlavorMemory(t *testing.T) { } func TestHandleInfo_HasCapacityEqualsHandlesCommitments(t *testing.T) { - // Test that HasCapacity == HandlesCommitments for all resources - // Both should be true only for groups with fixed RAM/core ratio + // Test that for flavor groups that accept commitments: + // - Three resources are created: _ram, _cores, _instances + // - Only _ram has HandlesCommitments=true + // - All three have HasCapacity=true + // Groups that DON'T accept commitments are skipped entirely scheme := runtime.NewScheme() if err := v1alpha1.AddToScheme(scheme); err != nil { t.Fatalf("failed to add scheme: %v", err) @@ -148,7 +151,8 @@ func TestHandleInfo_HasCapacityEqualsHandlesCommitments(t *testing.T) { // Create flavor groups knowledge with both fixed and variable ratio groups features := []map[string]interface{}{ { - // Group with fixed ratio - should accept commitments (HasCapacity=true, HandlesCommitments=true) + // Group with fixed ratio - should accept commitments + // Creates 3 resources: _ram, _cores, _instances "name": "hana_fixed", "flavors": []map[string]interface{}{ {"name": "hana_c4_m16", "vcpus": 4, "memoryMB": 16384, "diskGB": 50}, @@ -159,7 +163,8 @@ func TestHandleInfo_HasCapacityEqualsHandlesCommitments(t *testing.T) { "ramCoreRatio": 4096, // Fixed: 4096 MiB per vCPU for all flavors }, { - // Group with variable ratio - should NOT accept commitments (HasCapacity=false, HandlesCommitments=false) + // Group with variable ratio - should NOT accept commitments + // Will be SKIPPED entirely (no resources created) "name": "v2_variable", "flavors": []map[string]interface{}{ {"name": "v2_c4_m8", "vcpus": 4, 
"memoryMB": 8192, "diskGB": 50}, // 2048 MiB/vCPU @@ -213,43 +218,56 @@ func TestHandleInfo_HasCapacityEqualsHandlesCommitments(t *testing.T) { t.Fatalf("failed to decode response: %v", err) } - // Verify we have both resources - if len(serviceInfo.Resources) != 2 { - t.Fatalf("expected 2 resources, got %d", len(serviceInfo.Resources)) + // Verify we have 3 resources for the fixed ratio group (variable ratio is skipped) + // hana_fixed generates: _ram, _cores, _instances + if len(serviceInfo.Resources) != 3 { + t.Fatalf("expected 3 resources (_ram, _cores, _instances for hana_fixed), got %d", len(serviceInfo.Resources)) } - // Test fixed ratio group: hw_version_hana_fixed_ram - fixedResource, ok := serviceInfo.Resources["hw_version_hana_fixed_ram"] + // Test RAM resource: hw_version_hana_fixed_ram + ramResource, ok := serviceInfo.Resources["hw_version_hana_fixed_ram"] if !ok { t.Fatal("expected hw_version_hana_fixed_ram resource to exist") } - if !fixedResource.HasCapacity { + if !ramResource.HasCapacity { t.Error("hw_version_hana_fixed_ram: expected HasCapacity=true") } - if !fixedResource.HandlesCommitments { - t.Error("hw_version_hana_fixed_ram: expected HandlesCommitments=true (fixed ratio group)") + if !ramResource.HandlesCommitments { + t.Error("hw_version_hana_fixed_ram: expected HandlesCommitments=true (RAM is primary commitment resource)") } - if fixedResource.HasCapacity != fixedResource.HandlesCommitments { - t.Errorf("hw_version_hana_fixed_ram: HasCapacity (%v) should equal HandlesCommitments (%v)", - fixedResource.HasCapacity, fixedResource.HandlesCommitments) + + // Test Cores resource: hw_version_hana_fixed_cores + coresResource, ok := serviceInfo.Resources["hw_version_hana_fixed_cores"] + if !ok { + t.Fatal("expected hw_version_hana_fixed_cores resource to exist") + } + if !coresResource.HasCapacity { + t.Error("hw_version_hana_fixed_cores: expected HasCapacity=true") + } + if coresResource.HandlesCommitments { + 
t.Error("hw_version_hana_fixed_cores: expected HandlesCommitments=false (cores are derived)") } - // Test variable ratio group: hw_version_v2_variable_ram - variableResource, ok := serviceInfo.Resources["hw_version_v2_variable_ram"] + // Test Instances resource: hw_version_hana_fixed_instances + instancesResource, ok := serviceInfo.Resources["hw_version_hana_fixed_instances"] if !ok { - t.Fatal("expected hw_version_v2_variable_ram resource to exist") + t.Fatal("expected hw_version_hana_fixed_instances resource to exist") } - // Variable ratio groups don't accept commitments, and we only report capacity for groups - // that accept commitments, so both HasCapacity and HandlesCommitments should be false - if variableResource.HasCapacity { - t.Error("hw_version_v2_variable_ram: expected HasCapacity=false (variable ratio groups don't report capacity)") + if !instancesResource.HasCapacity { + t.Error("hw_version_hana_fixed_instances: expected HasCapacity=true") + } + if instancesResource.HandlesCommitments { + t.Error("hw_version_hana_fixed_instances: expected HandlesCommitments=false (instances are derived)") + } + + // Variable ratio group should NOT have any resources (skipped entirely) + if _, ok := serviceInfo.Resources["hw_version_v2_variable_ram"]; ok { + t.Error("hw_version_v2_variable_ram should NOT exist (variable ratio groups are skipped)") } - if variableResource.HandlesCommitments { - t.Error("hw_version_v2_variable_ram: expected HandlesCommitments=false (variable ratio group)") + if _, ok := serviceInfo.Resources["hw_version_v2_variable_cores"]; ok { + t.Error("hw_version_v2_variable_cores should NOT exist (variable ratio groups are skipped)") } - // Verify HasCapacity == HandlesCommitments for consistency - if variableResource.HasCapacity != variableResource.HandlesCommitments { - t.Errorf("hw_version_v2_variable_ram: HasCapacity (%v) should equal HandlesCommitments (%v)", - variableResource.HasCapacity, variableResource.HandlesCommitments) + if _, ok := 
serviceInfo.Resources["hw_version_v2_variable_instances"]; ok { + t.Error("hw_version_v2_variable_instances should NOT exist (variable ratio groups are skipped)") } } diff --git a/internal/scheduling/reservations/commitments/api_report_capacity_test.go b/internal/scheduling/reservations/commitments/api_report_capacity_test.go index 4dd642c0e..a0173f1f4 100644 --- a/internal/scheduling/reservations/commitments/api_report_capacity_test.go +++ b/internal/scheduling/reservations/commitments/api_report_capacity_test.go @@ -183,18 +183,36 @@ func TestCapacityCalculator(t *testing.T) { t.Fatalf("Expected no error, got: %v", err) } - if len(report.Resources) != 1 { - t.Fatalf("Expected 1 resource, got %d", len(report.Resources)) + // Now we have 3 resources per flavor group: _ram, _cores, _instances + if len(report.Resources) != 3 { + t.Fatalf("Expected 3 resources (_ram, _cores, _instances), got %d", len(report.Resources)) } - resource := report.Resources[liquid.ResourceName("hw_version_test-group_ram")] - if resource == nil { + // Check RAM resource + ramResource := report.Resources[liquid.ResourceName("hw_version_test-group_ram")] + if ramResource == nil { t.Fatal("Expected hw_version_test-group_ram resource to exist") } + if len(ramResource.PerAZ) != 0 { + t.Errorf("Expected 0 AZs for RAM resource, got %d", len(ramResource.PerAZ)) + } - // Should have empty perAZ map when no host details - if len(resource.PerAZ) != 0 { - t.Errorf("Expected 0 AZs, got %d", len(resource.PerAZ)) + // Check Cores resource + coresResource := report.Resources[liquid.ResourceName("hw_version_test-group_cores")] + if coresResource == nil { + t.Fatal("Expected hw_version_test-group_cores resource to exist") + } + if len(coresResource.PerAZ) != 0 { + t.Errorf("Expected 0 AZs for Cores resource, got %d", len(coresResource.PerAZ)) + } + + // Check Instances resource + instancesResource := report.Resources[liquid.ResourceName("hw_version_test-group_instances")] + if instancesResource == nil { + 
t.Fatal("Expected hw_version_test-group_instances resource to exist") + } + if len(instancesResource.PerAZ) != 0 { + t.Errorf("Expected 0 AZs for Instances resource, got %d", len(instancesResource.PerAZ)) } }) } diff --git a/internal/scheduling/reservations/commitments/api_report_usage_test.go b/internal/scheduling/reservations/commitments/api_report_usage_test.go index 26a55332d..e4baf8170 100644 --- a/internal/scheduling/reservations/commitments/api_report_usage_test.go +++ b/internal/scheduling/reservations/commitments/api_report_usage_test.go @@ -656,12 +656,22 @@ func verifyUsageReport(t *testing.T, tc UsageReportTestCase, actual liquid.Servi t.Helper() for resourceName, expectedResource := range tc.Expected { + // The test uses _ram resources in Expected, but: + // - _ram resource has usage but NO subresources + // - _instances resource has usage (count) AND subresources (VM details) + // So we check _ram for usage and derive _instances for VM subresources + actualResource, exists := actual.Resources[liquid.ResourceName(resourceName)] if !exists { t.Errorf("Resource %s not found in response", resourceName) continue } + // Derive the instances resource name from the ram resource name + // hw_version_hana_1_ram -> hw_version_hana_1_instances + instancesResourceName := resourceName[:len(resourceName)-4] + "_instances" // replace "_ram" with "_instances" + actualInstancesResource := actual.Resources[liquid.ResourceName(instancesResourceName)] + for azName, expectedAZ := range expectedResource.PerAZ { az := liquid.AvailabilityZone(azName) actualAZ, exists := actualResource.PerAZ[az] @@ -670,22 +680,33 @@ func verifyUsageReport(t *testing.T, tc UsageReportTestCase, actual liquid.Servi continue } - // Verify usage + // Verify RAM usage if actualAZ.Usage != expectedAZ.Usage { t.Errorf("Resource %s AZ %s: expected usage %d, got %d", resourceName, azName, expectedAZ.Usage, actualAZ.Usage) } + // VM subresources are on the _instances resource, not _ram + if 
actualInstancesResource == nil { + t.Errorf("Instances resource %s not found", instancesResourceName) + continue + } + actualInstancesAZ, exists := actualInstancesResource.PerAZ[az] + if !exists { + t.Errorf("AZ %s not found in instances resource %s", azName, instancesResourceName) + continue + } + // Verify VM count - if len(actualAZ.Subresources) != len(expectedAZ.VMs) { + if len(actualInstancesAZ.Subresources) != len(expectedAZ.VMs) { t.Errorf("Resource %s AZ %s: expected %d VMs, got %d", - resourceName, azName, len(expectedAZ.VMs), len(actualAZ.Subresources)) + instancesResourceName, azName, len(expectedAZ.VMs), len(actualInstancesAZ.Subresources)) continue } // Build actual VM map for comparison (parse attributes) actualVMs := make(map[string]vmAttributes) - for _, sub := range actualAZ.Subresources { + for _, sub := range actualInstancesAZ.Subresources { var attrs vmAttributes attrs.ID = sub.ID if err := json.Unmarshal(sub.Attributes, &attrs); err != nil { @@ -699,7 +720,7 @@ func verifyUsageReport(t *testing.T, tc UsageReportTestCase, actual liquid.Servi for _, expectedVM := range expectedAZ.VMs { actualVM, exists := actualVMs[expectedVM.UUID] if !exists { - t.Errorf("Resource %s AZ %s: VM %s not found", resourceName, azName, expectedVM.UUID) + t.Errorf("Resource %s AZ %s: VM %s not found", instancesResourceName, azName, expectedVM.UUID) continue } @@ -707,17 +728,17 @@ func verifyUsageReport(t *testing.T, tc UsageReportTestCase, actual liquid.Servi if actualVM.CommitmentID != expectedVM.CommitmentID { if expectedVM.CommitmentID == "" { t.Errorf("Resource %s AZ %s VM %s: expected PAYG (empty), got commitment %s", - resourceName, azName, expectedVM.UUID, actualVM.CommitmentID) + instancesResourceName, azName, expectedVM.UUID, actualVM.CommitmentID) } else { t.Errorf("Resource %s AZ %s VM %s: expected commitment %s, got %s", - resourceName, azName, expectedVM.UUID, expectedVM.CommitmentID, actualVM.CommitmentID) + instancesResourceName, azName, 
expectedVM.UUID, expectedVM.CommitmentID, actualVM.CommitmentID) } } // Verify memory if actualVM.RAM != expectedVM.MemoryMB { t.Errorf("Resource %s AZ %s VM %s: expected RAM %d MB, got %d MB", - resourceName, azName, expectedVM.UUID, expectedVM.MemoryMB, actualVM.RAM) + instancesResourceName, azName, expectedVM.UUID, expectedVM.MemoryMB, actualVM.RAM) } } } diff --git a/internal/scheduling/reservations/commitments/capacity.go b/internal/scheduling/reservations/commitments/capacity.go index 7726b1dbe..415a45ae0 100644 --- a/internal/scheduling/reservations/commitments/capacity.go +++ b/internal/scheduling/reservations/commitments/capacity.go @@ -27,6 +27,7 @@ func NewCapacityCalculator(client client.Client) *CapacityCalculator { // CalculateCapacity computes per-AZ capacity for all flavor groups that accept commitments. // Only flavor groups with a fixed RAM/core ratio are included in the report. +// For each flavor group, three resources are reported: _ram, _cores, _instances. func (c *CapacityCalculator) CalculateCapacity(ctx context.Context) (liquid.ServiceCapacityReport, error) { // Get all flavor groups from Knowledge CRDs knowledge := &reservations.FlavorGroupKnowledgeClient{Client: c.client} @@ -53,23 +54,50 @@ func (c *CapacityCalculator) CalculateCapacity(ctx context.Context) (liquid.Serv continue } - // Resource name follows pattern: hw_version__ram - resourceName := liquid.ResourceName(ResourceNameFromFlavorGroup(groupName)) - - // Calculate per-AZ capacity and usage + // Calculate per-AZ capacity (placeholder: capacity=0 for all resources) azCapacity, err := c.calculateAZCapacity(ctx, groupName, groupData) if err != nil { return liquid.ServiceCapacityReport{}, fmt.Errorf("failed to calculate capacity for %s: %w", groupName, err) } - report.Resources[resourceName] = &liquid.ResourceCapacityReport{ + // === 1. 
RAM Resource === + ramResourceName := liquid.ResourceName(ResourceNameRAM(groupName)) + report.Resources[ramResourceName] = &liquid.ResourceCapacityReport{ PerAZ: azCapacity, } + + // === 2. Cores Resource === + coresResourceName := liquid.ResourceName(ResourceNameCores(groupName)) + report.Resources[coresResourceName] = &liquid.ResourceCapacityReport{ + PerAZ: c.copyAZCapacity(azCapacity), + } + + // === 3. Instances Resource === + instancesResourceName := liquid.ResourceName(ResourceNameInstances(groupName)) + report.Resources[instancesResourceName] = &liquid.ResourceCapacityReport{ + PerAZ: c.copyAZCapacity(azCapacity), + } } return report, nil } +// copyAZCapacity creates a deep copy of the AZ capacity map. +// This is needed because each resource needs its own map instance. +func (c *CapacityCalculator) copyAZCapacity( + src map[liquid.AvailabilityZone]*liquid.AZResourceCapacityReport, +) map[liquid.AvailabilityZone]*liquid.AZResourceCapacityReport { + + result := make(map[liquid.AvailabilityZone]*liquid.AZResourceCapacityReport, len(src)) + for az, report := range src { + result[az] = &liquid.AZResourceCapacityReport{ + Capacity: report.Capacity, + Usage: report.Usage, + } + } + return result +} + func (c *CapacityCalculator) calculateAZCapacity( ctx context.Context, _ string, // groupName - reserved for future use diff --git a/internal/scheduling/reservations/commitments/state.go b/internal/scheduling/reservations/commitments/state.go index 4ed288d16..4a30cb814 100644 --- a/internal/scheduling/reservations/commitments/state.go +++ b/internal/scheduling/reservations/commitments/state.go @@ -18,30 +18,56 @@ import ( // commitmentUUIDPattern validates commitment UUID format. 
var commitmentUUIDPattern = regexp.MustCompile(`^[a-zA-Z0-9-]{6,40}$`) -// Limes LIQUID resource naming convention: hw_version__ram +// Limes LIQUID resource naming convention: hw_version__ +// Supported resource types: _ram, _cores, _instances const ( resourceNamePrefix = "hw_version_" - resourceNameSuffix = "_ram" + // Resource type suffixes + ResourceSuffixRAM = "_ram" + ResourceSuffixCores = "_cores" + ResourceSuffixInstances = "_instances" ) -// ResourceNameFromFlavorGroup creates a LIQUID resource name from a flavor group name. +// ResourceNameRAM creates a LIQUID resource name for RAM from a flavor group name. // Format: hw_version__ram -func ResourceNameFromFlavorGroup(flavorGroup string) string { - return resourceNamePrefix + flavorGroup + resourceNameSuffix +func ResourceNameRAM(flavorGroup string) string { + return resourceNamePrefix + flavorGroup + ResourceSuffixRAM } +// ResourceNameCores creates a LIQUID resource name for CPU cores from a flavor group name. +// Format: hw_version__cores +func ResourceNameCores(flavorGroup string) string { + return resourceNamePrefix + flavorGroup + ResourceSuffixCores +} + +// ResourceNameInstances creates a LIQUID resource name for instance count from a flavor group name. +// Format: hw_version__instances +func ResourceNameInstances(flavorGroup string) string { + return resourceNamePrefix + flavorGroup + ResourceSuffixInstances +} + +// getFlavorGroupNameFromResource extracts the flavor group name from a LIQUID resource name. 
+// Supports all resource types: _ram, _cores, _instances func getFlavorGroupNameFromResource(resourceName string) (string, error) { - if !strings.HasPrefix(resourceName, resourceNamePrefix) || !strings.HasSuffix(resourceName, resourceNameSuffix) { - return "", fmt.Errorf("invalid resource name: %s", resourceName) + if !strings.HasPrefix(resourceName, resourceNamePrefix) { + return "", fmt.Errorf("invalid resource name: %s (missing prefix)", resourceName) } - // Remove prefix and suffix - name := strings.TrimPrefix(resourceName, resourceNamePrefix) - name = strings.TrimSuffix(name, resourceNameSuffix) - // Validate that the extracted group name is not empty - if name == "" { - return "", fmt.Errorf("invalid resource name: %s (empty group name)", resourceName) + + // Try each known suffix + for _, suffix := range []string{ResourceSuffixRAM, ResourceSuffixCores, ResourceSuffixInstances} { + if strings.HasSuffix(resourceName, suffix) { + // Remove prefix and suffix + name := strings.TrimPrefix(resourceName, resourceNamePrefix) + name = strings.TrimSuffix(name, suffix) + // Validate that the extracted group name is not empty + if name == "" { + return "", fmt.Errorf("invalid resource name: %s (empty group name)", resourceName) + } + return name, nil + } } - return name, nil + + return "", fmt.Errorf("invalid resource name: %s (unknown suffix)", resourceName) } // CommitmentState represents desired or current commitment resource allocation. 
diff --git a/internal/scheduling/reservations/commitments/state_test.go b/internal/scheduling/reservations/commitments/state_test.go index 717069708..fcdbf9f84 100644 --- a/internal/scheduling/reservations/commitments/state_test.go +++ b/internal/scheduling/reservations/commitments/state_test.go @@ -261,9 +261,9 @@ func TestGetFlavorGroupNameFromResource_Invalid(t *testing.T) { } func TestResourceNameRoundTrip(t *testing.T) { - // Test that ResourceNameFromFlavorGroup and getFlavorGroupNameFromResource are inverses + // Test that ResourceNameRAM and getFlavorGroupNameFromResource are inverses for _, groupName := range []string{"2101", "hana_1", "hana_medium_v2"} { - resourceName := ResourceNameFromFlavorGroup(groupName) + resourceName := ResourceNameRAM(groupName) recovered, err := getFlavorGroupNameFromResource(resourceName) if err != nil { t.Fatalf("round-trip failed for %q: %v", groupName, err) diff --git a/internal/scheduling/reservations/commitments/usage.go b/internal/scheduling/reservations/commitments/usage.go index 536440209..ba5681732 100644 --- a/internal/scheduling/reservations/commitments/usage.go +++ b/internal/scheduling/reservations/commitments/usage.go @@ -335,8 +335,17 @@ func (c *UsageCalculator) assignVMsToCommitments( return vmAssignments, assignedCount } +// azUsageData aggregates usage data for a specific flavor group and AZ. +type azUsageData struct { + ramUsage uint64 // RAM usage in multiples of smallest flavor + coresUsage uint64 // Total vCPU count + instanceCount uint64 // Number of VMs + subresources []liquid.Subresource // VM details for subresource reporting +} + // buildUsageResponse constructs the Liquid API ServiceUsageReport. // Only flavor groups that accept commitments are included in the report. +// For each flavor group, three resources are reported: _ram, _cores, _instances. 
func (c *UsageCalculator) buildUsageResponse( vms []VMUsageInfo, vmAssignments map[string]string, @@ -348,10 +357,6 @@ func (c *UsageCalculator) buildUsageResponse( resources := make(map[liquid.ResourceName]*liquid.ResourceUsageReport) // Group VMs by flavor group and AZ for aggregation - type azUsageData struct { - usage uint64 - subresources []liquid.Subresource - } usageByFlavorGroupAZ := make(map[string]map[liquid.AvailabilityZone]*azUsageData) for _, vm := range vms { @@ -368,8 +373,10 @@ func (c *UsageCalculator) buildUsageResponse( usageByFlavorGroupAZ[vm.FlavorGroup][az] = &azUsageData{} } - // Accumulate usage - usageByFlavorGroupAZ[vm.FlavorGroup][az].usage += vm.UsageMultiple + // Accumulate usage for all resource types + usageByFlavorGroupAZ[vm.FlavorGroup][az].ramUsage += vm.UsageMultiple + usageByFlavorGroupAZ[vm.FlavorGroup][az].coresUsage += vm.VCPUs + usageByFlavorGroupAZ[vm.FlavorGroup][az].instanceCount++ // Build subresource attributes commitmentID := vmAssignments[vm.UUID] @@ -396,33 +403,74 @@ func (c *UsageCalculator) buildUsageResponse( if !FlavorGroupAcceptsCommitments(&groupData) { continue } - resourceName := liquid.ResourceName(ResourceNameFromFlavorGroup(flavorGroupName)) - - perAZ := make(map[liquid.AvailabilityZone]*liquid.AZResourceUsageReport) - // Initialize all AZs with zero usage + // === 1. 
RAM Resource === + ramResourceName := liquid.ResourceName(ResourceNameRAM(flavorGroupName)) + ramPerAZ := make(map[liquid.AvailabilityZone]*liquid.AZResourceUsageReport) for _, az := range allAZs { - perAZ[az] = &liquid.AZResourceUsageReport{ + ramPerAZ[az] = &liquid.AZResourceUsageReport{ Usage: 0, Subresources: []liquid.Subresource{}, } } + if azData, exists := usageByFlavorGroupAZ[flavorGroupName]; exists { + for az, data := range azData { + if _, known := ramPerAZ[az]; !known { + ramPerAZ[az] = &liquid.AZResourceUsageReport{} + } + ramPerAZ[az].Usage = data.ramUsage + ramPerAZ[az].PhysicalUsage = Some(data.ramUsage) // No overcommit for RAM + // Subresources are only on instances resource + } + } + resources[ramResourceName] = &liquid.ResourceUsageReport{ + PerAZ: ramPerAZ, + } - // Fill in actual usage data + // === 2. Cores Resource === + coresResourceName := liquid.ResourceName(ResourceNameCores(flavorGroupName)) + coresPerAZ := make(map[liquid.AvailabilityZone]*liquid.AZResourceUsageReport) + for _, az := range allAZs { + coresPerAZ[az] = &liquid.AZResourceUsageReport{ + Usage: 0, + Subresources: []liquid.Subresource{}, + } + } if azData, exists := usageByFlavorGroupAZ[flavorGroupName]; exists { for az, data := range azData { - if _, known := perAZ[az]; !known { - // AZ not in allAZs, add it anyway - perAZ[az] = &liquid.AZResourceUsageReport{} + if _, known := coresPerAZ[az]; !known { + coresPerAZ[az] = &liquid.AZResourceUsageReport{} } - perAZ[az].Usage = data.usage - perAZ[az].PhysicalUsage = Some(data.usage) // No overcommit for RAM - perAZ[az].Subresources = data.subresources + coresPerAZ[az].Usage = data.coresUsage + coresPerAZ[az].PhysicalUsage = Some(data.coresUsage) // No overcommit for cores + // Subresources are only on instances resource } } + resources[coresResourceName] = &liquid.ResourceUsageReport{ + PerAZ: coresPerAZ, + } - resources[resourceName] = &liquid.ResourceUsageReport{ - PerAZ: perAZ, + // === 3. 
Instances Resource === + instancesResourceName := liquid.ResourceName(ResourceNameInstances(flavorGroupName)) + instancesPerAZ := make(map[liquid.AvailabilityZone]*liquid.AZResourceUsageReport) + for _, az := range allAZs { + instancesPerAZ[az] = &liquid.AZResourceUsageReport{ + Usage: 0, + Subresources: []liquid.Subresource{}, + } + } + if azData, exists := usageByFlavorGroupAZ[flavorGroupName]; exists { + for az, data := range azData { + if _, known := instancesPerAZ[az]; !known { + instancesPerAZ[az] = &liquid.AZResourceUsageReport{} + } + instancesPerAZ[az].Usage = data.instanceCount + instancesPerAZ[az].PhysicalUsage = Some(data.instanceCount) + instancesPerAZ[az].Subresources = data.subresources // VM details on instances resource + } + } + resources[instancesResourceName] = &liquid.ResourceUsageReport{ + PerAZ: instancesPerAZ, } } diff --git a/internal/scheduling/reservations/commitments/usage_test.go b/internal/scheduling/reservations/commitments/usage_test.go index 04b32fffa..425407db5 100644 --- a/internal/scheduling/reservations/commitments/usage_test.go +++ b/internal/scheduling/reservations/commitments/usage_test.go @@ -584,9 +584,10 @@ func TestUsageCalculator_ExpiredAndFutureCommitments(t *testing.T) { } // Find the VM in subresources and check its commitment assignment - res, ok := report.Resources["hw_version_hana_1_ram"] + // Subresources are now on the instances resource, not RAM + res, ok := report.Resources["hw_version_hana_1_instances"] if !ok { - t.Fatal("Resource hw_version_hana_1_ram not found") + t.Fatal("Resource hw_version_hana_1_instances not found") } var foundCommitment any From 38664242e696899fd8208b58c102c507efe8d1f4 Mon Sep 17 00:00:00 2001 From: mblos Date: Thu, 26 Mar 2026 14:05:49 +0100 Subject: [PATCH 2/9] New instances details output format --- internal/scheduling/nova/nova_client.go | 44 +++++----- .../commitments/api_report_usage_test.go | 31 ++++--- .../reservations/commitments/usage.go | 80 +++++++++++++++--- 
.../reservations/commitments/usage_test.go | 83 +++++++++++++++---- 4 files changed, 180 insertions(+), 58 deletions(-) diff --git a/internal/scheduling/nova/nova_client.go b/internal/scheduling/nova/nova_client.go index 321ff0b70..65cad576a 100644 --- a/internal/scheduling/nova/nova_client.go +++ b/internal/scheduling/nova/nova_client.go @@ -40,17 +40,19 @@ type migration struct { // ServerDetail contains extended server information for usage reporting. type ServerDetail struct { - ID string `json:"id"` - Name string `json:"name"` - Status string `json:"status"` - TenantID string `json:"tenant_id"` - Created string `json:"created"` - AvailabilityZone string `json:"OS-EXT-AZ:availability_zone"` - Hypervisor string `json:"OS-EXT-SRV-ATTR:hypervisor_hostname"` - FlavorName string // Populated from nested flavor.original_name - FlavorRAM uint64 // Populated from nested flavor.ram - FlavorVCPUs uint64 // Populated from nested flavor.vcpus - FlavorDisk uint64 // Populated from nested flavor.disk + ID string `json:"id"` + Name string `json:"name"` + Status string `json:"status"` + TenantID string `json:"tenant_id"` + Created string `json:"created"` + AvailabilityZone string `json:"OS-EXT-AZ:availability_zone"` + Hypervisor string `json:"OS-EXT-SRV-ATTR:hypervisor_hostname"` + FlavorName string // Populated from nested flavor.original_name + FlavorRAM uint64 // Populated from nested flavor.ram + FlavorVCPUs uint64 // Populated from nested flavor.vcpus + FlavorDisk uint64 // Populated from nested flavor.disk + Metadata map[string]string // Server metadata key-value pairs + Tags []string // Server tags } type NovaClient interface { @@ -203,16 +205,18 @@ func (api *novaClient) ListProjectServers(ctx context.Context, projectID string) return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode) } - // Response structure with nested flavor + // Response structure with nested flavor, metadata, and tags var list struct { Servers []struct { - ID string `json:"id"` - Name 
string `json:"name"` - Status string `json:"status"` - TenantID string `json:"tenant_id"` - Created string `json:"created"` - AvailabilityZone string `json:"OS-EXT-AZ:availability_zone"` - Hypervisor string `json:"OS-EXT-SRV-ATTR:hypervisor_hostname"` + ID string `json:"id"` + Name string `json:"name"` + Status string `json:"status"` + TenantID string `json:"tenant_id"` + Created string `json:"created"` + AvailabilityZone string `json:"OS-EXT-AZ:availability_zone"` + Hypervisor string `json:"OS-EXT-SRV-ATTR:hypervisor_hostname"` + Metadata map[string]string `json:"metadata"` + Tags []string `json:"tags"` Flavor struct { OriginalName string `json:"original_name"` RAM uint64 `json:"ram"` @@ -244,6 +248,8 @@ func (api *novaClient) ListProjectServers(ctx context.Context, projectID string) FlavorRAM: s.Flavor.RAM, FlavorVCPUs: s.Flavor.VCPUs, FlavorDisk: s.Flavor.Disk, + Metadata: s.Metadata, + Tags: s.Tags, }) } diff --git a/internal/scheduling/reservations/commitments/api_report_usage_test.go b/internal/scheduling/reservations/commitments/api_report_usage_test.go index e4baf8170..f54d2a679 100644 --- a/internal/scheduling/reservations/commitments/api_report_usage_test.go +++ b/internal/scheduling/reservations/commitments/api_report_usage_test.go @@ -735,10 +735,10 @@ func verifyUsageReport(t *testing.T, tc UsageReportTestCase, actual liquid.Servi } } - // Verify memory - if actualVM.RAM != expectedVM.MemoryMB { + // Verify memory (now nested in flavor) + if actualVM.Flavor.MemoryMiB != expectedVM.MemoryMB { t.Errorf("Resource %s AZ %s VM %s: expected RAM %d MB, got %d MB", - instancesResourceName, azName, expectedVM.UUID, expectedVM.MemoryMB, actualVM.RAM) + instancesResourceName, azName, expectedVM.UUID, expectedVM.MemoryMB, actualVM.Flavor.MemoryMiB) } } } @@ -746,16 +746,23 @@ func verifyUsageReport(t *testing.T, tc UsageReportTestCase, actual liquid.Servi } // vmAttributes is used to parse the subresource attributes JSON. 
+// Uses the liquid-nova format with nested flavor structure. type vmAttributes struct { - ID string `json:"-"` // set from Subresource.ID - Name string `json:"name"` - Flavor string `json:"flavor"` - Status string `json:"status"` - Hypervisor string `json:"hypervisor"` - RAM uint64 `json:"ram"` - VCPU uint64 `json:"vcpu"` - Disk uint64 `json:"disk"` - CommitmentID string `json:"commitment_id,omitempty"` + ID string `json:"-"` // set from Subresource.ID + Status string `json:"status"` + Metadata map[string]string `json:"metadata"` + Tags []string `json:"tags"` + Flavor vmFlavorAttrs `json:"flavor"` + OSType string `json:"os_type"` + CommitmentID string `json:"commitment_id,omitempty"` +} + +// vmFlavorAttrs is the nested flavor info within vm attributes. +type vmFlavorAttrs struct { + Name string `json:"name"` + VCPUs uint64 `json:"vcpu"` + MemoryMiB uint64 `json:"ram_mib"` + DiskGiB uint64 `json:"disk_gib"` } // ============================================================================ diff --git a/internal/scheduling/reservations/commitments/usage.go b/internal/scheduling/reservations/commitments/usage.go index ba5681732..270bc7dd0 100644 --- a/internal/scheduling/reservations/commitments/usage.go +++ b/internal/scheduling/reservations/commitments/usage.go @@ -32,7 +32,29 @@ type VMUsageInfo struct { AZ string Hypervisor string CreatedAt time.Time - UsageMultiple uint64 // Memory in multiples of smallest flavor in the group + UsageMultiple uint64 // Memory in multiples of smallest flavor in the group + Metadata map[string]string // Server metadata from Nova + Tags []string // Server tags from Nova +} + +// flavorAttributes represents flavor information for a VM subresource. +// Matches the format used by liquid-nova for consistency. 
+type flavorAttributes struct { + Name string `json:"name"` + VCPUs uint64 `json:"vcpu"` + MemoryMiB uint64 `json:"ram_mib"` + DiskGiB uint64 `json:"disk_gib"` + VideoMemoryMiB *uint64 `json:"video_ram_mib,omitempty"` // Not available yet +} + +// subresourceAttributes is the Attributes payload for a VM subresource. +// Matches the format used by liquid-nova for consistency. +type subresourceAttributes struct { + Status string `json:"status"` + Metadata map[string]string `json:"metadata"` + Tags []string `json:"tags"` + Flavor flavorAttributes `json:"flavor"` + OSType string `json:"os_type"` // Not available yet, left empty } // UsageCalculator computes usage reports for Limes LIQUID API. @@ -243,6 +265,8 @@ func (c *UsageCalculator) getProjectVMs( Hypervisor: server.Hypervisor, CreatedAt: createdAt, UsageMultiple: usageMultiple, + Metadata: server.Metadata, + Tags: server.Tags, } vms = append(vms, vm) @@ -481,25 +505,57 @@ func (c *UsageCalculator) buildUsageResponse( } // buildVMAttributes creates the attributes map for a VM subresource. +// Follows the liquid-nova format with nested flavor structure. 
func buildVMAttributes(vm VMUsageInfo, commitmentID string) map[string]any { - attributes := map[string]any{ - "name": vm.Name, - "flavor": vm.FlavorName, - "status": vm.Status, - "hypervisor": vm.Hypervisor, - "ram": vm.MemoryMB, - "vcpu": vm.VCPUs, - "disk": vm.DiskGB, + // Build metadata map (never nil for JSON) + metadata := vm.Metadata + if metadata == nil { + metadata = map[string]string{} + } + + // Build tags slice (never nil for JSON) + tags := vm.Tags + if tags == nil { + tags = []string{} + } + + attributes := subresourceAttributes{ + Status: vm.Status, + Metadata: metadata, + Tags: tags, + Flavor: flavorAttributes{ + Name: vm.FlavorName, + VCPUs: vm.VCPUs, + MemoryMiB: vm.MemoryMB, + DiskGiB: vm.DiskGB, + // VideoMemoryMiB: nil - not available yet + }, + OSType: "", // Not available yet + } + + // Convert to map[string]any and add extra fields + result := map[string]any{ + "status": attributes.Status, + "metadata": attributes.Metadata, + "tags": attributes.Tags, + "flavor": map[string]any{ + "name": attributes.Flavor.Name, + "vcpu": attributes.Flavor.VCPUs, + "ram_mib": attributes.Flavor.MemoryMiB, + "disk_gib": attributes.Flavor.DiskGiB, + // video_ram_mib omitted when nil + }, + "os_type": attributes.OSType, } // Add commitment_id - nil for PAYG, string for committed if commitmentID != "" { - attributes["commitment_id"] = commitmentID + result["commitment_id"] = commitmentID } else { - attributes["commitment_id"] = nil + result["commitment_id"] = nil } - return attributes + return result } // countCommitmentStates returns the total number of commitments across all az:flavorGroup keys. 
diff --git a/internal/scheduling/reservations/commitments/usage_test.go b/internal/scheduling/reservations/commitments/usage_test.go index 425407db5..1e2a1a64e 100644 --- a/internal/scheduling/reservations/commitments/usage_test.go +++ b/internal/scheduling/reservations/commitments/usage_test.go @@ -331,35 +331,62 @@ func TestBuildVMAttributes(t *testing.T) { MemoryMB: 4096, VCPUs: 16, DiskGB: 100, + Metadata: map[string]string{"env": "prod"}, + Tags: []string{"important"}, } t.Run("with commitment", func(t *testing.T) { attrs := buildVMAttributes(vm, "commit-456") - if attrs["name"] != "my-vm" { - t.Errorf("name = %v, expected my-vm", attrs["name"]) - } - if attrs["flavor"] != "m1.large" { - t.Errorf("flavor = %v, expected m1.large", attrs["flavor"]) - } + // Status at top level if attrs["status"] != "ACTIVE" { t.Errorf("status = %v, expected ACTIVE", attrs["status"]) } - if attrs["hypervisor"] != "host-1" { - t.Errorf("hypervisor = %v, expected host-1", attrs["hypervisor"]) - } - if attrs["ram"] != uint64(4096) { - t.Errorf("ram = %v, expected 4096", attrs["ram"]) + + // Metadata at top level + metadata, ok := attrs["metadata"].(map[string]string) + if !ok { + t.Errorf("metadata is not map[string]string: %T", attrs["metadata"]) + } else if metadata["env"] != "prod" { + t.Errorf("metadata[env] = %v, expected prod", metadata["env"]) } - if attrs["vcpu"] != uint64(16) { - t.Errorf("vcpu = %v, expected 16", attrs["vcpu"]) + + // Tags at top level + tags, ok := attrs["tags"].([]string) + if !ok { + t.Errorf("tags is not []string: %T", attrs["tags"]) + } else if len(tags) != 1 || tags[0] != "important" { + t.Errorf("tags = %v, expected [important]", tags) } - if attrs["disk"] != uint64(100) { - t.Errorf("disk = %v, expected 100", attrs["disk"]) + + // Flavor is now nested + flavor, ok := attrs["flavor"].(map[string]any) + if !ok { + t.Errorf("flavor is not map[string]any: %T", attrs["flavor"]) + } else { + if flavor["name"] != "m1.large" { + t.Errorf("flavor.name = %v, 
expected m1.large", flavor["name"]) + } + if flavor["vcpu"] != uint64(16) { + t.Errorf("flavor.vcpu = %v, expected 16", flavor["vcpu"]) + } + if flavor["ram_mib"] != uint64(4096) { + t.Errorf("flavor.ram_mib = %v, expected 4096", flavor["ram_mib"]) + } + if flavor["disk_gib"] != uint64(100) { + t.Errorf("flavor.disk_gib = %v, expected 100", flavor["disk_gib"]) + } } + + // Commitment ID if attrs["commitment_id"] != "commit-456" { t.Errorf("commitment_id = %v, expected commit-456", attrs["commitment_id"]) } + + // OS type (empty for now) + if attrs["os_type"] != "" { + t.Errorf("os_type = %v, expected empty string", attrs["os_type"]) + } }) t.Run("without commitment (PAYG)", func(t *testing.T) { @@ -369,6 +396,32 @@ func TestBuildVMAttributes(t *testing.T) { t.Errorf("commitment_id = %v, expected nil", attrs["commitment_id"]) } }) + + t.Run("with nil metadata and tags", func(t *testing.T) { + vmEmpty := VMUsageInfo{ + UUID: "vm-empty", + Name: "empty-vm", + FlavorName: "m1.small", + Status: "ACTIVE", + MemoryMB: 1024, + VCPUs: 2, + DiskGB: 10, + Metadata: nil, + Tags: nil, + } + attrs := buildVMAttributes(vmEmpty, "") + + // Should have empty map and slice, not nil (for JSON serialization) + metadata, ok := attrs["metadata"].(map[string]string) + if !ok || metadata == nil { + t.Errorf("metadata should be empty map, got %T: %v", attrs["metadata"], attrs["metadata"]) + } + + tags, ok := attrs["tags"].([]string) + if !ok || tags == nil { + t.Errorf("tags should be empty slice, got %T: %v", attrs["tags"], attrs["tags"]) + } + }) } func TestCountCommitmentStates(t *testing.T) { From 10eb7303b95029985d2d83c160afeb0b3b9f5ba9 Mon Sep 17 00:00:00 2001 From: mblos Date: Thu, 26 Mar 2026 14:10:49 +0100 Subject: [PATCH 3/9] Adding OSTypeProber --- go.mod | 7 ++++ go.sum | 10 ++++- internal/scheduling/nova/nova_client.go | 39 ++++++++++++++++++- .../reservations/commitments/usage.go | 6 ++- 4 files changed, 57 insertions(+), 5 deletions(-) diff --git a/go.mod b/go.mod index 
a6c3b9ce7..a40308958 100644 --- a/go.mod +++ b/go.mod @@ -17,6 +17,13 @@ require ( sigs.k8s.io/controller-runtime v0.23.3 ) +require ( + github.com/databus23/goslo.policy v0.0.0-20250326134918-4afc2c56a903 // indirect + github.com/gofrs/uuid/v5 v5.4.0 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect +) + require ( cel.dev/expr v0.25.1 // indirect github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect diff --git a/go.sum b/go.sum index 6dba2b5cc..80120fd64 100644 --- a/go.sum +++ b/go.sum @@ -20,8 +20,6 @@ github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1x github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cobaltcore-dev/openstack-hypervisor-operator v1.0.1 h1:wXolWfljyQQZbxNQ2pZVIw8wFz9BKiDIvLrECsqGDT8= -github.com/cobaltcore-dev/openstack-hypervisor-operator v1.0.1/go.mod h1:b0KmJdxvRI8UXlGe8cRm5BD8Tm2WhF7zSKMSIRGyVL4= github.com/cobaltcore-dev/openstack-hypervisor-operator v1.0.2-0.20260324155836-56b40c7ff846 h1:Hg5+F1lOUpU9dZ8gVxeohodtYC4Z1fV/iqwYoF/RuNc= github.com/cobaltcore-dev/openstack-hypervisor-operator v1.0.2-0.20260324155836-56b40c7ff846/go.mod h1:j1SaxUTo0irugdC7aHuYDKEomIPZwCHoz+4kE8EBBGM= github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4= @@ -31,6 +29,8 @@ github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/databus23/goslo.policy 
v0.0.0-20250326134918-4afc2c56a903 h1:RiumxYxPww35QeXCGV9NTohc7eGQwlVdz+p3nNHIF28= +github.com/databus23/goslo.policy v0.0.0-20250326134918-4afc2c56a903/go.mod h1:tRj172JgwQmUmEqZZJBWzYWFStitMFTtb95NtUnmpkw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -78,6 +78,8 @@ github.com/go-sql-driver/mysql v1.5.0 h1:ozyZYNQW3x3HtqT1jira07DN2PArx2v7/mN66gG github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gofrs/uuid/v5 v5.4.0 h1:EfbpCTjqMuGyq5ZJwxqzn3Cbr2d0rUZU7v5ycAk/e/0= +github.com/gofrs/uuid/v5 v5.4.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-migrate/migrate/v4 v4.19.1 h1:OCyb44lFuQfYXYLx1SCxPZQGU7mcaZ7gH9yH4jSFbBA= @@ -101,10 +103,14 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gophercloud/gophercloud/v2 v2.11.1 h1:jCs4vLH8sJgRqrPzqVfWgl7uI6JnIIlsgeIRM0uHjxY= github.com/gophercloud/gophercloud/v2 v2.11.1/go.mod h1:Rm0YvKQ4QYX2rY9XaDKnjRzSGwlG5ge4h6ABYnmkKQM= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= github.com/gotestyourself/gotestyourself 
v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 h1:X5VWvz21y3gzm9Nw/kaUeku/1+uBhcekkmy4IkffJww= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1/go.mod h1:Zanoh4+gvIgluNqcfMVTJueD4wSS5hT7zTt4Mrutd90= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/ironcore-dev/ironcore v0.2.4 h1:i/RqiMIdzaptuDR6EKSX9hbeolj7AfTuT+4v1ZC4Jeg= diff --git a/internal/scheduling/nova/nova_client.go b/internal/scheduling/nova/nova_client.go index 65cad576a..7b1600971 100644 --- a/internal/scheduling/nova/nova_client.go +++ b/internal/scheduling/nova/nova_client.go @@ -14,6 +14,7 @@ import ( "github.com/cobaltcore-dev/cortex/pkg/sso" "github.com/gophercloud/gophercloud/v2" "github.com/gophercloud/gophercloud/v2/openstack/compute/v2/servers" + "github.com/sapcc/go-bits/liquidapi" corev1 "k8s.io/api/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -53,6 +54,7 @@ type ServerDetail struct { FlavorDisk uint64 // Populated from nested flavor.disk Metadata map[string]string // Server metadata key-value pairs Tags []string // Server tags + OSType string // OS type determined by OSTypeProber } type NovaClient interface { @@ -71,6 +73,8 @@ type NovaClient interface { type novaClient struct { // Authenticated OpenStack service client to fetch the data. sc *gophercloud.ServiceClient + // OS type prober for determining VM operating system type (for billing). 
+ osTypeProber *liquidapi.OSTypeProber } func NewNovaClient() NovaClient { @@ -111,6 +115,16 @@ func (api *novaClient) Init(ctx context.Context, client client.Client, conf Nova // We need that to find placement resource providers for hypervisors. Microversion: "2.53", } + + // Initialize OS type prober for determining VM operating system type. + // Uses existing provider client to access Glance (image) and Cinder (volume) APIs. + eo := gophercloud.EndpointOpts{Availability: gophercloud.Availability(authenticatedKeystone.Availability())} + api.osTypeProber, err = liquidapi.NewOSTypeProber(provider, eo) + if err != nil { + slog.Warn("failed to initialize OS type prober - os_type will be empty", "error", err) + // Non-fatal - continue without OS type probing + } + return nil } @@ -205,7 +219,7 @@ func (api *novaClient) ListProjectServers(ctx context.Context, projectID string) return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode) } - // Response structure with nested flavor, metadata, and tags + // Response structure with nested flavor, metadata, tags, image, and volumes var list struct { Servers []struct { ID string `json:"id"` @@ -223,6 +237,12 @@ func (api *novaClient) ListProjectServers(ctx context.Context, projectID string) VCPUs uint64 `json:"vcpus"` Disk uint64 `json:"disk"` } `json:"flavor"` + // For OS type probing + Image map[string]any `json:"image"` + AttachedVolumes []struct { + ID string `json:"id"` + DeleteOnTermination bool `json:"delete_on_termination"` + } `json:"os-extended-volumes:volumes_attached"` } `json:"servers"` Links []struct { Rel string `json:"rel"` @@ -236,6 +256,22 @@ func (api *novaClient) ListProjectServers(ctx context.Context, projectID string) // Convert to ServerDetail for _, s := range list.Servers { + // Probe OS type if prober is available + osType := "" + if api.osTypeProber != nil { + // Build a minimal servers.Server for the prober + vols := make([]servers.AttachedVolume, len(s.AttachedVolumes)) + for i, v := 
range s.AttachedVolumes { + vols[i] = servers.AttachedVolume{ID: v.ID} + } + proberServer := servers.Server{ + ID: s.ID, + Image: s.Image, + AttachedVolumes: vols, + } + osType = api.osTypeProber.Get(ctx, proberServer) + } + result = append(result, ServerDetail{ ID: s.ID, Name: s.Name, @@ -250,6 +286,7 @@ func (api *novaClient) ListProjectServers(ctx context.Context, projectID string) FlavorDisk: s.Flavor.Disk, Metadata: s.Metadata, Tags: s.Tags, + OSType: osType, }) } diff --git a/internal/scheduling/reservations/commitments/usage.go b/internal/scheduling/reservations/commitments/usage.go index 270bc7dd0..0ffe14cd5 100644 --- a/internal/scheduling/reservations/commitments/usage.go +++ b/internal/scheduling/reservations/commitments/usage.go @@ -35,6 +35,7 @@ type VMUsageInfo struct { UsageMultiple uint64 // Memory in multiples of smallest flavor in the group Metadata map[string]string // Server metadata from Nova Tags []string // Server tags from Nova + OSType string // OS type from OSTypeProber (for billing) } // flavorAttributes represents flavor information for a VM subresource. 
@@ -267,6 +268,7 @@ func (c *UsageCalculator) getProjectVMs( UsageMultiple: usageMultiple, Metadata: server.Metadata, Tags: server.Tags, + OSType: server.OSType, } vms = append(vms, vm) @@ -530,7 +532,7 @@ func buildVMAttributes(vm VMUsageInfo, commitmentID string) map[string]any { DiskGiB: vm.DiskGB, // VideoMemoryMiB: nil - not available yet }, - OSType: "", // Not available yet + OSType: vm.OSType, } // Convert to map[string]any and add extra fields @@ -545,7 +547,7 @@ func buildVMAttributes(vm VMUsageInfo, commitmentID string) map[string]any { "disk_gib": attributes.Flavor.DiskGiB, // video_ram_mib omitted when nil }, - "os_type": attributes.OSType, + "os_type": vm.OSType, } // Add commitment_id - nil for PAYG, string for committed From 7a3ac6190ee52ffb869b76551a25f5eb53bf1c15 Mon Sep 17 00:00:00 2001 From: mblos Date: Thu, 26 Mar 2026 14:15:27 +0100 Subject: [PATCH 4/9] fix testcase --- .../api_change_commitments_test.go | 36 ++------------ .../reservations/commitments/usage.go | 49 +++---------------- 2 files changed, 10 insertions(+), 75 deletions(-) diff --git a/internal/scheduling/reservations/commitments/api_change_commitments_test.go b/internal/scheduling/reservations/commitments/api_change_commitments_test.go index c304d9e5a..81113f5f5 100644 --- a/internal/scheduling/reservations/commitments/api_change_commitments_test.go +++ b/internal/scheduling/reservations/commitments/api_change_commitments_test.go @@ -1069,35 +1069,14 @@ func (env *CommitmentTestEnv) LogStateSummary() { } // CallChangeCommitmentsAPI calls the change commitments API endpoint with JSON. -// It uses a hybrid approach: fast polling during API execution + synchronous final pass. +// Reservation processing is fully synchronous via operationInterceptorClient hooks. 
func (env *CommitmentTestEnv) CallChangeCommitmentsAPI(reqJSON string) (resp liquid.CommitmentChangeResponse, respJSON string, statusCode int) { env.T.Helper() - // Start fast polling in background to handle reservations during API execution - ctx, cancel := context.WithCancel(context.Background()) - done := make(chan struct{}) - - go func() { - ticker := time.NewTicker(5 * time.Millisecond) // Very fast - 5ms - defer ticker.Stop() - defer close(done) - - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - env.processReservations() - } - } - }() - - // Make HTTP request + // Make HTTP request - reservation processing happens synchronously via Create/Delete hooks url := env.HTTPServer.URL + "/commitments/v1/change-commitments" httpResp, err := http.Post(url, "application/json", bytes.NewReader([]byte(reqJSON))) //nolint:gosec,noctx // test server URL, not user input if err != nil { - cancel() - <-done env.T.Fatalf("Failed to make HTTP request: %v", err) } defer httpResp.Body.Close() @@ -1105,8 +1084,6 @@ func (env *CommitmentTestEnv) CallChangeCommitmentsAPI(reqJSON string) (resp liq // Read response body respBytes, err := io.ReadAll(httpResp.Body) if err != nil { - cancel() - <-done env.T.Fatalf("Failed to read response body: %v", err) } @@ -1116,18 +1093,11 @@ func (env *CommitmentTestEnv) CallChangeCommitmentsAPI(reqJSON string) (resp liq // Non-200 responses (like 409 Conflict for version mismatch) use plain text via http.Error() if httpResp.StatusCode == http.StatusOK { if err := json.Unmarshal(respBytes, &resp); err != nil { - cancel() - <-done env.T.Fatalf("Failed to unmarshal response: %v", err) } } - // Stop background polling - cancel() - <-done - - // Final synchronous pass to ensure all reservations are processed - // This eliminates any race conditions + // Final pass to handle any deletions (finalizer removal) env.processReservations() statusCode = httpResp.StatusCode diff --git a/internal/scheduling/reservations/commitments/usage.go 
b/internal/scheduling/reservations/commitments/usage.go index 0ffe14cd5..bcdee5352 100644 --- a/internal/scheduling/reservations/commitments/usage.go +++ b/internal/scheduling/reservations/commitments/usage.go @@ -38,26 +38,6 @@ type VMUsageInfo struct { OSType string // OS type from OSTypeProber (for billing) } -// flavorAttributes represents flavor information for a VM subresource. -// Matches the format used by liquid-nova for consistency. -type flavorAttributes struct { - Name string `json:"name"` - VCPUs uint64 `json:"vcpu"` - MemoryMiB uint64 `json:"ram_mib"` - DiskGiB uint64 `json:"disk_gib"` - VideoMemoryMiB *uint64 `json:"video_ram_mib,omitempty"` // Not available yet -} - -// subresourceAttributes is the Attributes payload for a VM subresource. -// Matches the format used by liquid-nova for consistency. -type subresourceAttributes struct { - Status string `json:"status"` - Metadata map[string]string `json:"metadata"` - Tags []string `json:"tags"` - Flavor flavorAttributes `json:"flavor"` - OSType string `json:"os_type"` // Not available yet, left empty -} - // UsageCalculator computes usage reports for Limes LIQUID API. 
type UsageCalculator struct { client client.Client @@ -521,30 +501,15 @@ func buildVMAttributes(vm VMUsageInfo, commitmentID string) map[string]any { tags = []string{} } - attributes := subresourceAttributes{ - Status: vm.Status, - Metadata: metadata, - Tags: tags, - Flavor: flavorAttributes{ - Name: vm.FlavorName, - VCPUs: vm.VCPUs, - MemoryMiB: vm.MemoryMB, - DiskGiB: vm.DiskGB, - // VideoMemoryMiB: nil - not available yet - }, - OSType: vm.OSType, - } - - // Convert to map[string]any and add extra fields result := map[string]any{ - "status": attributes.Status, - "metadata": attributes.Metadata, - "tags": attributes.Tags, + "status": vm.Status, + "metadata": metadata, + "tags": tags, "flavor": map[string]any{ - "name": attributes.Flavor.Name, - "vcpu": attributes.Flavor.VCPUs, - "ram_mib": attributes.Flavor.MemoryMiB, - "disk_gib": attributes.Flavor.DiskGiB, + "name": vm.FlavorName, + "vcpu": vm.VCPUs, + "ram_mib": vm.MemoryMB, + "disk_gib": vm.DiskGB, // video_ram_mib omitted when nil }, "os_type": vm.OSType, From aac99fa12b4654ad53427565ed98f0f3d83e8dc1 Mon Sep 17 00:00:00 2001 From: mblos Date: Thu, 26 Mar 2026 14:25:20 +0100 Subject: [PATCH 5/9] . --- .../reservations/commitments/capacity.go | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/internal/scheduling/reservations/commitments/capacity.go b/internal/scheduling/reservations/commitments/capacity.go index 415a45ae0..c14201cf4 100644 --- a/internal/scheduling/reservations/commitments/capacity.go +++ b/internal/scheduling/reservations/commitments/capacity.go @@ -67,12 +67,17 @@ func (c *CapacityCalculator) CalculateCapacity(ctx context.Context) (liquid.Serv } // === 2. Cores Resource === + // NOTE: Copying RAM capacity is only valid while capacity=0 (placeholder). + // When real capacity is implemented, derive cores capacity with unit conversion + // (e.g., cores = RAM / ramCoreRatio). See calculateAZCapacity for details. 
coresResourceName := liquid.ResourceName(ResourceNameCores(groupName)) report.Resources[coresResourceName] = &liquid.ResourceCapacityReport{ PerAZ: c.copyAZCapacity(azCapacity), } // === 3. Instances Resource === + // NOTE: Same as cores - copying is only valid while capacity=0 (placeholder). + // When real capacity is implemented, derive instances capacity appropriately. instancesResourceName := liquid.ResourceName(ResourceNameInstances(groupName)) report.Resources[instancesResourceName] = &liquid.ResourceCapacityReport{ PerAZ: c.copyAZCapacity(azCapacity), @@ -109,7 +114,15 @@ func (c *CapacityCalculator) calculateAZCapacity( return nil, fmt.Errorf("failed to get availability zones: %w", err) } - // Create report entry for each AZ with placeholder capacity=0 + // Create report entry for each AZ with placeholder capacity=0. + // + // NOTE: When implementing real capacity calculation here, you MUST also update + // the copying logic in CalculateCapacity() for _cores and _instances resources. + // Those resources use different units (vCPUs and VM count) than _ram (memory multiples), + // so the capacity values cannot be simply copied - they require unit conversion: + // - _cores capacity = RAM capacity / ramCoreRatio + // - _instances capacity = needs its own derivation logic + // // TODO: Calculate actual capacity from Reservation CRDs or host resources // TODO: Calculate actual usage from VM allocations result := make(map[liquid.AvailabilityZone]*liquid.AZResourceCapacityReport) From 8c069873c2d1ba19f0f1421d21d30d2191ec4562 Mon Sep 17 00:00:00 2001 From: mblos Date: Thu, 26 Mar 2026 14:30:13 +0100 Subject: [PATCH 6/9] . 
--- .../reservations/commitments/state.go | 31 ++++++++++--------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/internal/scheduling/reservations/commitments/state.go b/internal/scheduling/reservations/commitments/state.go index 4a30cb814..61ca1ff10 100644 --- a/internal/scheduling/reservations/commitments/state.go +++ b/internal/scheduling/reservations/commitments/state.go @@ -47,27 +47,30 @@ func ResourceNameInstances(flavorGroup string) string { } // getFlavorGroupNameFromResource extracts the flavor group name from a LIQUID resource name. -// Supports all resource types: _ram, _cores, _instances +// Only accepts _ram resources since CommitmentState is RAM-based. +// Callers handling _cores or _instances must use a different approach. func getFlavorGroupNameFromResource(resourceName string) (string, error) { if !strings.HasPrefix(resourceName, resourceNamePrefix) { return "", fmt.Errorf("invalid resource name: %s (missing prefix)", resourceName) } - // Try each known suffix - for _, suffix := range []string{ResourceSuffixRAM, ResourceSuffixCores, ResourceSuffixInstances} { - if strings.HasSuffix(resourceName, suffix) { - // Remove prefix and suffix - name := strings.TrimPrefix(resourceName, resourceNamePrefix) - name = strings.TrimSuffix(name, suffix) - // Validate that the extracted group name is not empty - if name == "" { - return "", fmt.Errorf("invalid resource name: %s (empty group name)", resourceName) - } - return name, nil - } + // Only accept _ram suffix - commitments are RAM-based and CommitmentState + // carries TotalMemoryBytes. Accepting _cores or _instances here would + // silently reinterpret non-RAM amounts as RAM, producing wrong state. 
+ if !strings.HasSuffix(resourceName, ResourceSuffixRAM) { + return "", fmt.Errorf("invalid resource name: %s (only _ram resources are supported for commitments)", resourceName) + } + + // Remove prefix and suffix + name := strings.TrimPrefix(resourceName, resourceNamePrefix) + name = strings.TrimSuffix(name, ResourceSuffixRAM) + + // Validate that the extracted group name is not empty + if name == "" { + return "", fmt.Errorf("invalid resource name: %s (empty group name)", resourceName) } - return "", fmt.Errorf("invalid resource name: %s (unknown suffix)", resourceName) + return name, nil } // CommitmentState represents desired or current commitment resource allocation. From c723b6d3b35b64784bb5722d5be720c2a289cbbf Mon Sep 17 00:00:00 2001 From: mblos Date: Thu, 26 Mar 2026 14:45:06 +0100 Subject: [PATCH 7/9] fix resource reporting --- .../reservations/commitments/api_info.go | 25 ++++++++++--------- .../reservations/commitments/capacity.go | 11 +++----- .../reservations/commitments/usage.go | 14 +++++------ 3 files changed, 23 insertions(+), 27 deletions(-) diff --git a/internal/scheduling/reservations/commitments/api_info.go b/internal/scheduling/reservations/commitments/api_info.go index 150fee5a9..ccb660bc9 100644 --- a/internal/scheduling/reservations/commitments/api_info.go +++ b/internal/scheduling/reservations/commitments/api_info.go @@ -92,10 +92,11 @@ type resourceAttributes struct { } // buildServiceInfo constructs the ServiceInfo response with metadata for all flavor groups. 
-// For each flavor group that accepts commitments, three resources are registered: -// - _ram: RAM resource (unit = multiples of smallest flavor RAM, HandlesCommitments=true) +// For each flavor group, three resources are registered: +// - _ram: RAM resource (unit = multiples of smallest flavor RAM, HandlesCommitments=true only if fixed ratio) // - _cores: CPU cores resource (unit = 1, HandlesCommitments=false) // - _instances: Instance count resource (unit = 1, HandlesCommitments=false) +// All flavor groups report usage; only those with fixed RAM/core ratio accept commitments. func (api *HTTPAPI) buildServiceInfo(ctx context.Context, logger logr.Logger) (liquid.ServiceInfo, error) { // Get all flavor groups from Knowledge CRDs knowledge := &reservations.FlavorGroupKnowledgeClient{Client: api.client} @@ -111,11 +112,11 @@ func (api *HTTPAPI) buildServiceInfo(ctx context.Context, logger logr.Logger) (l // Build resources map resources := make(map[liquid.ResourceName]liquid.ResourceInfo) for groupName, groupData := range flavorGroups { - // Only handle commitments for groups with a fixed RAM/core ratio + // Determine if this group accepts commitments (requires fixed RAM/core ratio) handlesCommitments := FlavorGroupAcceptsCommitments(&groupData) - if !handlesCommitments { - continue // Skip groups that don't accept commitments - } + + // All flavor groups are registered for usage reporting. + // Only those with a fixed RAM/core ratio have HandlesCommitments=true. 
flavorNames := make([]string, 0, len(groupData.Flavors)) for _, flavor := range groupData.Flavors { @@ -155,12 +156,12 @@ func (api *HTTPAPI) buildServiceInfo(ctx context.Context, logger logr.Logger) (l groupData.SmallestFlavor.MemoryMB, flavorListStr, ), - Unit: ramUnit, // Non-standard unit: multiples of smallest flavor RAM - Topology: liquid.AZAwareTopology, // Commitments are per-AZ - NeedsResourceDemand: false, // Capacity planning out of scope for now - HasCapacity: true, // We report capacity via /commitments/v1/report-capacity - HasQuota: false, // No quota enforcement as of now - HandlesCommitments: true, // RAM is the primary commitment resource + Unit: ramUnit, // Non-standard unit: multiples of smallest flavor RAM + Topology: liquid.AZAwareTopology, + NeedsResourceDemand: false, + HasCapacity: true, // We report capacity via /commitments/v1/report-capacity + HasQuota: false, + HandlesCommitments: handlesCommitments, // Only groups with fixed ratio accept commitments Attributes: attrsJSON, } diff --git a/internal/scheduling/reservations/commitments/capacity.go b/internal/scheduling/reservations/commitments/capacity.go index c14201cf4..ad4e488da 100644 --- a/internal/scheduling/reservations/commitments/capacity.go +++ b/internal/scheduling/reservations/commitments/capacity.go @@ -25,9 +25,9 @@ func NewCapacityCalculator(client client.Client) *CapacityCalculator { return &CapacityCalculator{client: client} } -// CalculateCapacity computes per-AZ capacity for all flavor groups that accept commitments. -// Only flavor groups with a fixed RAM/core ratio are included in the report. +// CalculateCapacity computes per-AZ capacity for all flavor groups. // For each flavor group, three resources are reported: _ram, _cores, _instances. +// All flavor groups are included, not just those with fixed RAM/core ratio. 
func (c *CapacityCalculator) CalculateCapacity(ctx context.Context) (liquid.ServiceCapacityReport, error) { // Get all flavor groups from Knowledge CRDs knowledge := &reservations.FlavorGroupKnowledgeClient{Client: c.client} @@ -42,17 +42,14 @@ func (c *CapacityCalculator) CalculateCapacity(ctx context.Context) (liquid.Serv infoVersion = knowledgeCRD.Status.LastContentChange.Unix() } - // Build capacity report per flavor group (only for groups that accept CRs) + // Build capacity report for all flavor groups report := liquid.ServiceCapacityReport{ InfoVersion: infoVersion, Resources: make(map[liquid.ResourceName]*liquid.ResourceCapacityReport), } for groupName, groupData := range flavorGroups { - // Only report capacity for flavor groups that accept commitments - if !FlavorGroupAcceptsCommitments(&groupData) { - continue - } + // All flavor groups are included in capacity reporting (not just those with fixed ratio). // Calculate per-AZ capacity (placeholder: capacity=0 for all resources) azCapacity, err := c.calculateAZCapacity(ctx, groupName, groupData) diff --git a/internal/scheduling/reservations/commitments/usage.go b/internal/scheduling/reservations/commitments/usage.go index bcdee5352..2e8fe4c81 100644 --- a/internal/scheduling/reservations/commitments/usage.go +++ b/internal/scheduling/reservations/commitments/usage.go @@ -350,7 +350,8 @@ type azUsageData struct { } // buildUsageResponse constructs the Liquid API ServiceUsageReport. -// Only flavor groups that accept commitments are included in the report. +// All flavor groups are included in the report; commitment assignment only applies +// to groups with fixed RAM/core ratio (those that accept commitments). // For each flavor group, three resources are reported: _ram, _cores, _instances. 
func (c *UsageCalculator) buildUsageResponse( vms []VMUsageInfo, @@ -359,7 +360,7 @@ func (c *UsageCalculator) buildUsageResponse( allAZs []liquid.AvailabilityZone, infoVersion int64, ) liquid.ServiceUsageReport { - // Initialize resources map for flavor groups that accept commitments + // Initialize resources map for all flavor groups resources := make(map[liquid.ResourceName]*liquid.ResourceUsageReport) // Group VMs by flavor group and AZ for aggregation @@ -403,12 +404,9 @@ func (c *UsageCalculator) buildUsageResponse( ) } - // Build ResourceUsageReport for each flavor group that accepts commitments - for flavorGroupName, groupData := range flavorGroups { - // Only report usage for flavor groups that accept commitments - if !FlavorGroupAcceptsCommitments(&groupData) { - continue - } + // Build ResourceUsageReport for all flavor groups (not just those with fixed ratio) + for flavorGroupName := range flavorGroups { + // All flavor groups are included in usage reporting. // === 1. RAM Resource === ramResourceName := liquid.ResourceName(ResourceNameRAM(flavorGroupName)) From 6ad0f66e9381ebde67a25d94158886f34ab98853 Mon Sep 17 00:00:00 2001 From: mblos Date: Thu, 26 Mar 2026 14:55:58 +0100 Subject: [PATCH 8/9] fix nil and image parsing --- internal/scheduling/nova/nova_client.go | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/internal/scheduling/nova/nova_client.go b/internal/scheduling/nova/nova_client.go index 7b1600971..f5ec09e23 100644 --- a/internal/scheduling/nova/nova_client.go +++ b/internal/scheduling/nova/nova_client.go @@ -196,6 +196,9 @@ func (api *novaClient) GetServerMigrations(ctx context.Context, id string) ([]mi // ListProjectServers retrieves all servers for a project with detailed info. 
func (api *novaClient) ListProjectServers(ctx context.Context, projectID string) ([]ServerDetail, error) { + if api.sc == nil { + return nil, fmt.Errorf("nova client not initialized - call Init first") + } // Build URL with pagination support initialURL := api.sc.Endpoint + "servers/detail?all_tenants=true&tenant_id=" + projectID var nextURL = &initialURL @@ -237,8 +240,9 @@ func (api *novaClient) ListProjectServers(ctx context.Context, projectID string) VCPUs uint64 `json:"vcpus"` Disk uint64 `json:"disk"` } `json:"flavor"` - // For OS type probing - Image map[string]any `json:"image"` + // For OS type probing - use json.RawMessage because Nova returns + // either a map (for image-booted VMs) or empty string "" (for volume-booted VMs) + Image json.RawMessage `json:"image"` AttachedVolumes []struct { ID string `json:"id"` DeleteOnTermination bool `json:"delete_on_termination"` @@ -259,6 +263,11 @@ func (api *novaClient) ListProjectServers(ctx context.Context, projectID string) // Probe OS type if prober is available osType := "" if api.osTypeProber != nil { + // Parse image field - Nova returns either a map or empty string "" + var imageMap map[string]any + if len(s.Image) > 0 && s.Image[0] == '{' { + _ = json.Unmarshal(s.Image, &imageMap) + } // Build a minimal servers.Server for the prober vols := make([]servers.AttachedVolume, len(s.AttachedVolumes)) for i, v := range s.AttachedVolumes { @@ -266,7 +275,7 @@ func (api *novaClient) ListProjectServers(ctx context.Context, projectID string) } proberServer := servers.Server{ ID: s.ID, - Image: s.Image, + Image: imageMap, AttachedVolumes: vols, } osType = api.osTypeProber.Get(ctx, proberServer) From aaa9cebf681a2ee88f703177a6197ccd529f2a42 Mon Sep 17 00:00:00 2001 From: mblos Date: Thu, 26 Mar 2026 15:29:16 +0100 Subject: [PATCH 9/9] linting and usage calculation fix --- internal/scheduling/nova/nova_client.go | 6 +- .../reservations/commitments/api_info.go | 4 +- .../reservations/commitments/api_info_test.go | 53 
++++-- .../reservations/commitments/usage.go | 3 +- .../reservations/commitments/usage_test.go | 169 ++++++++++++++++++ 5 files changed, 215 insertions(+), 20 deletions(-) diff --git a/internal/scheduling/nova/nova_client.go b/internal/scheduling/nova/nova_client.go index f5ec09e23..68b84d446 100644 --- a/internal/scheduling/nova/nova_client.go +++ b/internal/scheduling/nova/nova_client.go @@ -6,6 +6,7 @@ package nova import ( "context" "encoding/json" + "errors" "fmt" "log/slog" "net/http" @@ -197,7 +198,7 @@ func (api *novaClient) GetServerMigrations(ctx context.Context, id string) ([]mi // ListProjectServers retrieves all servers for a project with detailed info. func (api *novaClient) ListProjectServers(ctx context.Context, projectID string) ([]ServerDetail, error) { if api.sc == nil { - return nil, fmt.Errorf("nova client not initialized - call Init first") + return nil, errors.New("nova client not initialized - call Init first") } // Build URL with pagination support initialURL := api.sc.Endpoint + "servers/detail?all_tenants=true&tenant_id=" + projectID @@ -266,7 +267,8 @@ func (api *novaClient) ListProjectServers(ctx context.Context, projectID string) // Parse image field - Nova returns either a map or empty string "" var imageMap map[string]any if len(s.Image) > 0 && s.Image[0] == '{' { - _ = json.Unmarshal(s.Image, &imageMap) + // Intentionally ignore parse errors - imageMap will remain nil for volume-booted VMs + json.Unmarshal(s.Image, &imageMap) //nolint:errcheck // error expected for non-JSON values } // Build a minimal servers.Server for the prober vols := make([]servers.AttachedVolume, len(s.AttachedVolumes)) diff --git a/internal/scheduling/reservations/commitments/api_info.go b/internal/scheduling/reservations/commitments/api_info.go index ccb660bc9..bd3b3f3cf 100644 --- a/internal/scheduling/reservations/commitments/api_info.go +++ b/internal/scheduling/reservations/commitments/api_info.go @@ -172,7 +172,7 @@ func (api *HTTPAPI) 
buildServiceInfo(ctx context.Context, logger logr.Logger) (l "CPU cores (usable by: %s)", flavorListStr, ), - Unit: liquid.UnitNone, // Unit = 1 (count of cores) + Unit: liquid.UnitNone, // Countable unit (omitted in JSON = "1") Topology: liquid.AZAwareTopology, // Same topology as RAM NeedsResourceDemand: false, HasCapacity: true, // We report capacity (as 0 for now) @@ -188,7 +188,7 @@ func (api *HTTPAPI) buildServiceInfo(ctx context.Context, logger logr.Logger) (l "instances (usable by: %s)", flavorListStr, ), - Unit: liquid.UnitNone, // Unit = 1 (count of instances) + Unit: liquid.UnitNone, // Countable unit (omitted in JSON = "1") Topology: liquid.AZAwareTopology, // Same topology as RAM NeedsResourceDemand: false, HasCapacity: true, // We report capacity (as 0 for now) diff --git a/internal/scheduling/reservations/commitments/api_info_test.go b/internal/scheduling/reservations/commitments/api_info_test.go index efcb2d790..b733a853b 100644 --- a/internal/scheduling/reservations/commitments/api_info_test.go +++ b/internal/scheduling/reservations/commitments/api_info_test.go @@ -138,11 +138,10 @@ func TestHandleInfo_InvalidFlavorMemory(t *testing.T) { } func TestHandleInfo_HasCapacityEqualsHandlesCommitments(t *testing.T) { - // Test that for flavor groups that accept commitments: - // - Three resources are created: _ram, _cores, _instances - // - Only _ram has HandlesCommitments=true - // - All three have HasCapacity=true - // Groups that DON'T accept commitments are skipped entirely + // Test that ALL flavor groups get resources created: + // - Three resources are created per group: _ram, _cores, _instances + // - Only _ram of groups with FIXED ratio has HandlesCommitments=true + // - All resources have HasCapacity=true scheme := runtime.NewScheme() if err := v1alpha1.AddToScheme(scheme); err != nil { t.Fatalf("failed to add scheme: %v", err) @@ -218,10 +217,11 @@ func TestHandleInfo_HasCapacityEqualsHandlesCommitments(t *testing.T) { t.Fatalf("failed to 
decode response: %v", err) } - // Verify we have 3 resources for the fixed ratio group (variable ratio is skipped) + // Verify we have 6 resources (3 per flavor group, both groups included) // hana_fixed generates: _ram, _cores, _instances - if len(serviceInfo.Resources) != 3 { - t.Fatalf("expected 3 resources (_ram, _cores, _instances for hana_fixed), got %d", len(serviceInfo.Resources)) + // v2_variable generates: _ram, _cores, _instances + if len(serviceInfo.Resources) != 6 { + t.Fatalf("expected 6 resources (3 per flavor group), got %d", len(serviceInfo.Resources)) } // Test RAM resource: hw_version_hana_fixed_ram @@ -260,14 +260,37 @@ func TestHandleInfo_HasCapacityEqualsHandlesCommitments(t *testing.T) { t.Error("hw_version_hana_fixed_instances: expected HandlesCommitments=false (instances are derived)") } - // Variable ratio group should NOT have any resources (skipped entirely) - if _, ok := serviceInfo.Resources["hw_version_v2_variable_ram"]; ok { - t.Error("hw_version_v2_variable_ram should NOT exist (variable ratio groups are skipped)") + // Variable ratio group DOES have resources now, but HandlesCommitments=false for RAM + v2RamResource, ok := serviceInfo.Resources["hw_version_v2_variable_ram"] + if !ok { + t.Fatal("expected hw_version_v2_variable_ram resource to exist (all groups included)") + } + if !v2RamResource.HasCapacity { + t.Error("hw_version_v2_variable_ram: expected HasCapacity=true") + } + if v2RamResource.HandlesCommitments { + t.Error("hw_version_v2_variable_ram: expected HandlesCommitments=false (variable ratio)") + } + + v2CoresResource, ok := serviceInfo.Resources["hw_version_v2_variable_cores"] + if !ok { + t.Fatal("expected hw_version_v2_variable_cores resource to exist (all groups included)") + } + if !v2CoresResource.HasCapacity { + t.Error("hw_version_v2_variable_cores: expected HasCapacity=true") + } + if v2CoresResource.HandlesCommitments { + t.Error("hw_version_v2_variable_cores: expected HandlesCommitments=false") + } + + 
v2InstancesResource, ok := serviceInfo.Resources["hw_version_v2_variable_instances"] + if !ok { + t.Fatal("expected hw_version_v2_variable_instances resource to exist (all groups included)") } - if _, ok := serviceInfo.Resources["hw_version_v2_variable_cores"]; ok { - t.Error("hw_version_v2_variable_cores should NOT exist (variable ratio groups are skipped)") + if !v2InstancesResource.HasCapacity { + t.Error("hw_version_v2_variable_instances: expected HasCapacity=true") } - if _, ok := serviceInfo.Resources["hw_version_v2_variable_instances"]; ok { - t.Error("hw_version_v2_variable_instances should NOT exist (variable ratio groups are skipped)") + if v2InstancesResource.HandlesCommitments { + t.Error("hw_version_v2_variable_instances: expected HandlesCommitments=false") } } diff --git a/internal/scheduling/reservations/commitments/usage.go b/internal/scheduling/reservations/commitments/usage.go index 2e8fe4c81..b9f662de4 100644 --- a/internal/scheduling/reservations/commitments/usage.go +++ b/internal/scheduling/reservations/commitments/usage.go @@ -225,9 +225,10 @@ func (c *UsageCalculator) getProjectVMs( flavorGroup := flavorToGroup[server.FlavorName] // Calculate usage multiple (memory in units of smallest flavor) + // Use floor division (truncate) - actual consumption, not billing var usageMultiple uint64 if smallestMem := flavorToSmallestMemory[server.FlavorName]; smallestMem > 0 { - usageMultiple = (server.FlavorRAM + smallestMem - 1) / smallestMem // Round up + usageMultiple = server.FlavorRAM / smallestMem // Floor division (truncate) } // Normalize AZ - empty or unknown AZs become "unknown" (consistent with limes liquid-nova) diff --git a/internal/scheduling/reservations/commitments/usage_test.go b/internal/scheduling/reservations/commitments/usage_test.go index 1e2a1a64e..0df852d85 100644 --- a/internal/scheduling/reservations/commitments/usage_test.go +++ b/internal/scheduling/reservations/commitments/usage_test.go @@ -673,6 +673,175 @@ func 
TestUsageCalculator_ExpiredAndFutureCommitments(t *testing.T) { } } +// TestUsageMultipleCalculation_FloorDivision tests that RAM usage is calculated +// using floor division to handle Nova's memory overhead correctly. +// Nova flavors like "2 GiB" actually have 2032 MiB (not 2048) due to overhead. +// A "4 GiB" flavor has 4080 MiB, which is 2.007× the base unit. +// Floor division ensures 4080 / 2032 = 2 (not 3 from ceiling). +func TestUsageMultipleCalculation_FloorDivision(t *testing.T) { + log.SetLogger(zap.New(zap.WriteTo(os.Stderr), zap.UseDevMode(true))) + ctx := context.Background() + baseTime := time.Date(2026, 1, 1, 12, 0, 0, 0, time.UTC) + + // Realistic Nova flavor values with memory overhead (2032 MiB base, not 2048) + // These match real-world hw_version_2101 flavors + smallestFlavor := &TestFlavor{Name: "g_k_c1_m2_v2", Group: "hw_2101", MemoryMB: 2032, VCPUs: 1} + flavor2x := &TestFlavor{Name: "g_k_c2_m4_v2", Group: "hw_2101", MemoryMB: 4080, VCPUs: 2} // ~2× smallest (4080/2032 = 2.007) + flavor8x := &TestFlavor{Name: "g_k_c4_m16_v2", Group: "hw_2101", MemoryMB: 16368, VCPUs: 4} // ~8× smallest (16368/2032 = 8.06) + flavor16x := &TestFlavor{Name: "g_k_c16_m32_v2", Group: "hw_2101", MemoryMB: 32752, VCPUs: 16} // ~16× smallest (32752/2032 = 16.11) + + tests := []struct { + name string + vms []nova.ServerDetail + expectedRAM uint64 // Expected RAM usage in units + expectedCores uint64 // Expected cores usage + expectedInstances uint64 + }{ + { + name: "single smallest flavor - 1 unit", + vms: []nova.ServerDetail{ + { + ID: "vm-001", Name: "vm-001", Status: "ACTIVE", + TenantID: "project-A", AvailabilityZone: "az-a", + Created: baseTime.Format(time.RFC3339), + FlavorName: "g_k_c1_m2_v2", FlavorRAM: 2032, FlavorVCPUs: 1, + }, + }, + expectedRAM: 1, + expectedCores: 1, + expectedInstances: 1, + }, + { + name: "2x flavor with overhead - floor(4080/2032) = 2 units, not 3", + vms: []nova.ServerDetail{ + { + ID: "vm-001", Name: "vm-001", Status: "ACTIVE", + 
TenantID: "project-A", AvailabilityZone: "az-a",
+					Created:    baseTime.Format(time.RFC3339),
+					FlavorName: "g_k_c2_m4_v2", FlavorRAM: 4080, FlavorVCPUs: 2,
+				},
+			},
+			expectedRAM:       2, // floor(4080/2032) = 2, NOT 3 (ceiling would give 3)
+			expectedCores:     2,
+			expectedInstances: 1,
+		},
+		{
+			name: "multiple VMs - RAM units and cores diverge (per-flavor ratios differ)",
+			vms: []nova.ServerDetail{
+				{
+					ID: "vm-001", Name: "vm-001", Status: "ACTIVE",
+					TenantID: "project-A", AvailabilityZone: "az-a",
+					Created:    baseTime.Format(time.RFC3339),
+					FlavorName: "g_k_c1_m2_v2", FlavorRAM: 2032, FlavorVCPUs: 1,
+				},
+				{
+					ID: "vm-002", Name: "vm-002", Status: "ACTIVE",
+					TenantID: "project-A", AvailabilityZone: "az-a",
+					Created:    baseTime.Add(time.Second).Format(time.RFC3339),
+					FlavorName: "g_k_c2_m4_v2", FlavorRAM: 4080, FlavorVCPUs: 2,
+				},
+				{
+					ID: "vm-003", Name: "vm-003", Status: "ACTIVE",
+					TenantID: "project-A", AvailabilityZone: "az-a",
+					Created:    baseTime.Add(2 * time.Second).Format(time.RFC3339),
+					FlavorName: "g_k_c4_m16_v2", FlavorRAM: 16368, FlavorVCPUs: 4,
+				},
+				{
+					ID: "vm-004", Name: "vm-004", Status: "ACTIVE",
+					TenantID: "project-A", AvailabilityZone: "az-a",
+					Created:    baseTime.Add(3 * time.Second).Format(time.RFC3339),
+					FlavorName: "g_k_c16_m32_v2", FlavorRAM: 32752, FlavorVCPUs: 16,
+				},
+			},
+			// RAM units = floor(2032/2032) + floor(4080/2032) + floor(16368/2032) + floor(32752/2032)
+			//           = 1 + 2 + 8 + 16 = 27
+			// Cores     = 1 + 2 + 4 + 16 = 23
+			// RAM units and cores intentionally differ: each flavor has its own RAM/core ratio,
+			// so the two resources are aggregated independently.
+ expectedRAM: 27, // 1 + 2 + 8 + 16 + expectedCores: 23, // 1 + 2 + 4 + 16 + expectedInstances: 4, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + scheme := runtime.NewScheme() + _ = v1alpha1.AddToScheme(scheme) + _ = hv1.AddToScheme(scheme) + + // Build flavor groups with realistic values + flavorGroups := TestFlavorGroup{ + infoVersion: 1234, + flavors: []compute.FlavorInGroup{ + smallestFlavor.ToFlavorInGroup(), + flavor2x.ToFlavorInGroup(), + flavor8x.ToFlavorInGroup(), + flavor16x.ToFlavorInGroup(), + }, + }.ToFlavorGroupsKnowledge() + + objects := []client.Object{createKnowledgeCRD(flavorGroups)} + k8sClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objects...). + Build() + + novaClient := &mockUsageNovaClient{ + servers: map[string][]nova.ServerDetail{ + "project-A": tt.vms, + }, + } + + calc := NewUsageCalculator(k8sClient, novaClient) + logger := log.FromContext(ctx) + report, err := calc.CalculateUsage(ctx, logger, "project-A", []liquid.AvailabilityZone{"az-a"}) + if err != nil { + t.Fatalf("CalculateUsage failed: %v", err) + } + + // Check RAM usage + ramResource := report.Resources[liquid.ResourceName("hw_version_hw_2101_ram")] + if ramResource == nil { + t.Fatal("hw_version_hw_2101_ram resource not found") + } + var totalRAM uint64 + for _, azReport := range ramResource.PerAZ { + totalRAM += azReport.Usage + } + if totalRAM != tt.expectedRAM { + t.Errorf("RAM usage = %d, expected %d", totalRAM, tt.expectedRAM) + } + + // Check cores usage + coresResource := report.Resources[liquid.ResourceName("hw_version_hw_2101_cores")] + if coresResource == nil { + t.Fatal("hw_version_hw_2101_cores resource not found") + } + var totalCores uint64 + for _, azReport := range coresResource.PerAZ { + totalCores += azReport.Usage + } + if totalCores != tt.expectedCores { + t.Errorf("Cores usage = %d, expected %d", totalCores, tt.expectedCores) + } + + // Check instances usage + instancesResource := 
report.Resources[liquid.ResourceName("hw_version_hw_2101_instances")] + if instancesResource == nil { + t.Fatal("hw_version_hw_2101_instances resource not found") + } + var totalInstances uint64 + for _, azReport := range instancesResource.PerAZ { + totalInstances += azReport.Usage + } + if totalInstances != tt.expectedInstances { + t.Errorf("Instances usage = %d, expected %d", totalInstances, tt.expectedInstances) + } + }) + } +} + func TestUsageCalculator_AssignVMsToCommitments(t *testing.T) { tests := []struct { name string