From 51b87ea183f9975c73ef4452c857c564d4d16579 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Mon, 16 Mar 2026 08:23:10 +0100
Subject: [PATCH 1/4] Upgrade to latest hypervisor CRD
---
go.mod | 2 +-
go.sum | 2 +
.../compute/resource_capacity_kvm_test.go | 22 +--
internal/scheduling/nova/integration_test.go | 4 +-
.../filters/filter_has_enough_capacity.go | 2 +-
.../filter_has_enough_capacity_test.go | 4 +-
.../nova/plugins/weighers/kvm_binpack.go | 21 ++-
.../nova/plugins/weighers/kvm_binpack_test.go | 127 +++++++--------
.../weighers/kvm_prefer_smaller_hosts.go | 17 +-
.../weighers/kvm_prefer_smaller_hosts_test.go | 145 ++++++++----------
.../nova/plugins/weighers/vmware_binpack.go | 33 ++--
.../plugins/weighers/vmware_binpack_test.go | 53 +++----
12 files changed, 191 insertions(+), 241 deletions(-)
diff --git a/go.mod b/go.mod
index 2b2219b3d..5bda482d6 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module github.com/cobaltcore-dev/cortex
go 1.26
require (
- github.com/cobaltcore-dev/openstack-hypervisor-operator v0.0.0-20260313132145-05f22f69d9fd
+ github.com/cobaltcore-dev/openstack-hypervisor-operator v0.0.0-20260316070528-80f53bbce409
github.com/go-gorp/gorp v2.2.0+incompatible
github.com/gophercloud/gophercloud/v2 v2.11.1
github.com/ironcore-dev/ironcore v0.2.4
diff --git a/go.sum b/go.sum
index 01060515c..338b73d74 100644
--- a/go.sum
+++ b/go.sum
@@ -22,6 +22,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cobaltcore-dev/openstack-hypervisor-operator v0.0.0-20260313132145-05f22f69d9fd h1:IzxramZZRC/9FtQQqpbgf8KIpH4soD9cliCFs2+zPd4=
github.com/cobaltcore-dev/openstack-hypervisor-operator v0.0.0-20260313132145-05f22f69d9fd/go.mod h1:b0KmJdxvRI8UXlGe8cRm5BD8Tm2WhF7zSKMSIRGyVL4=
+github.com/cobaltcore-dev/openstack-hypervisor-operator v0.0.0-20260316070528-80f53bbce409 h1:hiTMLk6JZsmFF+ECBJnOVcDAw2d+iCXhk4eDvVpYHYM=
+github.com/cobaltcore-dev/openstack-hypervisor-operator v0.0.0-20260316070528-80f53bbce409/go.mod h1:b0KmJdxvRI8UXlGe8cRm5BD8Tm2WhF7zSKMSIRGyVL4=
github.com/containerd/continuity v0.4.5 h1:ZRoN1sXq9u7V6QoHMcVWGhOwDFqZ4B9i5H6un1Wh0x4=
github.com/containerd/continuity v0.4.5/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE=
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
diff --git a/internal/knowledge/kpis/plugins/compute/resource_capacity_kvm_test.go b/internal/knowledge/kpis/plugins/compute/resource_capacity_kvm_test.go
index 015217e15..a900cbdf4 100644
--- a/internal/knowledge/kpis/plugins/compute/resource_capacity_kvm_test.go
+++ b/internal/knowledge/kpis/plugins/compute/resource_capacity_kvm_test.go
@@ -58,11 +58,11 @@ func TestKVMResourceCapacityKPI_Collect(t *testing.T) {
},
},
Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
+ Capacity: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse("128"),
"memory": resource.MustParse("512Gi"),
},
- Allocation: map[string]resource.Quantity{
+ Allocation: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse("64"),
"memory": resource.MustParse("256Gi"),
},
@@ -148,11 +148,11 @@ func TestKVMResourceCapacityKPI_Collect(t *testing.T) {
},
},
Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
+ Capacity: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse("256"),
"memory": resource.MustParse("1Ti"),
},
- Allocation: map[string]resource.Quantity{
+ Allocation: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse("128"),
"memory": resource.MustParse("512Gi"),
},
@@ -209,11 +209,11 @@ func TestKVMResourceCapacityKPI_Collect(t *testing.T) {
},
},
Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
+ Capacity: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse("64"),
"memory": resource.MustParse("256Gi"),
},
- Allocation: map[string]resource.Quantity{
+ Allocation: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse("32"),
"memory": resource.MustParse("128Gi"),
},
@@ -255,11 +255,11 @@ func TestKVMResourceCapacityKPI_Collect(t *testing.T) {
},
},
Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
+ Capacity: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse("100"),
"memory": resource.MustParse("200Gi"),
},
- Allocation: map[string]resource.Quantity{
+ Allocation: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse("50"),
"memory": resource.MustParse("100Gi"),
},
@@ -274,11 +274,11 @@ func TestKVMResourceCapacityKPI_Collect(t *testing.T) {
},
},
Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
+ Capacity: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse("200"),
"memory": resource.MustParse("400Gi"),
},
- Allocation: map[string]resource.Quantity{
+ Allocation: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse("150"),
"memory": resource.MustParse("300Gi"),
},
@@ -332,7 +332,7 @@ func TestKVMResourceCapacityKPI_Collect(t *testing.T) {
},
},
Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
+ Capacity: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse("96"),
"memory": resource.MustParse("384Gi"),
},
diff --git a/internal/scheduling/nova/integration_test.go b/internal/scheduling/nova/integration_test.go
index 137ac5a10..f066ad821 100644
--- a/internal/scheduling/nova/integration_test.go
+++ b/internal/scheduling/nova/integration_test.go
@@ -48,11 +48,11 @@ func newHypervisor(name, cpuCap, cpuAlloc, memCap, memAlloc string) *hv1.Hypervi
Name: name,
},
Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
+ Capacity: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse(cpuCap),
"memory": resource.MustParse(memCap),
},
- Allocation: map[string]resource.Quantity{
+ Allocation: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse(cpuAlloc),
"memory": resource.MustParse(memAlloc),
},
diff --git a/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity.go b/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity.go
index a90638eac..a390f5e98 100644
--- a/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity.go
+++ b/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity.go
@@ -45,7 +45,7 @@ func (s *FilterHasEnoughCapacity) Run(traceLog *slog.Logger, request api.Externa
result := s.IncludeAllHostsFromRequest(request)
// This map holds the free resources per host.
- freeResourcesByHost := make(map[string]map[string]resource.Quantity)
+ freeResourcesByHost := make(map[string]map[hv1.ResourceName]resource.Quantity)
// The hypervisor resource auto-discovers its current utilization.
// We can use the hypervisor status to calculate the total capacity
diff --git a/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity_test.go b/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity_test.go
index cb998a286..a4cd621ac 100644
--- a/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity_test.go
+++ b/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity_test.go
@@ -39,11 +39,11 @@ func newHypervisor(name, cpuCap, cpuAlloc, memCap, memAlloc string) *hv1.Hypervi
Name: name,
},
Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
+ Capacity: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse(cpuCap),
"memory": resource.MustParse(memCap),
},
- Allocation: map[string]resource.Quantity{
+ Allocation: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse(cpuAlloc),
"memory": resource.MustParse(memAlloc),
},
diff --git a/internal/scheduling/nova/plugins/weighers/kvm_binpack.go b/internal/scheduling/nova/plugins/weighers/kvm_binpack.go
index 1a3bd7573..3bed165f4 100644
--- a/internal/scheduling/nova/plugins/weighers/kvm_binpack.go
+++ b/internal/scheduling/nova/plugins/weighers/kvm_binpack.go
@@ -13,7 +13,6 @@ import (
api "github.com/cobaltcore-dev/cortex/api/external/nova"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
- corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
)
@@ -23,7 +22,7 @@ type KVMBinpackStepOpts struct {
// node's resource utilizations after placing the VM.
// If a resource is not specified, is ignored in the score calculation
// (equivalent to a weight of 0).
- ResourceWeights map[corev1.ResourceName]float64 `json:"resourceWeights"`
+ ResourceWeights map[hv1.ResourceName]float64 `json:"resourceWeights"`
}
// Validate the options to ensure they are correct before running the weigher.
@@ -31,9 +30,9 @@ func (o KVMBinpackStepOpts) Validate() error {
if len(o.ResourceWeights) == 0 {
return errors.New("at least one resource weight must be specified")
}
- supportedResources := []corev1.ResourceName{
- corev1.ResourceMemory,
- corev1.ResourceCPU,
+ supportedResources := []hv1.ResourceName{
+ hv1.ResourceMemory,
+ hv1.ResourceCPU,
}
for resourceName, value := range o.ResourceWeights {
if !slices.Contains(supportedResources, resourceName) {
@@ -94,7 +93,7 @@ func (s *KVMBinpackStep) Run(traceLog *slog.Logger, request api.ExternalSchedule
var totalWeightedUtilization, totalWeight float64
for resourceName, weight := range s.Options.ResourceWeights {
- capacity, ok := hv.Status.Capacity[resourceName.String()]
+ capacity, ok := hv.Status.Capacity[resourceName]
if !ok {
traceLog.Warn("no capacity in status, skipping",
"host", host, "resource", resourceName)
@@ -105,7 +104,7 @@ func (s *KVMBinpackStep) Run(traceLog *slog.Logger, request api.ExternalSchedule
"host", host, "resource", resourceName)
continue
}
- allocation, ok := hv.Status.Allocation[resourceName.String()]
+ allocation, ok := hv.Status.Allocation[resourceName]
if !ok {
traceLog.Warn("no allocation in status, skipping",
"host", host, "resource", resourceName)
@@ -138,15 +137,15 @@ func (s *KVMBinpackStep) Run(traceLog *slog.Logger, request api.ExternalSchedule
}
// calcVMResources calculates the total resource requests for the VM to be scheduled.
-func (s *KVMBinpackStep) calcVMResources(req api.ExternalSchedulerRequest) map[corev1.ResourceName]resource.Quantity {
- resources := make(map[corev1.ResourceName]resource.Quantity)
+func (s *KVMBinpackStep) calcVMResources(req api.ExternalSchedulerRequest) map[hv1.ResourceName]resource.Quantity {
+ resources := make(map[hv1.ResourceName]resource.Quantity)
resourcesMemBytes := int64(req.Spec.Data.Flavor.Data.MemoryMB * 1_000_000) //nolint:gosec // memory values are bounded by Nova
resourcesMemBytes *= int64(req.Spec.Data.NumInstances) //nolint:gosec // instance count is bounded by Nova
- resources[corev1.ResourceMemory] = *resource.
+ resources[hv1.ResourceMemory] = *resource.
NewQuantity(resourcesMemBytes, resource.DecimalSI)
resourcesCPU := int64(req.Spec.Data.Flavor.Data.VCPUs) //nolint:gosec // vCPU values are bounded by Nova
resourcesCPU *= int64(req.Spec.Data.NumInstances) //nolint:gosec // instance count is bounded by Nova
- resources[corev1.ResourceCPU] = *resource.
+ resources[hv1.ResourceCPU] = *resource.
NewQuantity(resourcesCPU, resource.DecimalSI)
return resources
}
diff --git a/internal/scheduling/nova/plugins/weighers/kvm_binpack_test.go b/internal/scheduling/nova/plugins/weighers/kvm_binpack_test.go
index dde381e71..6c742ad84 100644
--- a/internal/scheduling/nova/plugins/weighers/kvm_binpack_test.go
+++ b/internal/scheduling/nova/plugins/weighers/kvm_binpack_test.go
@@ -10,7 +10,6 @@ import (
api "github.com/cobaltcore-dev/cortex/api/external/nova"
hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
- corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -23,11 +22,11 @@ func newHypervisor(name, capacityCPU, capacityMem, allocationCPU, allocationMem
Name: name,
},
Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
+ Capacity: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse(capacityCPU),
"memory": resource.MustParse(capacityMem),
},
- Allocation: map[string]resource.Quantity{
+ Allocation: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse(allocationCPU),
"memory": resource.MustParse(allocationMem),
},
@@ -81,9 +80,9 @@ func TestKVMBinpackStepOpts_Validate(t *testing.T) {
{
name: "valid opts with memory and cpu weights",
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
- corev1.ResourceCPU: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
+ hv1.ResourceCPU: 1.0,
},
},
wantErr: false,
@@ -91,9 +90,9 @@ func TestKVMBinpackStepOpts_Validate(t *testing.T) {
{
name: "inverted weights should raise error",
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: -1.0,
- corev1.ResourceCPU: -1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: -1.0,
+ hv1.ResourceCPU: -1.0,
},
},
wantErr: true,
@@ -101,9 +100,9 @@ func TestKVMBinpackStepOpts_Validate(t *testing.T) {
{
name: "zero weights should raise error",
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 0.0,
- corev1.ResourceCPU: 0.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 0.0,
+ hv1.ResourceCPU: 0.0,
},
},
wantErr: true,
@@ -111,8 +110,8 @@ func TestKVMBinpackStepOpts_Validate(t *testing.T) {
{
name: "valid opts with only memory weight",
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 2.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 2.0,
},
},
wantErr: false,
@@ -120,8 +119,8 @@ func TestKVMBinpackStepOpts_Validate(t *testing.T) {
{
name: "valid opts with only cpu weight",
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceCPU: 0.5,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceCPU: 0.5,
},
},
wantErr: false,
@@ -129,9 +128,9 @@ func TestKVMBinpackStepOpts_Validate(t *testing.T) {
{
name: "zero weights should raise error",
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 0.0,
- corev1.ResourceCPU: 0.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 0.0,
+ hv1.ResourceCPU: 0.0,
},
},
wantErr: true,
@@ -139,7 +138,7 @@ func TestKVMBinpackStepOpts_Validate(t *testing.T) {
{
name: "valid opts with empty resource weights",
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{},
+ ResourceWeights: map[hv1.ResourceName]float64{},
},
wantErr: true,
},
@@ -148,30 +147,10 @@ func TestKVMBinpackStepOpts_Validate(t *testing.T) {
opts: KVMBinpackStepOpts{},
wantErr: true,
},
- {
- name: "invalid opts with unsupported resource",
- opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceStorage: 1.0,
- },
- },
- wantErr: true,
- errMsg: "unsupported resource",
- },
- {
- name: "invalid opts with unsupported ephemeral-storage resource",
- opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceEphemeralStorage: 1.0,
- },
- },
- wantErr: true,
- errMsg: "unsupported resource",
- },
{
name: "invalid opts with custom unsupported resource",
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
+ ResourceWeights: map[hv1.ResourceName]float64{
"nvidia.com/gpu": 1.0,
},
},
@@ -221,8 +200,8 @@ func TestKVMBinpackStep_Run(t *testing.T) {
},
request: newBinpackRequest(8192, 4, 1, []string{"host1", "host2"}), // 8Gi memory
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
},
},
expectedWeights: map[string]float64{ // with 0.1 tolerance
@@ -243,8 +222,8 @@ func TestKVMBinpackStep_Run(t *testing.T) {
},
request: newBinpackRequest(8192, 4, 1, []string{"host1", "host2"}),
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceCPU: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceCPU: 1.0,
},
},
expectedWeights: map[string]float64{ // with 0.1 tolerance
@@ -261,9 +240,9 @@ func TestKVMBinpackStep_Run(t *testing.T) {
},
request: newBinpackRequest(8192, 4, 1, []string{"host1", "host2"}),
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceCPU: 1.0,
- corev1.ResourceMemory: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceCPU: 1.0,
+ hv1.ResourceMemory: 1.0,
},
},
expectedWeights: map[string]float64{ // with 0.1 tolerance
@@ -281,9 +260,9 @@ func TestKVMBinpackStep_Run(t *testing.T) {
},
request: newBinpackRequest(8192, 4, 1, []string{"host1"}),
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceCPU: 2.0,
- corev1.ResourceMemory: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceCPU: 2.0,
+ hv1.ResourceMemory: 1.0,
},
},
expectedWeights: map[string]float64{ // with 0.1 tolerance
@@ -299,8 +278,8 @@ func TestKVMBinpackStep_Run(t *testing.T) {
},
request: newBinpackRequest(8192, 4, 2, []string{"host1"}), // 2 instances
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceCPU: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceCPU: 1.0,
},
},
expectedWeights: map[string]float64{ // with 0.1 tolerance
@@ -314,8 +293,8 @@ func TestKVMBinpackStep_Run(t *testing.T) {
hypervisors: []*hv1.Hypervisor{},
request: newBinpackRequest(8192, 4, 1, []string{"host1", "host2"}),
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceCPU: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceCPU: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -333,8 +312,8 @@ func TestKVMBinpackStep_Run(t *testing.T) {
},
request: newBinpackRequest(8192, 4, 1, []string{"host1", "host2"}),
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceCPU: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceCPU: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -351,7 +330,7 @@ func TestKVMBinpackStep_Run(t *testing.T) {
},
request: newBinpackRequest(8192, 4, 1, []string{"host1"}),
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{},
+ ResourceWeights: map[hv1.ResourceName]float64{},
},
expectedWeights: map[string]float64{
"host1": 0, // No weights configured, score is 0
@@ -364,11 +343,11 @@ func TestKVMBinpackStep_Run(t *testing.T) {
{
ObjectMeta: metav1.ObjectMeta{Name: "host1"},
Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
+ Capacity: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse("0"),
"memory": resource.MustParse("100Gi"),
},
- Allocation: map[string]resource.Quantity{
+ Allocation: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse("0"),
"memory": resource.MustParse("80Gi"),
},
@@ -377,8 +356,8 @@ func TestKVMBinpackStep_Run(t *testing.T) {
},
request: newBinpackRequest(8192, 4, 1, []string{"host1"}),
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceCPU: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceCPU: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -392,10 +371,10 @@ func TestKVMBinpackStep_Run(t *testing.T) {
{
ObjectMeta: metav1.ObjectMeta{Name: "host1"},
Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
+ Capacity: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse("100"),
},
- Allocation: map[string]resource.Quantity{
+ Allocation: map[hv1.ResourceName]resource.Quantity{
// No CPU allocation
},
},
@@ -403,8 +382,8 @@ func TestKVMBinpackStep_Run(t *testing.T) {
},
request: newBinpackRequest(8192, 4, 1, []string{"host1"}),
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceCPU: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceCPU: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -418,10 +397,10 @@ func TestKVMBinpackStep_Run(t *testing.T) {
{
ObjectMeta: metav1.ObjectMeta{Name: "host1"},
Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
+ Capacity: map[hv1.ResourceName]resource.Quantity{
// No CPU capacity
},
- Allocation: map[string]resource.Quantity{
+ Allocation: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse("80"),
},
},
@@ -429,8 +408,8 @@ func TestKVMBinpackStep_Run(t *testing.T) {
},
request: newBinpackRequest(8192, 4, 1, []string{"host1"}),
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceCPU: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceCPU: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -446,8 +425,8 @@ func TestKVMBinpackStep_Run(t *testing.T) {
},
request: newBinpackRequest(20480, 20, 1, []string{"host1"}), // 20Gi, 20 CPUs - more than available
opts: KVMBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceCPU: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceCPU: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -547,7 +526,7 @@ func TestKVMBinpackStep_calcVMResources(t *testing.T) {
step := &KVMBinpackStep{}
resources := step.calcVMResources(tt.request)
- memResource, ok := resources[corev1.ResourceMemory]
+ memResource, ok := resources[hv1.ResourceMemory]
if !ok {
t.Error("expected memory resource to be present")
} else {
@@ -557,7 +536,7 @@ func TestKVMBinpackStep_calcVMResources(t *testing.T) {
}
}
- cpuResource, ok := resources[corev1.ResourceCPU]
+ cpuResource, ok := resources[hv1.ResourceCPU]
if !ok {
t.Error("expected CPU resource to be present")
} else {
diff --git a/internal/scheduling/nova/plugins/weighers/kvm_prefer_smaller_hosts.go b/internal/scheduling/nova/plugins/weighers/kvm_prefer_smaller_hosts.go
index 1bb070592..8bb5928ee 100644
--- a/internal/scheduling/nova/plugins/weighers/kvm_prefer_smaller_hosts.go
+++ b/internal/scheduling/nova/plugins/weighers/kvm_prefer_smaller_hosts.go
@@ -13,7 +13,6 @@ import (
api "github.com/cobaltcore-dev/cortex/api/external/nova"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
- corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
)
@@ -23,7 +22,7 @@ type KVMPreferSmallerHostsStepOpts struct {
// of the normalized distances from the smallest capacity for each resource.
// If a resource is not specified, it is ignored in the score calculation
// (equivalent to a weight of 0).
- ResourceWeights map[corev1.ResourceName]float64 `json:"resourceWeights"`
+ ResourceWeights map[hv1.ResourceName]float64 `json:"resourceWeights"`
}
// Validate the options to ensure they are correct before running the weigher.
@@ -31,9 +30,9 @@ func (o KVMPreferSmallerHostsStepOpts) Validate() error {
if len(o.ResourceWeights) == 0 {
return errors.New("at least one resource weight must be specified")
}
- supportedResources := []corev1.ResourceName{
- corev1.ResourceMemory,
- corev1.ResourceCPU,
+ supportedResources := []hv1.ResourceName{
+ hv1.ResourceMemory,
+ hv1.ResourceCPU,
}
for resourceName, val := range o.ResourceWeights {
if val < 0 {
@@ -73,8 +72,8 @@ func (s *KVMPreferSmallerHostsStep) Run(traceLog *slog.Logger, request api.Exter
}
// Calculate smallest and largest capacity for each resource across active hosts
- smallest := make(map[corev1.ResourceName]*resource.Quantity)
- largest := make(map[corev1.ResourceName]*resource.Quantity)
+ smallest := make(map[hv1.ResourceName]*resource.Quantity)
+ largest := make(map[hv1.ResourceName]*resource.Quantity)
for resourceName := range s.Options.ResourceWeights {
for _, hv := range hvs.Items {
@@ -82,7 +81,7 @@ func (s *KVMPreferSmallerHostsStep) Run(traceLog *slog.Logger, request api.Exter
if _, ok := result.Activations[hv.Name]; !ok {
continue
}
- capacity, ok := hv.Status.Capacity[resourceName.String()]
+ capacity, ok := hv.Status.Capacity[resourceName]
if !ok {
traceLog.Warn("hypervisor has no capacity for resource, skipping",
"host", hv.Name, "resource", resourceName)
@@ -107,7 +106,7 @@ func (s *KVMPreferSmallerHostsStep) Run(traceLog *slog.Logger, request api.Exter
var totalWeightedScore, totalWeight float64
for resourceName, weight := range s.Options.ResourceWeights {
- capacity, ok := hv.Status.Capacity[resourceName.String()]
+ capacity, ok := hv.Status.Capacity[resourceName]
if !ok {
traceLog.Warn("hypervisor has no capacity for resource, skipping",
"host", hv.Name, "resource", resourceName)
diff --git a/internal/scheduling/nova/plugins/weighers/kvm_prefer_smaller_hosts_test.go b/internal/scheduling/nova/plugins/weighers/kvm_prefer_smaller_hosts_test.go
index 545c124ab..e00593ea5 100644
--- a/internal/scheduling/nova/plugins/weighers/kvm_prefer_smaller_hosts_test.go
+++ b/internal/scheduling/nova/plugins/weighers/kvm_prefer_smaller_hosts_test.go
@@ -10,7 +10,6 @@ import (
api "github.com/cobaltcore-dev/cortex/api/external/nova"
hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
- corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -23,7 +22,7 @@ func newHypervisorWithCapacity(name, capacityCPU, capacityMem string) *hv1.Hyper
Name: name,
},
Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
+ Capacity: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse(capacityCPU),
"memory": resource.MustParse(capacityMem),
},
@@ -77,9 +76,9 @@ func TestKVMPreferSmallerHostsStepOpts_Validate(t *testing.T) {
{
name: "valid opts with memory and cpu weights",
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
- corev1.ResourceCPU: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
+ hv1.ResourceCPU: 1.0,
},
},
wantErr: false,
@@ -87,8 +86,8 @@ func TestKVMPreferSmallerHostsStepOpts_Validate(t *testing.T) {
{
name: "valid opts with only memory weight",
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 2.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 2.0,
},
},
wantErr: false,
@@ -96,8 +95,8 @@ func TestKVMPreferSmallerHostsStepOpts_Validate(t *testing.T) {
{
name: "valid opts with only cpu weight",
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceCPU: 0.5,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceCPU: 0.5,
},
},
wantErr: false,
@@ -105,9 +104,9 @@ func TestKVMPreferSmallerHostsStepOpts_Validate(t *testing.T) {
{
name: "valid opts with zero weights",
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 0.0,
- corev1.ResourceCPU: 0.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 0.0,
+ hv1.ResourceCPU: 0.0,
},
},
wantErr: false,
@@ -115,7 +114,7 @@ func TestKVMPreferSmallerHostsStepOpts_Validate(t *testing.T) {
{
name: "invalid opts with empty resource weights",
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{},
+ ResourceWeights: map[hv1.ResourceName]float64{},
},
wantErr: true,
errMsg: "at least one resource weight must be specified",
@@ -129,8 +128,8 @@ func TestKVMPreferSmallerHostsStepOpts_Validate(t *testing.T) {
{
name: "invalid opts with negative weight",
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: -1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: -1.0,
},
},
wantErr: true,
@@ -139,37 +138,17 @@ func TestKVMPreferSmallerHostsStepOpts_Validate(t *testing.T) {
{
name: "invalid opts with negative cpu weight",
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceCPU: -0.5,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceCPU: -0.5,
},
},
wantErr: true,
errMsg: "resource weights must be greater than or equal to zero",
},
- {
- name: "invalid opts with unsupported resource",
- opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceStorage: 1.0,
- },
- },
- wantErr: true,
- errMsg: "unsupported resource",
- },
- {
- name: "invalid opts with unsupported ephemeral-storage resource",
- opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceEphemeralStorage: 1.0,
- },
- },
- wantErr: true,
- errMsg: "unsupported resource",
- },
{
name: "invalid opts with custom unsupported resource",
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
+ ResourceWeights: map[hv1.ResourceName]float64{
"nvidia.com/gpu": 1.0,
},
},
@@ -216,8 +195,8 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
},
request: newPreferSmallerHostsRequest([]string{"host1", "host2", "host3"}),
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -236,8 +215,8 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
},
request: newPreferSmallerHostsRequest([]string{"host1", "host2", "host3"}),
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceCPU: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceCPU: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -259,9 +238,9 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
},
request: newPreferSmallerHostsRequest([]string{"host1", "host2", "host3"}),
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
- corev1.ResourceCPU: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
+ hv1.ResourceCPU: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -284,9 +263,9 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
},
request: newPreferSmallerHostsRequest([]string{"host1", "host2"}),
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 2.0, // memory is weighted 2x
- corev1.ResourceCPU: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 2.0, // memory is weighted 2x
+ hv1.ResourceCPU: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -305,8 +284,8 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
},
request: newPreferSmallerHostsRequest([]string{"host1", "host2"}),
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -324,8 +303,8 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
},
request: newPreferSmallerHostsRequest([]string{"host1", "host2", "host3"}),
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -343,8 +322,8 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
},
request: newPreferSmallerHostsRequest([]string{"host1"}),
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -358,8 +337,8 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
hypervisors: []*hv1.Hypervisor{},
request: newPreferSmallerHostsRequest([]string{"host1", "host2"}),
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -378,8 +357,8 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
},
request: newPreferSmallerHostsRequest([]string{"host1", "host2", "host3"}),
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -397,7 +376,7 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
{
ObjectMeta: metav1.ObjectMeta{Name: "host3"},
Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
+ Capacity: map[hv1.ResourceName]resource.Quantity{
"cpu": resource.MustParse("100"),
// No memory capacity
},
@@ -406,8 +385,8 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
},
request: newPreferSmallerHostsRequest([]string{"host1", "host2", "host3"}),
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -427,8 +406,8 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
// Only host1 and host2 in the request (host3 was filtered out)
request: newPreferSmallerHostsRequest([]string{"host1", "host2"}),
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -446,8 +425,8 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
},
request: newPreferSmallerHostsRequest([]string{"host1", "host2", "host3"}),
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -468,8 +447,8 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
},
request: newPreferSmallerHostsRequest([]string{"host1", "host2", "host3", "host4"}),
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -487,20 +466,20 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
{
ObjectMeta: metav1.ObjectMeta{Name: "host1"},
Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{},
+ Capacity: map[hv1.ResourceName]resource.Quantity{},
},
},
{
ObjectMeta: metav1.ObjectMeta{Name: "host2"},
Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{},
+ Capacity: map[hv1.ResourceName]resource.Quantity{},
},
},
},
request: newPreferSmallerHostsRequest([]string{"host1", "host2"}),
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -518,8 +497,8 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
},
request: newPreferSmallerHostsRequest([]string{"host1", "host2"}),
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -538,8 +517,8 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
},
request: newPreferSmallerHostsRequest([]string{"host1", "host2"}),
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
},
},
expectedWeights: map[string]float64{
@@ -555,7 +534,7 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
{
ObjectMeta: metav1.ObjectMeta{Name: "host1"},
Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
+ Capacity: map[hv1.ResourceName]resource.Quantity{
"memory": resource.MustParse("64Gi"),
// No CPU
},
@@ -564,7 +543,7 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
{
ObjectMeta: metav1.ObjectMeta{Name: "host2"},
Status: hv1.HypervisorStatus{
- Capacity: map[string]resource.Quantity{
+ Capacity: map[hv1.ResourceName]resource.Quantity{
"memory": resource.MustParse("128Gi"),
// No CPU
},
@@ -573,9 +552,9 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
},
request: newPreferSmallerHostsRequest([]string{"host1", "host2"}),
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
- corev1.ResourceCPU: 1.0, // CPU requested but not available
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
+ hv1.ResourceCPU: 1.0, // CPU requested but not available
},
},
expectedWeights: map[string]float64{
@@ -594,9 +573,9 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
},
request: newPreferSmallerHostsRequest([]string{"host1", "host2", "host3"}),
opts: KVMPreferSmallerHostsStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 0.0, // zero weight - ignored
- corev1.ResourceCPU: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 0.0, // zero weight - ignored
+ hv1.ResourceCPU: 1.0,
},
},
expectedWeights: map[string]float64{
diff --git a/internal/scheduling/nova/plugins/weighers/vmware_binpack.go b/internal/scheduling/nova/plugins/weighers/vmware_binpack.go
index 217dc7d7f..f52ad5162 100644
--- a/internal/scheduling/nova/plugins/weighers/vmware_binpack.go
+++ b/internal/scheduling/nova/plugins/weighers/vmware_binpack.go
@@ -14,6 +14,7 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
+ hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -25,7 +26,7 @@ type VMwareBinpackStepOpts struct {
// node's resource utilizations after placing the VM.
// If a resource is not specified, is ignored in the score calculation
// (equivalent to a weight of 0).
- ResourceWeights map[corev1.ResourceName]float64 `json:"resourceWeights"`
+ ResourceWeights map[hv1.ResourceName]float64 `json:"resourceWeights"`
}
// Validate the options to ensure they are correct before running the weigher.
@@ -33,9 +34,9 @@ func (o VMwareBinpackStepOpts) Validate() error {
if len(o.ResourceWeights) == 0 {
return errors.New("at least one resource weight must be specified")
}
- supportedResources := []corev1.ResourceName{
- corev1.ResourceMemory,
- corev1.ResourceCPU,
+ supportedResources := []hv1.ResourceName{
+ hv1.ResourceMemory,
+ hv1.ResourceCPU,
}
for resourceName, value := range o.ResourceWeights {
if !slices.Contains(supportedResources, resourceName) {
@@ -162,37 +163,37 @@ func (s *VMwareBinpackStep) Run(traceLog *slog.Logger, request api.ExternalSched
}
// calcHostCapacity calculates the total capacity of the host.
-func (s *VMwareBinpackStep) calcHostCapacity(hostUtilization compute.HostUtilization) map[corev1.ResourceName]resource.Quantity {
- resources := make(map[corev1.ResourceName]resource.Quantity)
+func (s *VMwareBinpackStep) calcHostCapacity(hostUtilization compute.HostUtilization) map[hv1.ResourceName]resource.Quantity {
+ resources := make(map[hv1.ResourceName]resource.Quantity)
capaMemoryBytes := int64(hostUtilization.TotalRAMAllocatableMB) * 1_000_000
- resources[corev1.ResourceMemory] = *resource.
+ resources[hv1.ResourceMemory] = *resource.
NewQuantity(capaMemoryBytes, resource.DecimalSI)
capaCPU := int64(hostUtilization.TotalVCPUsAllocatable)
- resources[corev1.ResourceCPU] = *resource.
+ resources[hv1.ResourceCPU] = *resource.
NewQuantity(capaCPU, resource.DecimalSI)
return resources
}
// calcHostAllocation calculates the total allocated resources on the host.
-func (s *VMwareBinpackStep) calcHostAllocation(hostUtilization compute.HostUtilization) map[corev1.ResourceName]resource.Quantity {
- resources := make(map[corev1.ResourceName]resource.Quantity)
- resources[corev1.ResourceMemory] = *resource.
+func (s *VMwareBinpackStep) calcHostAllocation(hostUtilization compute.HostUtilization) map[hv1.ResourceName]resource.Quantity {
+ resources := make(map[hv1.ResourceName]resource.Quantity)
+ resources[hv1.ResourceMemory] = *resource.
NewQuantity(int64(hostUtilization.RAMUsedMB)*1_000_000, resource.DecimalSI)
- resources[corev1.ResourceCPU] = *resource.
+ resources[hv1.ResourceCPU] = *resource.
NewQuantity(int64(hostUtilization.VCPUsUsed), resource.DecimalSI)
return resources
}
// calcVMResources calculates the total resource requests for the VM to be scheduled.
-func (s *VMwareBinpackStep) calcVMResources(req api.ExternalSchedulerRequest) map[corev1.ResourceName]resource.Quantity {
- resources := make(map[corev1.ResourceName]resource.Quantity)
+func (s *VMwareBinpackStep) calcVMResources(req api.ExternalSchedulerRequest) map[hv1.ResourceName]resource.Quantity {
+ resources := make(map[hv1.ResourceName]resource.Quantity)
resourcesMemBytes := int64(req.Spec.Data.Flavor.Data.MemoryMB * 1_000_000) //nolint:gosec // memory values are bounded by Nova
resourcesMemBytes *= int64(req.Spec.Data.NumInstances) //nolint:gosec // instance count is bounded by Nova
- resources[corev1.ResourceMemory] = *resource.
+ resources[hv1.ResourceMemory] = *resource.
NewQuantity(resourcesMemBytes, resource.DecimalSI)
resourcesCPU := int64(req.Spec.Data.Flavor.Data.VCPUs) //nolint:gosec // vCPU values are bounded by Nova
resourcesCPU *= int64(req.Spec.Data.NumInstances) //nolint:gosec // instance count is bounded by Nova
- resources[corev1.ResourceCPU] = *resource.
+ resources[hv1.ResourceCPU] = *resource.
NewQuantity(resourcesCPU, resource.DecimalSI)
return resources
}
diff --git a/internal/scheduling/nova/plugins/weighers/vmware_binpack_test.go b/internal/scheduling/nova/plugins/weighers/vmware_binpack_test.go
index cdca6c569..8574fafca 100644
--- a/internal/scheduling/nova/plugins/weighers/vmware_binpack_test.go
+++ b/internal/scheduling/nova/plugins/weighers/vmware_binpack_test.go
@@ -10,7 +10,7 @@ import (
api "github.com/cobaltcore-dev/cortex/api/external/nova"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
- corev1 "k8s.io/api/core/v1"
+ hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
@@ -24,9 +24,9 @@ func TestVMwareBinpackStepOpts_Validate(t *testing.T) {
{
name: "valid opts with memory and cpu",
opts: VMwareBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
- corev1.ResourceCPU: 1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
+ hv1.ResourceCPU: 1.0,
},
},
wantError: false,
@@ -34,8 +34,8 @@ func TestVMwareBinpackStepOpts_Validate(t *testing.T) {
{
name: "valid opts with only memory",
opts: VMwareBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 2.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 2.0,
},
},
wantError: false,
@@ -43,8 +43,8 @@ func TestVMwareBinpackStepOpts_Validate(t *testing.T) {
{
name: "valid opts with only cpu",
opts: VMwareBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceCPU: 0.5,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceCPU: 0.5,
},
},
wantError: false,
@@ -52,24 +52,15 @@ func TestVMwareBinpackStepOpts_Validate(t *testing.T) {
{
name: "invalid opts - empty resource weights",
opts: VMwareBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{},
- },
- wantError: true,
- },
- {
- name: "invalid opts - unsupported resource",
- opts: VMwareBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceStorage: 1.0,
- },
+ ResourceWeights: map[hv1.ResourceName]float64{},
},
wantError: true,
},
{
name: "invalid opts - zero weight",
opts: VMwareBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 0.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 0.0,
},
},
wantError: true,
@@ -77,8 +68,8 @@ func TestVMwareBinpackStepOpts_Validate(t *testing.T) {
{
name: "invalid opts - negative weight",
opts: VMwareBinpackStepOpts{
- ResourceWeights: map[corev1.ResourceName]float64{
- corev1.ResourceCPU: -1.0,
+ ResourceWeights: map[hv1.ResourceName]float64{
+ hv1.ResourceCPU: -1.0,
},
},
wantError: true,
@@ -142,9 +133,9 @@ func TestVMwareBinpackStep_Run(t *testing.T) {
}
step := &VMwareBinpackStep{}
- step.Options.ResourceWeights = map[corev1.ResourceName]float64{
- corev1.ResourceMemory: 1.0,
- corev1.ResourceCPU: 1.0,
+ step.Options.ResourceWeights = map[hv1.ResourceName]float64{
+ hv1.ResourceMemory: 1.0,
+ hv1.ResourceCPU: 1.0,
}
step.Client = fake.NewClientBuilder().
WithScheme(scheme).
@@ -242,7 +233,7 @@ func TestVMwareBinpackStep_CalcHostCapacity(t *testing.T) {
// Memory capacity: 6000 * 1_000_000 = 6_000_000_000 bytes
expectedMemoryBytes := int64(6000) * 1_000_000
- memoryCapacity := capacity[corev1.ResourceMemory]
+ memoryCapacity := capacity[hv1.ResourceMemory]
if memoryCapacity.Value() != expectedMemoryBytes {
t.Errorf("expected memory capacity %d, got %d",
expectedMemoryBytes, memoryCapacity.Value())
@@ -250,7 +241,7 @@ func TestVMwareBinpackStep_CalcHostCapacity(t *testing.T) {
// CPU capacity: 6
expectedCPU := int64(6)
- cpuCapacity := capacity[corev1.ResourceCPU]
+ cpuCapacity := capacity[hv1.ResourceCPU]
if cpuCapacity.Value() != expectedCPU {
t.Errorf("expected CPU capacity %d, got %d",
expectedCPU, cpuCapacity.Value())
@@ -272,7 +263,7 @@ func TestVMwareBinpackStep_CalcHostAllocation(t *testing.T) {
// Memory allocation: 4000 * 1_000_000 = 4_000_000_000 bytes
expectedMemoryBytes := int64(4000) * 1_000_000
- memoryAllocation := allocation[corev1.ResourceMemory]
+ memoryAllocation := allocation[hv1.ResourceMemory]
if memoryAllocation.Value() != expectedMemoryBytes {
t.Errorf("expected memory allocation %d, got %d",
expectedMemoryBytes, memoryAllocation.Value())
@@ -280,7 +271,7 @@ func TestVMwareBinpackStep_CalcHostAllocation(t *testing.T) {
// CPU allocation: 4
expectedCPU := int64(4)
- cpuAllocation := allocation[corev1.ResourceCPU]
+ cpuAllocation := allocation[hv1.ResourceCPU]
if cpuAllocation.Value() != expectedCPU {
t.Errorf("expected CPU allocation %d, got %d",
expectedCPU, cpuAllocation.Value())
@@ -338,13 +329,13 @@ func TestVMwareBinpackStep_CalcVMResources(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
resources := step.calcVMResources(tt.request)
- memoryResources := resources[corev1.ResourceMemory]
+ memoryResources := resources[hv1.ResourceMemory]
if memoryResources.Value() != tt.expectedMemory {
t.Errorf("expected memory %d, got %d",
tt.expectedMemory, memoryResources.Value())
}
- cpuResources := resources[corev1.ResourceCPU]
+ cpuResources := resources[hv1.ResourceCPU]
if cpuResources.Value() != tt.expectedCPU {
t.Errorf("expected CPU %d, got %d",
tt.expectedCPU, cpuResources.Value())
From 27da32b7db82f685cf4b0a29869bf0c96320ace8 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Mon, 16 Mar 2026 08:37:20 +0100
Subject: [PATCH 2/4] Revert changes to vmware binpack weigher
---
.../nova/plugins/weighers/vmware_binpack.go | 33 ++++++------
.../plugins/weighers/vmware_binpack_test.go | 53 +++++++++++--------
2 files changed, 47 insertions(+), 39 deletions(-)
diff --git a/internal/scheduling/nova/plugins/weighers/vmware_binpack.go b/internal/scheduling/nova/plugins/weighers/vmware_binpack.go
index f52ad5162..217dc7d7f 100644
--- a/internal/scheduling/nova/plugins/weighers/vmware_binpack.go
+++ b/internal/scheduling/nova/plugins/weighers/vmware_binpack.go
@@ -14,7 +14,6 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
"github.com/cobaltcore-dev/cortex/internal/scheduling/lib"
- hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -26,7 +25,7 @@ type VMwareBinpackStepOpts struct {
// node's resource utilizations after placing the VM.
// If a resource is not specified, is ignored in the score calculation
// (equivalent to a weight of 0).
- ResourceWeights map[hv1.ResourceName]float64 `json:"resourceWeights"`
+ ResourceWeights map[corev1.ResourceName]float64 `json:"resourceWeights"`
}
// Validate the options to ensure they are correct before running the weigher.
@@ -34,9 +33,9 @@ func (o VMwareBinpackStepOpts) Validate() error {
if len(o.ResourceWeights) == 0 {
return errors.New("at least one resource weight must be specified")
}
- supportedResources := []hv1.ResourceName{
- hv1.ResourceMemory,
- hv1.ResourceCPU,
+ supportedResources := []corev1.ResourceName{
+ corev1.ResourceMemory,
+ corev1.ResourceCPU,
}
for resourceName, value := range o.ResourceWeights {
if !slices.Contains(supportedResources, resourceName) {
@@ -163,37 +162,37 @@ func (s *VMwareBinpackStep) Run(traceLog *slog.Logger, request api.ExternalSched
}
// calcHostCapacity calculates the total capacity of the host.
-func (s *VMwareBinpackStep) calcHostCapacity(hostUtilization compute.HostUtilization) map[hv1.ResourceName]resource.Quantity {
- resources := make(map[hv1.ResourceName]resource.Quantity)
+func (s *VMwareBinpackStep) calcHostCapacity(hostUtilization compute.HostUtilization) map[corev1.ResourceName]resource.Quantity {
+ resources := make(map[corev1.ResourceName]resource.Quantity)
capaMemoryBytes := int64(hostUtilization.TotalRAMAllocatableMB) * 1_000_000
- resources[hv1.ResourceMemory] = *resource.
+ resources[corev1.ResourceMemory] = *resource.
NewQuantity(capaMemoryBytes, resource.DecimalSI)
capaCPU := int64(hostUtilization.TotalVCPUsAllocatable)
- resources[hv1.ResourceCPU] = *resource.
+ resources[corev1.ResourceCPU] = *resource.
NewQuantity(capaCPU, resource.DecimalSI)
return resources
}
// calcHostAllocation calculates the total allocated resources on the host.
-func (s *VMwareBinpackStep) calcHostAllocation(hostUtilization compute.HostUtilization) map[hv1.ResourceName]resource.Quantity {
- resources := make(map[hv1.ResourceName]resource.Quantity)
- resources[hv1.ResourceMemory] = *resource.
+func (s *VMwareBinpackStep) calcHostAllocation(hostUtilization compute.HostUtilization) map[corev1.ResourceName]resource.Quantity {
+ resources := make(map[corev1.ResourceName]resource.Quantity)
+ resources[corev1.ResourceMemory] = *resource.
NewQuantity(int64(hostUtilization.RAMUsedMB)*1_000_000, resource.DecimalSI)
- resources[hv1.ResourceCPU] = *resource.
+ resources[corev1.ResourceCPU] = *resource.
NewQuantity(int64(hostUtilization.VCPUsUsed), resource.DecimalSI)
return resources
}
// calcVMResources calculates the total resource requests for the VM to be scheduled.
-func (s *VMwareBinpackStep) calcVMResources(req api.ExternalSchedulerRequest) map[hv1.ResourceName]resource.Quantity {
- resources := make(map[hv1.ResourceName]resource.Quantity)
+func (s *VMwareBinpackStep) calcVMResources(req api.ExternalSchedulerRequest) map[corev1.ResourceName]resource.Quantity {
+ resources := make(map[corev1.ResourceName]resource.Quantity)
resourcesMemBytes := int64(req.Spec.Data.Flavor.Data.MemoryMB * 1_000_000) //nolint:gosec // memory values are bounded by Nova
resourcesMemBytes *= int64(req.Spec.Data.NumInstances) //nolint:gosec // instance count is bounded by Nova
- resources[hv1.ResourceMemory] = *resource.
+ resources[corev1.ResourceMemory] = *resource.
NewQuantity(resourcesMemBytes, resource.DecimalSI)
resourcesCPU := int64(req.Spec.Data.Flavor.Data.VCPUs) //nolint:gosec // vCPU values are bounded by Nova
resourcesCPU *= int64(req.Spec.Data.NumInstances) //nolint:gosec // instance count is bounded by Nova
- resources[hv1.ResourceCPU] = *resource.
+ resources[corev1.ResourceCPU] = *resource.
NewQuantity(resourcesCPU, resource.DecimalSI)
return resources
}
diff --git a/internal/scheduling/nova/plugins/weighers/vmware_binpack_test.go b/internal/scheduling/nova/plugins/weighers/vmware_binpack_test.go
index 8574fafca..cdca6c569 100644
--- a/internal/scheduling/nova/plugins/weighers/vmware_binpack_test.go
+++ b/internal/scheduling/nova/plugins/weighers/vmware_binpack_test.go
@@ -10,7 +10,7 @@ import (
api "github.com/cobaltcore-dev/cortex/api/external/nova"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
- hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
@@ -24,9 +24,9 @@ func TestVMwareBinpackStepOpts_Validate(t *testing.T) {
{
name: "valid opts with memory and cpu",
opts: VMwareBinpackStepOpts{
- ResourceWeights: map[hv1.ResourceName]float64{
- hv1.ResourceMemory: 1.0,
- hv1.ResourceCPU: 1.0,
+ ResourceWeights: map[corev1.ResourceName]float64{
+ corev1.ResourceMemory: 1.0,
+ corev1.ResourceCPU: 1.0,
},
},
wantError: false,
@@ -34,8 +34,8 @@ func TestVMwareBinpackStepOpts_Validate(t *testing.T) {
{
name: "valid opts with only memory",
opts: VMwareBinpackStepOpts{
- ResourceWeights: map[hv1.ResourceName]float64{
- hv1.ResourceMemory: 2.0,
+ ResourceWeights: map[corev1.ResourceName]float64{
+ corev1.ResourceMemory: 2.0,
},
},
wantError: false,
@@ -43,8 +43,8 @@ func TestVMwareBinpackStepOpts_Validate(t *testing.T) {
{
name: "valid opts with only cpu",
opts: VMwareBinpackStepOpts{
- ResourceWeights: map[hv1.ResourceName]float64{
- hv1.ResourceCPU: 0.5,
+ ResourceWeights: map[corev1.ResourceName]float64{
+ corev1.ResourceCPU: 0.5,
},
},
wantError: false,
@@ -52,15 +52,24 @@ func TestVMwareBinpackStepOpts_Validate(t *testing.T) {
{
name: "invalid opts - empty resource weights",
opts: VMwareBinpackStepOpts{
- ResourceWeights: map[hv1.ResourceName]float64{},
+ ResourceWeights: map[corev1.ResourceName]float64{},
+ },
+ wantError: true,
+ },
+ {
+ name: "invalid opts - unsupported resource",
+ opts: VMwareBinpackStepOpts{
+ ResourceWeights: map[corev1.ResourceName]float64{
+ corev1.ResourceStorage: 1.0,
+ },
},
wantError: true,
},
{
name: "invalid opts - zero weight",
opts: VMwareBinpackStepOpts{
- ResourceWeights: map[hv1.ResourceName]float64{
- hv1.ResourceMemory: 0.0,
+ ResourceWeights: map[corev1.ResourceName]float64{
+ corev1.ResourceMemory: 0.0,
},
},
wantError: true,
@@ -68,8 +77,8 @@ func TestVMwareBinpackStepOpts_Validate(t *testing.T) {
{
name: "invalid opts - negative weight",
opts: VMwareBinpackStepOpts{
- ResourceWeights: map[hv1.ResourceName]float64{
- hv1.ResourceCPU: -1.0,
+ ResourceWeights: map[corev1.ResourceName]float64{
+ corev1.ResourceCPU: -1.0,
},
},
wantError: true,
@@ -133,9 +142,9 @@ func TestVMwareBinpackStep_Run(t *testing.T) {
}
step := &VMwareBinpackStep{}
- step.Options.ResourceWeights = map[hv1.ResourceName]float64{
- hv1.ResourceMemory: 1.0,
- hv1.ResourceCPU: 1.0,
+ step.Options.ResourceWeights = map[corev1.ResourceName]float64{
+ corev1.ResourceMemory: 1.0,
+ corev1.ResourceCPU: 1.0,
}
step.Client = fake.NewClientBuilder().
WithScheme(scheme).
@@ -233,7 +242,7 @@ func TestVMwareBinpackStep_CalcHostCapacity(t *testing.T) {
// Memory capacity: 6000 * 1_000_000 = 6_000_000_000 bytes
expectedMemoryBytes := int64(6000) * 1_000_000
- memoryCapacity := capacity[hv1.ResourceMemory]
+ memoryCapacity := capacity[corev1.ResourceMemory]
if memoryCapacity.Value() != expectedMemoryBytes {
t.Errorf("expected memory capacity %d, got %d",
expectedMemoryBytes, memoryCapacity.Value())
@@ -241,7 +250,7 @@ func TestVMwareBinpackStep_CalcHostCapacity(t *testing.T) {
// CPU capacity: 6
expectedCPU := int64(6)
- cpuCapacity := capacity[hv1.ResourceCPU]
+ cpuCapacity := capacity[corev1.ResourceCPU]
if cpuCapacity.Value() != expectedCPU {
t.Errorf("expected CPU capacity %d, got %d",
expectedCPU, cpuCapacity.Value())
@@ -263,7 +272,7 @@ func TestVMwareBinpackStep_CalcHostAllocation(t *testing.T) {
// Memory allocation: 4000 * 1_000_000 = 4_000_000_000 bytes
expectedMemoryBytes := int64(4000) * 1_000_000
- memoryAllocation := allocation[hv1.ResourceMemory]
+ memoryAllocation := allocation[corev1.ResourceMemory]
if memoryAllocation.Value() != expectedMemoryBytes {
t.Errorf("expected memory allocation %d, got %d",
expectedMemoryBytes, memoryAllocation.Value())
@@ -271,7 +280,7 @@ func TestVMwareBinpackStep_CalcHostAllocation(t *testing.T) {
// CPU allocation: 4
expectedCPU := int64(4)
- cpuAllocation := allocation[hv1.ResourceCPU]
+ cpuAllocation := allocation[corev1.ResourceCPU]
if cpuAllocation.Value() != expectedCPU {
t.Errorf("expected CPU allocation %d, got %d",
expectedCPU, cpuAllocation.Value())
@@ -329,13 +338,13 @@ func TestVMwareBinpackStep_CalcVMResources(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
resources := step.calcVMResources(tt.request)
- memoryResources := resources[hv1.ResourceMemory]
+ memoryResources := resources[corev1.ResourceMemory]
if memoryResources.Value() != tt.expectedMemory {
t.Errorf("expected memory %d, got %d",
tt.expectedMemory, memoryResources.Value())
}
- cpuResources := resources[hv1.ResourceCPU]
+ cpuResources := resources[corev1.ResourceCPU]
if cpuResources.Value() != tt.expectedCPU {
t.Errorf("expected CPU %d, got %d",
tt.expectedCPU, cpuResources.Value())
From 26b3615de58e1658fbc75f353174b6e9dbd70bb5 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Mon, 16 Mar 2026 08:48:37 +0100
Subject: [PATCH 3/4] Consistently use hv1.ResourceName also in Reservation CRD
---
api/v1alpha1/reservation_types.go | 5 ++-
api/v1alpha1/zz_generated.deepcopy.go | 5 ++-
.../compute/resource_capacity_kvm_test.go | 44 +++++++++----------
internal/scheduling/nova/integration_test.go | 20 ++++-----
.../filters/filter_has_enough_capacity.go | 4 +-
.../filter_has_enough_capacity_test.go | 26 +++++------
.../nova/plugins/weighers/kvm_binpack_test.go | 20 ++++-----
.../weighers/kvm_failover_evacuation_test.go | 12 ++---
.../weighers/kvm_prefer_smaller_hosts_test.go | 10 ++---
.../commitments/reservation_manager.go | 7 +--
.../commitments/reservation_manager_test.go | 25 ++++++-----
.../reservations/commitments/state_test.go | 17 +++----
.../reservations/commitments/syncer_test.go | 7 +--
.../controller/controller_test.go | 7 +--
.../reservations/controller/monitor.go | 2 +-
.../reservations/controller/monitor_test.go | 31 ++++++-------
16 files changed, 125 insertions(+), 117 deletions(-)
diff --git a/api/v1alpha1/reservation_types.go b/api/v1alpha1/reservation_types.go
index df3ad473e..913a93a8f 100644
--- a/api/v1alpha1/reservation_types.go
+++ b/api/v1alpha1/reservation_types.go
@@ -4,6 +4,7 @@
package v1alpha1
import (
+ hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -44,7 +45,7 @@ type CommittedResourceAllocation struct {
// Resources consumed by this instance.
// +kubebuilder:validation:Required
- Resources map[string]resource.Quantity `json:"resources"`
+ Resources map[hv1.ResourceName]resource.Quantity `json:"resources"`
}
// CommittedResourceReservationSpec defines the spec fields specific to committed resource reservations.
@@ -99,7 +100,7 @@ type ReservationSpec struct {
// Resources to reserve for this instance.
// +kubebuilder:validation:Optional
- Resources map[string]resource.Quantity `json:"resources,omitempty"`
+ Resources map[hv1.ResourceName]resource.Quantity `json:"resources,omitempty"`
// StartTime is the time when the reservation becomes active.
// +kubebuilder:validation:Optional
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index 564f30cac..96043cc1f 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -8,6 +8,7 @@
package v1alpha1
import (
+ apiv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -35,7 +36,7 @@ func (in *CommittedResourceAllocation) DeepCopyInto(out *CommittedResourceAlloca
in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp)
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
- *out = make(map[string]resource.Quantity, len(*in))
+ *out = make(map[apiv1.ResourceName]resource.Quantity, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
@@ -1218,7 +1219,7 @@ func (in *ReservationSpec) DeepCopyInto(out *ReservationSpec) {
*out = *in
if in.Resources != nil {
in, out := &in.Resources, &out.Resources
- *out = make(map[string]resource.Quantity, len(*in))
+ *out = make(map[apiv1.ResourceName]resource.Quantity, len(*in))
for key, val := range *in {
(*out)[key] = val.DeepCopy()
}
diff --git a/internal/knowledge/kpis/plugins/compute/resource_capacity_kvm_test.go b/internal/knowledge/kpis/plugins/compute/resource_capacity_kvm_test.go
index a900cbdf4..d0f3b1780 100644
--- a/internal/knowledge/kpis/plugins/compute/resource_capacity_kvm_test.go
+++ b/internal/knowledge/kpis/plugins/compute/resource_capacity_kvm_test.go
@@ -59,12 +59,12 @@ func TestKVMResourceCapacityKPI_Collect(t *testing.T) {
},
Status: hv1.HypervisorStatus{
Capacity: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse("128"),
- "memory": resource.MustParse("512Gi"),
+ hv1.ResourceCPU: resource.MustParse("128"),
+ hv1.ResourceMemory: resource.MustParse("512Gi"),
},
Allocation: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse("64"),
- "memory": resource.MustParse("256Gi"),
+ hv1.ResourceCPU: resource.MustParse("64"),
+ hv1.ResourceMemory: resource.MustParse("256Gi"),
},
Traits: []string{},
},
@@ -149,12 +149,12 @@ func TestKVMResourceCapacityKPI_Collect(t *testing.T) {
},
Status: hv1.HypervisorStatus{
Capacity: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse("256"),
- "memory": resource.MustParse("1Ti"),
+ hv1.ResourceCPU: resource.MustParse("256"),
+ hv1.ResourceMemory: resource.MustParse("1Ti"),
},
Allocation: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse("128"),
- "memory": resource.MustParse("512Gi"),
+ hv1.ResourceCPU: resource.MustParse("128"),
+ hv1.ResourceMemory: resource.MustParse("512Gi"),
},
Traits: []string{
"CUSTOM_HW_SAPPHIRE_RAPIDS",
@@ -210,12 +210,12 @@ func TestKVMResourceCapacityKPI_Collect(t *testing.T) {
},
Status: hv1.HypervisorStatus{
Capacity: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse("64"),
- "memory": resource.MustParse("256Gi"),
+ hv1.ResourceCPU: resource.MustParse("64"),
+ hv1.ResourceMemory: resource.MustParse("256Gi"),
},
Allocation: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse("32"),
- "memory": resource.MustParse("128Gi"),
+ hv1.ResourceCPU: resource.MustParse("32"),
+ hv1.ResourceMemory: resource.MustParse("128Gi"),
},
Traits: []string{
"CUSTOM_DECOMMISSIONING",
@@ -256,12 +256,12 @@ func TestKVMResourceCapacityKPI_Collect(t *testing.T) {
},
Status: hv1.HypervisorStatus{
Capacity: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse("100"),
- "memory": resource.MustParse("200Gi"),
+ hv1.ResourceCPU: resource.MustParse("100"),
+ hv1.ResourceMemory: resource.MustParse("200Gi"),
},
Allocation: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse("50"),
- "memory": resource.MustParse("100Gi"),
+ hv1.ResourceCPU: resource.MustParse("50"),
+ hv1.ResourceMemory: resource.MustParse("100Gi"),
},
Traits: []string{},
},
@@ -275,12 +275,12 @@ func TestKVMResourceCapacityKPI_Collect(t *testing.T) {
},
Status: hv1.HypervisorStatus{
Capacity: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse("200"),
- "memory": resource.MustParse("400Gi"),
+ hv1.ResourceCPU: resource.MustParse("200"),
+ hv1.ResourceMemory: resource.MustParse("400Gi"),
},
Allocation: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse("150"),
- "memory": resource.MustParse("300Gi"),
+ hv1.ResourceCPU: resource.MustParse("150"),
+ hv1.ResourceMemory: resource.MustParse("300Gi"),
},
Traits: []string{"CUSTOM_HW_SAPPHIRE_RAPIDS"},
},
@@ -333,8 +333,8 @@ func TestKVMResourceCapacityKPI_Collect(t *testing.T) {
},
Status: hv1.HypervisorStatus{
Capacity: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse("96"),
- "memory": resource.MustParse("384Gi"),
+ hv1.ResourceCPU: resource.MustParse("96"),
+ hv1.ResourceMemory: resource.MustParse("384Gi"),
},
// No Allocation field - simulating missing data
Allocation: nil,
diff --git a/internal/scheduling/nova/integration_test.go b/internal/scheduling/nova/integration_test.go
index f066ad821..596d9f2ed 100644
--- a/internal/scheduling/nova/integration_test.go
+++ b/internal/scheduling/nova/integration_test.go
@@ -49,12 +49,12 @@ func newHypervisor(name, cpuCap, cpuAlloc, memCap, memAlloc string) *hv1.Hypervi
},
Status: hv1.HypervisorStatus{
Capacity: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse(cpuCap),
- "memory": resource.MustParse(memCap),
+ hv1.ResourceCPU: resource.MustParse(cpuCap),
+ hv1.ResourceMemory: resource.MustParse(memCap),
},
Allocation: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse(cpuAlloc),
- "memory": resource.MustParse(memAlloc),
+ hv1.ResourceCPU: resource.MustParse(cpuAlloc),
+ hv1.ResourceMemory: resource.MustParse(memAlloc),
},
},
}
@@ -68,9 +68,9 @@ func newCommittedReservation(name, targetHost, observedHost, projectID, flavorNa
Spec: v1alpha1.ReservationSpec{
Type: v1alpha1.ReservationTypeCommittedResource,
TargetHost: targetHost,
- Resources: map[string]resource.Quantity{
- "cpu": resource.MustParse(cpu),
- "memory": resource.MustParse(memory),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceCPU: resource.MustParse(cpu),
+ hv1.ResourceMemory: resource.MustParse(memory),
},
CommittedResourceReservation: &v1alpha1.CommittedResourceReservationSpec{
ProjectID: projectID,
@@ -100,9 +100,9 @@ func newFailoverReservation(name, targetHost, resourceGroup, cpu, memory string,
Spec: v1alpha1.ReservationSpec{
Type: v1alpha1.ReservationTypeFailover,
TargetHost: targetHost,
- Resources: map[string]resource.Quantity{
- "cpu": resource.MustParse(cpu),
- "memory": resource.MustParse(memory),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceCPU: resource.MustParse(cpu),
+ hv1.ResourceMemory: resource.MustParse(memory),
},
FailoverReservation: &v1alpha1.FailoverReservationSpec{
ResourceGroup: resourceGroup,
diff --git a/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity.go b/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity.go
index a390f5e98..8852f6151 100644
--- a/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity.go
+++ b/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity.go
@@ -145,7 +145,7 @@ func (s *FilterHasEnoughCapacity) Run(traceLog *slog.Logger, request api.Externa
// For CR reservations with allocations, calculate remaining (unallocated) resources to block.
// This prevents double-blocking of resources already consumed by running instances.
- var resourcesToBlock map[string]resource.Quantity
+ var resourcesToBlock map[hv1.ResourceName]resource.Quantity
if reservation.Spec.Type == v1alpha1.ReservationTypeCommittedResource &&
// if the reservation is not being migrated, block only unused resources
reservation.Spec.TargetHost == reservation.Status.Host &&
@@ -154,7 +154,7 @@ func (s *FilterHasEnoughCapacity) Run(traceLog *slog.Logger, request api.Externa
len(reservation.Spec.CommittedResourceReservation.Allocations) > 0 &&
len(reservation.Status.CommittedResourceReservation.Allocations) > 0 {
// Start with full reservation resources
- resourcesToBlock = make(map[string]resource.Quantity)
+ resourcesToBlock = make(map[hv1.ResourceName]resource.Quantity)
for k, v := range reservation.Spec.Resources {
resourcesToBlock[k] = v.DeepCopy()
}
diff --git a/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity_test.go b/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity_test.go
index a4cd621ac..504bbb523 100644
--- a/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity_test.go
+++ b/internal/scheduling/nova/plugins/filters/filter_has_enough_capacity_test.go
@@ -40,12 +40,12 @@ func newHypervisor(name, cpuCap, cpuAlloc, memCap, memAlloc string) *hv1.Hypervi
},
Status: hv1.HypervisorStatus{
Capacity: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse(cpuCap),
- "memory": resource.MustParse(memCap),
+ hv1.ResourceCPU: resource.MustParse(cpuCap),
+ hv1.ResourceMemory: resource.MustParse(memCap),
},
Allocation: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse(cpuAlloc),
- "memory": resource.MustParse(memAlloc),
+ hv1.ResourceCPU: resource.MustParse(cpuAlloc),
+ hv1.ResourceMemory: resource.MustParse(memAlloc),
},
},
}
@@ -64,9 +64,9 @@ func newCommittedReservation(
Spec: v1alpha1.ReservationSpec{
Type: v1alpha1.ReservationTypeCommittedResource,
TargetHost: targetHost,
- Resources: map[string]resource.Quantity{
- "cpu": resource.MustParse(cpu),
- "memory": resource.MustParse(memory),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceCPU: resource.MustParse(cpu),
+ hv1.ResourceMemory: resource.MustParse(memory),
},
CommittedResourceReservation: &v1alpha1.CommittedResourceReservationSpec{
ProjectID: projectID,
@@ -104,9 +104,9 @@ func newFailoverReservation(name, targetHost, cpu, memory string, allocations ma
Spec: v1alpha1.ReservationSpec{
Type: v1alpha1.ReservationTypeFailover,
TargetHost: targetHost,
- Resources: map[string]resource.Quantity{
- "cpu": resource.MustParse(cpu),
- "memory": resource.MustParse(memory),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceCPU: resource.MustParse(cpu),
+ hv1.ResourceMemory: resource.MustParse(memory),
},
FailoverReservation: &v1alpha1.FailoverReservationSpec{
ResourceGroup: "m1.large",
@@ -150,9 +150,9 @@ func crSpecAllocs(vms ...crVmAlloc) map[string]v1alpha1.CommittedResourceAllocat
for _, v := range vms {
allocs[v.uuid] = v1alpha1.CommittedResourceAllocation{
CreationTimestamp: metav1.Now(),
- Resources: map[string]resource.Quantity{
- "cpu": resource.MustParse(v.cpu),
- "memory": resource.MustParse(v.mem),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceCPU: resource.MustParse(v.cpu),
+ hv1.ResourceMemory: resource.MustParse(v.mem),
},
}
}
diff --git a/internal/scheduling/nova/plugins/weighers/kvm_binpack_test.go b/internal/scheduling/nova/plugins/weighers/kvm_binpack_test.go
index 6c742ad84..e867c5bf7 100644
--- a/internal/scheduling/nova/plugins/weighers/kvm_binpack_test.go
+++ b/internal/scheduling/nova/plugins/weighers/kvm_binpack_test.go
@@ -23,12 +23,12 @@ func newHypervisor(name, capacityCPU, capacityMem, allocationCPU, allocationMem
},
Status: hv1.HypervisorStatus{
Capacity: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse(capacityCPU),
- "memory": resource.MustParse(capacityMem),
+ hv1.ResourceCPU: resource.MustParse(capacityCPU),
+ hv1.ResourceMemory: resource.MustParse(capacityMem),
},
Allocation: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse(allocationCPU),
- "memory": resource.MustParse(allocationMem),
+ hv1.ResourceCPU: resource.MustParse(allocationCPU),
+ hv1.ResourceMemory: resource.MustParse(allocationMem),
},
},
}
@@ -344,12 +344,12 @@ func TestKVMBinpackStep_Run(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Name: "host1"},
Status: hv1.HypervisorStatus{
Capacity: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse("0"),
- "memory": resource.MustParse("100Gi"),
+ hv1.ResourceCPU: resource.MustParse("0"),
+ hv1.ResourceMemory: resource.MustParse("100Gi"),
},
Allocation: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse("0"),
- "memory": resource.MustParse("80Gi"),
+ hv1.ResourceCPU: resource.MustParse("0"),
+ hv1.ResourceMemory: resource.MustParse("80Gi"),
},
},
},
@@ -372,7 +372,7 @@ func TestKVMBinpackStep_Run(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Name: "host1"},
Status: hv1.HypervisorStatus{
Capacity: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse("100"),
+ hv1.ResourceCPU: resource.MustParse("100"),
},
Allocation: map[hv1.ResourceName]resource.Quantity{
// No CPU allocation
@@ -401,7 +401,7 @@ func TestKVMBinpackStep_Run(t *testing.T) {
// No CPU capacity
},
Allocation: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse("80"),
+ hv1.ResourceCPU: resource.MustParse("80"),
},
},
},
diff --git a/internal/scheduling/nova/plugins/weighers/kvm_failover_evacuation_test.go b/internal/scheduling/nova/plugins/weighers/kvm_failover_evacuation_test.go
index 9c3ace3ec..0664e55d4 100644
--- a/internal/scheduling/nova/plugins/weighers/kvm_failover_evacuation_test.go
+++ b/internal/scheduling/nova/plugins/weighers/kvm_failover_evacuation_test.go
@@ -49,9 +49,9 @@ func newFailoverReservation(name, targetHost string, failed bool, allocations ma
Spec: v1alpha1.ReservationSpec{
Type: v1alpha1.ReservationTypeFailover,
TargetHost: targetHost,
- Resources: map[string]resource.Quantity{
- "cpu": resource.MustParse("4"),
- "memory": resource.MustParse("8Gi"),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceCPU: resource.MustParse("4"),
+ hv1.ResourceMemory: resource.MustParse("8Gi"),
},
FailoverReservation: &v1alpha1.FailoverReservationSpec{
ResourceGroup: "m1.large",
@@ -84,9 +84,9 @@ func newCommittedReservation(name, targetHost string) *v1alpha1.Reservation {
Spec: v1alpha1.ReservationSpec{
Type: v1alpha1.ReservationTypeCommittedResource,
TargetHost: targetHost,
- Resources: map[string]resource.Quantity{
- "cpu": resource.MustParse("4"),
- "memory": resource.MustParse("8Gi"),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceCPU: resource.MustParse("4"),
+ hv1.ResourceMemory: resource.MustParse("8Gi"),
},
CommittedResourceReservation: &v1alpha1.CommittedResourceReservationSpec{
ProjectID: "project-A",
diff --git a/internal/scheduling/nova/plugins/weighers/kvm_prefer_smaller_hosts_test.go b/internal/scheduling/nova/plugins/weighers/kvm_prefer_smaller_hosts_test.go
index e00593ea5..4a1b70e20 100644
--- a/internal/scheduling/nova/plugins/weighers/kvm_prefer_smaller_hosts_test.go
+++ b/internal/scheduling/nova/plugins/weighers/kvm_prefer_smaller_hosts_test.go
@@ -23,8 +23,8 @@ func newHypervisorWithCapacity(name, capacityCPU, capacityMem string) *hv1.Hyper
},
Status: hv1.HypervisorStatus{
Capacity: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse(capacityCPU),
- "memory": resource.MustParse(capacityMem),
+ hv1.ResourceCPU: resource.MustParse(capacityCPU),
+ hv1.ResourceMemory: resource.MustParse(capacityMem),
},
},
}
@@ -377,7 +377,7 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Name: "host3"},
Status: hv1.HypervisorStatus{
Capacity: map[hv1.ResourceName]resource.Quantity{
- "cpu": resource.MustParse("100"),
+ hv1.ResourceCPU: resource.MustParse("100"),
// No memory capacity
},
},
@@ -535,7 +535,7 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Name: "host1"},
Status: hv1.HypervisorStatus{
Capacity: map[hv1.ResourceName]resource.Quantity{
- "memory": resource.MustParse("64Gi"),
+ hv1.ResourceMemory: resource.MustParse("64Gi"),
// No CPU
},
},
@@ -544,7 +544,7 @@ func TestKVMPreferSmallerHostsStep_Run(t *testing.T) {
ObjectMeta: metav1.ObjectMeta{Name: "host2"},
Status: hv1.HypervisorStatus{
Capacity: map[hv1.ResourceName]resource.Quantity{
- "memory": resource.MustParse("128Gi"),
+ hv1.ResourceMemory: resource.MustParse("128Gi"),
// No CPU
},
},
diff --git a/internal/scheduling/reservations/commitments/reservation_manager.go b/internal/scheduling/reservations/commitments/reservation_manager.go
index 350de7e8c..3929e434f 100644
--- a/internal/scheduling/reservations/commitments/reservation_manager.go
+++ b/internal/scheduling/reservations/commitments/reservation_manager.go
@@ -9,6 +9,7 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
+ hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
"github.com/go-logr/logr"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
@@ -265,12 +266,12 @@ func (m *ReservationManager) newReservation(
spec := v1alpha1.ReservationSpec{
Type: v1alpha1.ReservationTypeCommittedResource,
- Resources: map[string]resource.Quantity{
- "memory": *resource.NewQuantity(
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceMemory: *resource.NewQuantity(
memoryBytes,
resource.BinarySI,
),
- "cpu": *resource.NewQuantity(
+ hv1.ResourceCPU: *resource.NewQuantity(
cpus,
resource.DecimalSI,
),
diff --git a/internal/scheduling/reservations/commitments/reservation_manager_test.go b/internal/scheduling/reservations/commitments/reservation_manager_test.go
index d8cf9c267..b47ad6fb2 100644
--- a/internal/scheduling/reservations/commitments/reservation_manager_test.go
+++ b/internal/scheduling/reservations/commitments/reservation_manager_test.go
@@ -9,6 +9,7 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
+ hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
"github.com/go-logr/logr"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -89,8 +90,8 @@ func TestApplyCommitmentState_DeletesExcessReservations(t *testing.T) {
},
},
Spec: v1alpha1.ReservationSpec{
- Resources: map[string]resource.Quantity{
- "memory": *resource.NewQuantity(16*1024*1024*1024, resource.BinarySI),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceMemory: *resource.NewQuantity(16*1024*1024*1024, resource.BinarySI),
},
CommittedResourceReservation: &v1alpha1.CommittedResourceReservationSpec{
ProjectID: "project-1",
@@ -108,8 +109,8 @@ func TestApplyCommitmentState_DeletesExcessReservations(t *testing.T) {
},
},
Spec: v1alpha1.ReservationSpec{
- Resources: map[string]resource.Quantity{
- "memory": *resource.NewQuantity(16*1024*1024*1024, resource.BinarySI),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceMemory: *resource.NewQuantity(16*1024*1024*1024, resource.BinarySI),
},
CommittedResourceReservation: &v1alpha1.CommittedResourceReservationSpec{
ProjectID: "project-1",
@@ -193,8 +194,8 @@ func TestApplyCommitmentState_PreservesAllocatedReservations(t *testing.T) {
},
},
Spec: v1alpha1.ReservationSpec{
- Resources: map[string]resource.Quantity{
- "memory": *resource.NewQuantity(16*1024*1024*1024, resource.BinarySI),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceMemory: *resource.NewQuantity(16*1024*1024*1024, resource.BinarySI),
},
CommittedResourceReservation: &v1alpha1.CommittedResourceReservationSpec{
ProjectID: "project-1",
@@ -214,8 +215,8 @@ func TestApplyCommitmentState_PreservesAllocatedReservations(t *testing.T) {
},
},
Spec: v1alpha1.ReservationSpec{
- Resources: map[string]resource.Quantity{
- "memory": *resource.NewQuantity(16*1024*1024*1024, resource.BinarySI),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceMemory: *resource.NewQuantity(16*1024*1024*1024, resource.BinarySI),
},
CommittedResourceReservation: &v1alpha1.CommittedResourceReservationSpec{
ProjectID: "project-1",
@@ -299,8 +300,8 @@ func TestApplyCommitmentState_HandlesZeroCapacity(t *testing.T) {
},
},
Spec: v1alpha1.ReservationSpec{
- Resources: map[string]resource.Quantity{
- "memory": *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceMemory: *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI),
},
CommittedResourceReservation: &v1alpha1.CommittedResourceReservationSpec{
ProjectID: "project-1",
@@ -377,8 +378,8 @@ func TestApplyCommitmentState_FixesWrongFlavorGroup(t *testing.T) {
},
},
Spec: v1alpha1.ReservationSpec{
- Resources: map[string]resource.Quantity{
- "memory": *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceMemory: *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI),
},
CommittedResourceReservation: &v1alpha1.CommittedResourceReservationSpec{
ProjectID: "project-1",
diff --git a/internal/scheduling/reservations/commitments/state_test.go b/internal/scheduling/reservations/commitments/state_test.go
index d8581cec1..7060300db 100644
--- a/internal/scheduling/reservations/commitments/state_test.go
+++ b/internal/scheduling/reservations/commitments/state_test.go
@@ -8,6 +8,7 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
+ hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -84,8 +85,8 @@ func TestFromReservations_SumsMemoryCorrectly(t *testing.T) {
Name: "commitment-abc123-0",
},
Spec: v1alpha1.ReservationSpec{
- Resources: map[string]resource.Quantity{
- "memory": *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI), // 8 GiB
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceMemory: *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI), // 8 GiB
},
CommittedResourceReservation: &v1alpha1.CommittedResourceReservationSpec{
ProjectID: "project-1",
@@ -98,8 +99,8 @@ func TestFromReservations_SumsMemoryCorrectly(t *testing.T) {
Name: "commitment-abc123-1",
},
Spec: v1alpha1.ReservationSpec{
- Resources: map[string]resource.Quantity{
- "memory": *resource.NewQuantity(16*1024*1024*1024, resource.BinarySI), // 16 GiB
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceMemory: *resource.NewQuantity(16*1024*1024*1024, resource.BinarySI), // 16 GiB
},
CommittedResourceReservation: &v1alpha1.CommittedResourceReservationSpec{
ProjectID: "project-1",
@@ -146,8 +147,8 @@ func TestFromReservations_SkipsInconsistentFlavorGroup(t *testing.T) {
Name: "commitment-abc123-0",
},
Spec: v1alpha1.ReservationSpec{
- Resources: map[string]resource.Quantity{
- "memory": *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceMemory: *resource.NewQuantity(8*1024*1024*1024, resource.BinarySI),
},
CommittedResourceReservation: &v1alpha1.CommittedResourceReservationSpec{
ProjectID: "project-1",
@@ -160,8 +161,8 @@ func TestFromReservations_SkipsInconsistentFlavorGroup(t *testing.T) {
Name: "commitment-abc123-1",
},
Spec: v1alpha1.ReservationSpec{
- Resources: map[string]resource.Quantity{
- "memory": *resource.NewQuantity(16*1024*1024*1024, resource.BinarySI),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceMemory: *resource.NewQuantity(16*1024*1024*1024, resource.BinarySI),
},
CommittedResourceReservation: &v1alpha1.CommittedResourceReservationSpec{
ProjectID: "project-1",
diff --git a/internal/scheduling/reservations/commitments/syncer_test.go b/internal/scheduling/reservations/commitments/syncer_test.go
index 0790545e8..3799b0ce3 100644
--- a/internal/scheduling/reservations/commitments/syncer_test.go
+++ b/internal/scheduling/reservations/commitments/syncer_test.go
@@ -9,6 +9,7 @@ import (
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
"github.com/cobaltcore-dev/cortex/internal/knowledge/extractor/plugins/compute"
+ hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -338,9 +339,9 @@ func TestSyncer_SyncReservations_UpdateExisting(t *testing.T) {
ResourceGroup: "old_group",
Creator: CreatorValue,
},
- Resources: map[string]resource.Quantity{
- "memory": resource.MustParse("512Mi"),
- "cpu": resource.MustParse("1"),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceMemory: resource.MustParse("512Mi"),
+ hv1.ResourceCPU: resource.MustParse("1"),
},
},
}
diff --git a/internal/scheduling/reservations/controller/controller_test.go b/internal/scheduling/reservations/controller/controller_test.go
index 548857d3a..0ef3e253c 100644
--- a/internal/scheduling/reservations/controller/controller_test.go
+++ b/internal/scheduling/reservations/controller/controller_test.go
@@ -10,6 +10,7 @@ import (
"net/http/httptest"
"testing"
+ hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -148,9 +149,9 @@ func TestReservationReconciler_reconcileInstanceReservation_Success(t *testing.T
ProjectID: "test-project",
ResourceName: "test-flavor",
},
- Resources: map[string]resource.Quantity{
- "memory": resource.MustParse("1Gi"),
- "cpu": resource.MustParse("2"),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceMemory: resource.MustParse("1Gi"),
+ hv1.ResourceCPU: resource.MustParse("2"),
},
},
}
diff --git a/internal/scheduling/reservations/controller/monitor.go b/internal/scheduling/reservations/controller/monitor.go
index 3e6c6dae6..0c0ad2875 100644
--- a/internal/scheduling/reservations/controller/monitor.go
+++ b/internal/scheduling/reservations/controller/monitor.go
@@ -101,7 +101,7 @@ func (m *Monitor) Collect(ch chan<- prometheus.Metric) {
resourcesByLabels[key] = map[string]uint64{}
}
for resourceName, resourceQuantity := range reservation.Spec.Resources {
- resourcesByLabels[key][resourceName] += resourceQuantity.AsDec().UnscaledBig().Uint64()
+ resourcesByLabels[key][string(resourceName)] += resourceQuantity.AsDec().UnscaledBig().Uint64()
}
}
for key, resources := range resourcesByLabels {
diff --git a/internal/scheduling/reservations/controller/monitor_test.go b/internal/scheduling/reservations/controller/monitor_test.go
index fef88e35e..eef11892e 100644
--- a/internal/scheduling/reservations/controller/monitor_test.go
+++ b/internal/scheduling/reservations/controller/monitor_test.go
@@ -14,6 +14,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"github.com/cobaltcore-dev/cortex/api/v1alpha1"
+ hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
)
@@ -98,9 +99,9 @@ func TestMonitor_Collect_WithReservations(t *testing.T) {
CommittedResourceReservation: &v1alpha1.CommittedResourceReservationSpec{
ResourceName: "test-flavor",
},
- Resources: map[string]resource.Quantity{
- "memory": resource.MustParse("1Gi"),
- "cpu": resource.MustParse("2"),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceMemory: resource.MustParse("1Gi"),
+ hv1.ResourceCPU: resource.MustParse("2"),
},
},
Status: v1alpha1.ReservationStatus{
@@ -123,9 +124,9 @@ func TestMonitor_Collect_WithReservations(t *testing.T) {
CommittedResourceReservation: &v1alpha1.CommittedResourceReservationSpec{
ResourceName: "test-flavor",
},
- Resources: map[string]resource.Quantity{
- "memory": resource.MustParse("2Gi"),
- "cpu": resource.MustParse("4"),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceMemory: resource.MustParse("2Gi"),
+ hv1.ResourceCPU: resource.MustParse("4"),
},
},
Status: v1alpha1.ReservationStatus{
@@ -148,9 +149,9 @@ func TestMonitor_Collect_WithReservations(t *testing.T) {
CommittedResourceReservation: &v1alpha1.CommittedResourceReservationSpec{
ResourceName: "test-flavor",
},
- Resources: map[string]resource.Quantity{
- "memory": resource.MustParse("4Gi"),
- "cpu": resource.MustParse("4"),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceMemory: resource.MustParse("4Gi"),
+ hv1.ResourceCPU: resource.MustParse("4"),
},
},
Status: v1alpha1.ReservationStatus{
@@ -244,9 +245,9 @@ func TestMonitor_Collect_ResourceMetrics(t *testing.T) {
CommittedResourceReservation: &v1alpha1.CommittedResourceReservationSpec{
ResourceName: "test-flavor",
},
- Resources: map[string]resource.Quantity{
- "memory": resource.MustParse("1000Mi"),
- "cpu": resource.MustParse("2"),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceMemory: resource.MustParse("1000Mi"),
+ hv1.ResourceCPU: resource.MustParse("2"),
},
},
Status: v1alpha1.ReservationStatus{
@@ -367,9 +368,9 @@ func TestMonitor_Collect_LabelSanitization(t *testing.T) {
CommittedResourceReservation: &v1alpha1.CommittedResourceReservationSpec{
ResourceName: "test-flavor",
},
- Resources: map[string]resource.Quantity{
- "memory": resource.MustParse("1Gi"),
- "cpu": resource.MustParse("2"),
+ Resources: map[hv1.ResourceName]resource.Quantity{
+ hv1.ResourceMemory: resource.MustParse("1Gi"),
+ hv1.ResourceCPU: resource.MustParse("2"),
},
},
Status: v1alpha1.ReservationStatus{
From 8c9c007f4b92b3f1ff1cf41ce03163f2810bb682 Mon Sep 17 00:00:00 2001
From: Philipp Matthes
Date: Mon, 16 Mar 2026 09:10:40 +0100
Subject: [PATCH 4/4] Address nits
---
.../reservations/commitments/reservation_manager.go | 8 ++++----
.../reservations/commitments/reservation_manager_test.go | 6 +++---
.../scheduling/reservations/commitments/syncer_test.go | 8 ++++----
3 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/internal/scheduling/reservations/commitments/reservation_manager.go b/internal/scheduling/reservations/commitments/reservation_manager.go
index 3929e434f..13856d992 100644
--- a/internal/scheduling/reservations/commitments/reservation_manager.go
+++ b/internal/scheduling/reservations/commitments/reservation_manager.go
@@ -78,7 +78,7 @@ func (m *ReservationManager) ApplyCommitmentState(
}
deltaMemoryBytes := desiredState.TotalMemoryBytes
for _, res := range existing {
- memoryQuantity := res.Spec.Resources["memory"]
+ memoryQuantity := res.Spec.Resources[hv1.ResourceMemory]
deltaMemoryBytes -= memoryQuantity.Value()
}
@@ -105,7 +105,7 @@ func (m *ReservationManager) ApplyCommitmentState(
"expectedProjectID", desiredState.ProjectID,
"actualProjectID", res.Spec.CommittedResourceReservation.ProjectID)
removedReservations = append(removedReservations, res)
- memValue := res.Spec.Resources["memory"]
+ memValue := res.Spec.Resources[hv1.ResourceMemory]
deltaMemoryBytes += memValue.Value()
if err := m.Delete(ctx, &res); err != nil {
@@ -133,7 +133,7 @@ func (m *ReservationManager) ApplyCommitmentState(
existing = existing[:len(existing)-1] // remove from existing list
}
removedReservations = append(removedReservations, *reservationToDelete)
- memValue := reservationToDelete.Spec.Resources["memory"]
+ memValue := reservationToDelete.Spec.Resources[hv1.ResourceMemory]
deltaMemoryBytes += memValue.Value()
log.Info("deleting reservation",
@@ -154,7 +154,7 @@ func (m *ReservationManager) ApplyCommitmentState(
// TODO more sophisticated flavor selection, especially with flavors of different cpu/memory ratio
reservation := m.newReservation(desiredState, nextSlotIndex, deltaMemoryBytes, flavorGroup, creator)
touchedReservations = append(touchedReservations, *reservation)
- memValue := reservation.Spec.Resources["memory"]
+ memValue := reservation.Spec.Resources[hv1.ResourceMemory]
deltaMemoryBytes -= memValue.Value()
log.Info("creating reservation",
diff --git a/internal/scheduling/reservations/commitments/reservation_manager_test.go b/internal/scheduling/reservations/commitments/reservation_manager_test.go
index b47ad6fb2..8022999fb 100644
--- a/internal/scheduling/reservations/commitments/reservation_manager_test.go
+++ b/internal/scheduling/reservations/commitments/reservation_manager_test.go
@@ -65,7 +65,7 @@ func TestApplyCommitmentState_CreatesNewReservations(t *testing.T) {
// Verify created reservations sum to desired state
totalMemory := int64(0)
for _, res := range touched {
- memQuantity := res.Spec.Resources["memory"]
+ memQuantity := res.Spec.Resources[hv1.ResourceMemory]
totalMemory += memQuantity.Value()
}
@@ -169,7 +169,7 @@ func TestApplyCommitmentState_DeletesExcessReservations(t *testing.T) {
totalMemory := int64(0)
for _, res := range remainingList.Items {
- memQuantity := res.Spec.Resources["memory"]
+ memQuantity := res.Spec.Resources[hv1.ResourceMemory]
totalMemory += memQuantity.Value()
}
@@ -531,7 +531,7 @@ func TestNewReservation_SelectsAppropriateFlavor(t *testing.T) {
}
// Verify CPU allocation
- cpuQuantity := reservation.Spec.Resources["cpu"]
+ cpuQuantity := reservation.Spec.Resources[hv1.ResourceCPU]
if cpuQuantity.Value() != tt.expectedCores {
t.Errorf("expected %d cores, got %d",
tt.expectedCores, cpuQuantity.Value())
diff --git a/internal/scheduling/reservations/commitments/syncer_test.go b/internal/scheduling/reservations/commitments/syncer_test.go
index 3799b0ce3..75512299a 100644
--- a/internal/scheduling/reservations/commitments/syncer_test.go
+++ b/internal/scheduling/reservations/commitments/syncer_test.go
@@ -294,13 +294,13 @@ func TestSyncer_SyncReservations_InstanceCommitments(t *testing.T) {
// Check resource values - should be sized for the flavor that fits
// With 2048MB total capacity, we can fit 2x 1024MB flavors
expectedMemory := resource.MustParse("1073741824") // 1024MB in bytes
- if !res.Spec.Resources["memory"].Equal(expectedMemory) {
- t.Errorf("Expected memory %v, got %v", expectedMemory, res.Spec.Resources["memory"])
+ if !res.Spec.Resources[hv1.ResourceMemory].Equal(expectedMemory) {
+ t.Errorf("Expected memory %v, got %v", expectedMemory, res.Spec.Resources[hv1.ResourceMemory])
}
expectedVCPUs := resource.MustParse("2")
- if !res.Spec.Resources["cpu"].Equal(expectedVCPUs) {
- t.Errorf("Expected vCPUs %v, got %v", expectedVCPUs, res.Spec.Resources["cpu"])
+ if !res.Spec.Resources[hv1.ResourceCPU].Equal(expectedVCPUs) {
+ t.Errorf("Expected vCPUs %v, got %v", expectedVCPUs, res.Spec.Resources[hv1.ResourceCPU])
}
}