Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion build/components/versions.yml
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ firmware:
libvirt: v10.9.0
edk2: stable202411
core:
3p-kubevirt: v1.6.2-v12n.20
3p-kubevirt: dvp/set-memory-limits-while-hotplugging
3p-containerized-data-importer: v1.60.3-v12n.17
distribution: 2.8.3
package:
Expand Down
1 change: 1 addition & 0 deletions images/virt-artifact/werf.inc.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ secrets:
shell:
install:
- |
echo rebuild 33
echo "Git clone {{ $gitRepoName }} repository..."
git clone --depth=1 $(cat /run/secrets/SOURCE_REPO)/{{ $gitRepoUrl }} --branch {{ $tag }} /src/kubevirt

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -90,9 +90,10 @@ const (
AnnVMRestartRequested = AnnAPIGroupV + "/vm-restart-requested"

// AnnVMOPWorkloadUpdate is an annotation on vmop that represents a vmop created by workload-updater controller.
AnnVMOPWorkloadUpdate = AnnAPIGroupV + "/workload-update"
AnnVMOPWorkloadUpdateImage = AnnAPIGroupV + "/workload-update-image"
AnnVMOPWorkloadUpdateNodePlacementSum = AnnAPIGroupV + "/workload-update-node-placement-sum"
AnnVMOPWorkloadUpdate = AnnAPIGroupV + "/workload-update"
AnnVMOPWorkloadUpdateImage = AnnAPIGroupV + "/workload-update-image"
AnnVMOPWorkloadUpdateNodePlacementSum = AnnAPIGroupV + "/workload-update-node-placement-sum"
AnnVMOPWorkloadUpdateHotplugResourcesSum = AnnAPIGroupV + "/workload-update-hotplug-resources-sum"
// AnnVMRestore is an annotation on a resource that indicates it was created by the vmrestore controller; the value is the UID of the `VirtualMachineRestore` resource.
AnnVMRestore = AnnAPIGroupV + "/vmrestore"
// AnnVMOPEvacuation is an annotation on vmop that represents a vmop created by evacuation controller
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ import (
"github.com/deckhouse/virtualization-controller/pkg/common/array"
"github.com/deckhouse/virtualization-controller/pkg/common/resource_builder"
"github.com/deckhouse/virtualization-controller/pkg/common/vm"
"github.com/deckhouse/virtualization-controller/pkg/featuregates"
"github.com/deckhouse/virtualization/api/core/v1alpha2"
)

Expand All @@ -46,6 +47,9 @@ const (

// GenericCPUModel specifies the base CPU model for Features and Discovery CPU model types.
GenericCPUModel = "qemu64"

MaxMemorySizeForHotplug = 256 * 1024 * 1024 * 1024 // 256 Gi (safely limit to not overlap somewhat conservative 38 bit physical address space)
EnableMemoryHotplugThreshold = 1 * 1024 * 1024 * 1024 // 1 Gi (no hotplug for VMs with less than 1Gi)
)

type KVVMOptions struct {
Expand Down Expand Up @@ -269,7 +273,25 @@ func (b *KVVM) SetCPU(cores int, coreFraction string) error {
return nil
}

// SetMemory applies the desired memory size to the kvvm spec.
//
// Two strategies exist for expressing memory:
//  1. domain.memory.guest — enables memory hotplug and leaves resources.limits unset.
//  2. explicit requests/limits in domain.resources — no hotplug possible with this form.
//
// Strategy (1) is the current approach; strategy (2) is kept only for Running
// VMs that were started by a previous version of the controller.
func (b *KVVM) SetMemory(memorySize resource.Quantity) {
	// Legacy path: a VM already running with requests/limits keeps that form.
	// TODO delete this in the future (around 3-4 more versions after enabling memory hotplug by default).
	usesLegacyResources := b.ResourceExists && isVMRunningWithMemoryResources(b.Resource)
	if usesLegacyResources {
		b.setMemoryNonHotpluggable(memorySize)
		return
	}

	b.setMemoryHotpluggable(memorySize)
}

// setMemoryNonHotpluggable translates memory size to requests and limits in KVVM.
// Note: this is a first implementation, memory hotplug is not compatible with this strategy.
func (b *KVVM) setMemoryNonHotpluggable(memorySize resource.Quantity) {
res := &b.Resource.Spec.Template.Spec.Domain.Resources
if res.Requests == nil {
res.Requests = make(map[corev1.ResourceName]resource.Quantity)
Expand All @@ -281,6 +303,57 @@ func (b *KVVM) SetMemory(memorySize resource.Quantity) {
res.Limits[corev1.ResourceMemory] = memorySize
}

// setMemoryHotpluggable translates the memory size into the domain.memory
// field, which is the representation compatible with memory hotplug.
// Any memory requests/limits left over from the previous implementation are
// removed.
func (b *KVVM) setMemoryHotpluggable(memorySize resource.Quantity) {
	domain := &b.Resource.Spec.Template.Spec.Domain

	// Remember the previously configured maxGuest (if any) before the
	// memory section is rebuilt below.
	prevMaxGuest := int64(-1)
	if domain.Memory != nil && domain.Memory.MaxGuest != nil {
		prevMaxGuest = domain.Memory.MaxGuest.Value()
	}

	domain.Memory = &virtv1.Memory{
		Guest: &memorySize,
	}

	// Hotplug is only enabled for VMs with at least EnableMemoryHotplugThreshold of memory.
	threshold := resource.NewQuantity(EnableMemoryHotplugThreshold, resource.BinarySI)
	switch {
	case memorySize.Cmp(*threshold) >= 0:
		// Set maxMemory to enable hotplug when the feature gate allows it.
		if featuregates.Default().Enabled(featuregates.HotplugMemoryWithLiveMigration) {
			domain.Memory.MaxGuest = resource.NewQuantity(MaxMemorySizeForHotplug, resource.BinarySI)
		}
	case prevMaxGuest > 0:
		// Hotplug is disabled now (size below threshold) but maxGuest was
		// previously set: zero is a sentinel telling the updater to patch
		// memory and strip maxGuest before updating kvvm.
		domain.Memory.MaxGuest = resource.NewQuantity(0, resource.BinarySI)
	}

	// Drop memory requests/limits if the previous implementation set them.
	resources := &b.Resource.Spec.Template.Spec.Domain.Resources
	delete(resources.Requests, corev1.ResourceMemory)
	delete(resources.Limits, corev1.ResourceMemory)
}

// isVMRunningWithMemoryResources reports whether the kvvm is currently
// Running with memory expressed as both requests and limits — i.e. it was
// started by a previous controller version using the non-hotpluggable form.
func isVMRunningWithMemoryResources(kvvm *virtv1.VirtualMachine) bool {
	if kvvm == nil || kvvm.Status.PrintableStatus != virtv1.VirtualMachineStatusRunning {
		return false
	}

	resources := kvvm.Spec.Template.Spec.Domain.Resources
	_, reqSet := resources.Requests[corev1.ResourceMemory]
	_, limSet := resources.Limits[corev1.ResourceMemory]

	return reqSet && limSet
}

func GetCPURequest(cores int, coreFraction string) (*resource.Quantity, error) {
if coreFraction == "" {
return GetCPULimit(cores), nil
Expand Down Expand Up @@ -473,7 +546,7 @@ func (b *KVVM) SetProvisioning(p *v1alpha2.Provisioning) error {
}
}

func (b *KVVM) SetOsType(osType v1alpha2.OsType) error {
func (b *KVVM) SetOSType(osType v1alpha2.OsType) error {
switch osType {
case v1alpha2.Windows:
// Need for `029-use-OFVM_CODE-for-linux.patch`
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -119,25 +119,25 @@ func TestSetAffinity(t *testing.T) {
}
}

func TestSetOsType(t *testing.T) {
func TestSetOSType(t *testing.T) {
name := "test-name"
namespace := "test-namespace"

t.Run("Change from Windows to Generic should remove TPM", func(t *testing.T) {
builder := NewEmptyKVVM(types.NamespacedName{Name: name, Namespace: namespace}, KVVMOptions{})

err := builder.SetOsType(v1alpha2.Windows)
err := builder.SetOSType(v1alpha2.Windows)
if err != nil {
t.Fatalf("SetOsType(Windows) failed: %v", err)
t.Fatalf("SetOSType(Windows) failed: %v", err)
}

if builder.Resource.Spec.Template.Spec.Domain.Devices.TPM == nil {
t.Error("TPM should be present after setting Windows OS")
}

err = builder.SetOsType(v1alpha2.GenericOs)
err = builder.SetOSType(v1alpha2.GenericOs)
if err != nil {
t.Fatalf("SetOsType(GenericOs) failed: %v", err)
t.Fatalf("SetOSType(GenericOs) failed: %v", err)
}

if builder.Resource.Spec.Template.Spec.Domain.Devices.TPM != nil {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ func ApplyVirtualMachineSpec(
if err := kvvm.SetRunPolicy(vm.Spec.RunPolicy); err != nil {
return err
}
if err := kvvm.SetOsType(vm.Spec.OsType); err != nil {
if err := kvvm.SetOSType(vm.Spec.OsType); err != nil {
return err
}
if err := kvvm.SetBootloader(vm.Spec.Bootloader); err != nil {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ package reconciler
import (
"context"
"errors"
"fmt"
"reflect"
"strings"
"time"
Expand Down Expand Up @@ -102,7 +103,8 @@ handlersLoop:
switch {
case err == nil: // OK.
case errors.Is(err, ErrStopHandlerChain):
log.Debug("Handler chain execution stopped")
msg := fmt.Sprintf("Handler %s stopped chain execution", name)
log.Debug(msg)
result = MergeResults(result, res)
break handlersLoop
case k8serrors.IsConflict(err):
Expand Down
Loading
Loading