Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions cluster-autoscaler/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,7 @@ You should also take a look at the notes and "gotchas" for your specific cloud p
* [OVHcloud](./cloudprovider/ovhcloud/README.md)
* [Rancher](./cloudprovider/rancher/README.md)
* [Scaleway](./cloudprovider/scaleway/README.md)
* [Slicer](./cloudprovider/slicer/README.md)
* [TencentCloud](./cloudprovider/tencentcloud/README.md)
* [Utho](./cloudprovider/utho/README.md)
* [Vultr](./cloudprovider/vultr/README.md)
Expand Down Expand Up @@ -242,6 +243,7 @@ Supported cloud providers:
* OVHcloud https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/ovhcloud/README.md
* Rancher https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/rancher/README.md
* Scaleway https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/scaleway/README.md
* Slicer https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/slicer/README.md
* TencentCloud https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/tencentcloud/README.md
* Utho https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/utho/README.md
* Vultr https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/vultr/README.md
2 changes: 1 addition & 1 deletion cluster-autoscaler/charts/cluster-autoscaler/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -11,4 +11,4 @@ name: cluster-autoscaler
sources:
- https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler
type: application
version: 9.52.1
version: 9.52.2
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ rules:
{{- if (eq .Values.cloudProvider "kwok") }}
- create
{{- end }}
{{- if or (eq .Values.cloudProvider "kwok") (eq .Values.cloudProvider "huaweicloud") }}
{{- if or (eq .Values.cloudProvider "kwok") (eq .Values.cloudProvider "huaweicloud") (eq .Values.cloudProvider "slicer") }}
- delete
{{- end }}
- get
Expand Down
9 changes: 7 additions & 2 deletions cluster-autoscaler/cloudprovider/builder/builder_all.go
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
//go:build !gce && !aws && !azure && !kubemark && !alicloud && !magnum && !digitalocean && !clusterapi && !huaweicloud && !ionoscloud && !linode && !hetzner && !bizflycloud && !brightbox && !equinixmetal && !oci && !vultr && !tencentcloud && !scaleway && !externalgrpc && !civo && !rancher && !volcengine && !baiducloud && !cherry && !cloudstack && !exoscale && !kamatera && !ovhcloud && !kwok && !utho && !coreweave
// +build !gce,!aws,!azure,!kubemark,!alicloud,!magnum,!digitalocean,!clusterapi,!huaweicloud,!ionoscloud,!linode,!hetzner,!bizflycloud,!brightbox,!equinixmetal,!oci,!vultr,!tencentcloud,!scaleway,!externalgrpc,!civo,!rancher,!volcengine,!baiducloud,!cherry,!cloudstack,!exoscale,!kamatera,!ovhcloud,!kwok,!utho,!coreweave
//go:build !gce && !aws && !azure && !kubemark && !alicloud && !magnum && !digitalocean && !clusterapi && !huaweicloud && !ionoscloud && !linode && !hetzner && !bizflycloud && !brightbox && !equinixmetal && !oci && !vultr && !tencentcloud && !scaleway && !externalgrpc && !civo && !rancher && !volcengine && !baiducloud && !cherry && !cloudstack && !exoscale && !kamatera && !ovhcloud && !kwok && !utho && !coreweave && !slicer
// +build !gce,!aws,!azure,!kubemark,!alicloud,!magnum,!digitalocean,!clusterapi,!huaweicloud,!ionoscloud,!linode,!hetzner,!bizflycloud,!brightbox,!equinixmetal,!oci,!vultr,!tencentcloud,!scaleway,!externalgrpc,!civo,!rancher,!volcengine,!baiducloud,!cherry,!cloudstack,!exoscale,!kamatera,!ovhcloud,!kwok,!utho,!coreweave,!slicer

/*
Copyright 2018 The Kubernetes Authors.
Expand Down Expand Up @@ -48,6 +48,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/ovhcloud"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/rancher"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/scaleway"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/slicer"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/tencentcloud"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/utho"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/volcengine"
Expand Down Expand Up @@ -89,6 +90,7 @@ var AvailableCloudProviders = []string{
cloudprovider.VolcengineProviderName,
cloudprovider.UthoProviderName,
cloudprovider.CoreWeaveProviderName,
cloudprovider.SlicerProviderName,
}

// DefaultCloudProvider is GCE.
Expand Down Expand Up @@ -161,6 +163,9 @@ func buildCloudProvider(opts *coreoptions.AutoscalerOptions,
return utho.BuildUtho(opts, do, rl)
case cloudprovider.CoreWeaveProviderName:
return coreweave.BuildCoreWeave(opts, do, rl)
case cloudprovider.SlicerProviderName:
return slicer.BuildSlicer(opts, do, rl)
}

return nil
}
44 changes: 44 additions & 0 deletions cluster-autoscaler/cloudprovider/builder/builder_slicer.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
//go:build slicer
// +build slicer

/*
Copyright 2025 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package builder

import (
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider/slicer"
coreoptions "k8s.io/autoscaler/cluster-autoscaler/core/options"
"k8s.io/client-go/informers"
)

// AvailableCloudProviders supported by the slicer cloud provider builder.
// In this slicer-only build (//go:build slicer), slicer is the sole entry.
var AvailableCloudProviders = []string{
	cloudprovider.SlicerProviderName,
}

// DefaultCloudProvider for Slicer-only build is Slicer.
const DefaultCloudProvider = cloudprovider.SlicerProviderName

// buildCloudProvider returns the slicer cloud provider when
// opts.CloudProviderName selects it, and nil for any other name.
// The informer factory argument is unused by this provider.
func buildCloudProvider(opts *coreoptions.AutoscalerOptions, do cloudprovider.NodeGroupDiscoveryOptions, rl *cloudprovider.ResourceLimiter, _ informers.SharedInformerFactory) cloudprovider.CloudProvider {
	if opts.CloudProviderName == cloudprovider.SlicerProviderName {
		return slicer.BuildSlicer(opts, do, rl)
	}
	return nil
}
2 changes: 2 additions & 0 deletions cluster-autoscaler/cloudprovider/cloud_provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,8 @@ const (
RancherProviderName = "rancher"
// UthoProviderName gets the provider name of utho
UthoProviderName = "utho"
// SlicerProviderName gets the provider name of slicer
SlicerProviderName = "slicer"
)

// GpuConfig contains the label, type and the resource name for a GPU.
Expand Down
115 changes: 115 additions & 0 deletions cluster-autoscaler/cloudprovider/slicer/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
## Slicer Cloud Provider for Cluster Autoscaler

The cluster autoscaler for [Slicer](https://slicervm.com/) scales nodes on lightweight slicer microVMs.

The architecture is as follows:

* Slicer runs a K3s control plane on one virtualisation host.
* Slicer runs all agents on one or more additional virtualisation hosts running the Slicer REST API. Starting off with zero microVMs and relying on cluster autoscaler to add new ones as Pods cannot be scheduled to the existing set of nodes.

Check the documentation on [SlicerVM.com](https://docs.slicervm.com/examples/autoscaling-k3s/) for instructions on how to set up the cluster-autoscaler with Slicer.

## Configuration

The `cluster-autoscaler` with Slicer requires a configuration file, passed via the `--cloud-config` parameter. It is an INI file with the following fields:

| Key | Value | Mandatory | Default |
|-----|-------|-----------|---------|
| `global/k3s-url` | The URL of the K3s control plane API server | yes | none |
| `global/k3s-token` | The K3s join token for adding new agent nodes | yes | none |
| `global/ca-bundle` | Path to custom CA bundle file for Slicer API calls | no | none (uses system default CAs) |
| `global/default-min-size` | Default minimum size of a node group (must be > 0) | no | 1 |
| `global/default-max-size` | Default maximum size of a node group | no | 8 |
| `nodegroup \"slicer_host_group_name\"/slicer-url` | The URL of the Slicer API server for this node group | yes | none |
| `nodegroup \"slicer_host_group_name\"/slicer-token` | The authentication token for the Slicer API server | yes | none |
| `nodegroup \"slicer_host_group_name\"/min-size` | Minimum size for a specific node group | no | global/default-min-size |
| `nodegroup \"slicer_host_group_name\"/max-size` | Maximum size for a specific node group | no | global/default-max-size |

## Development

Follow the instructions in the [slicer docs](https://docs.slicervm.com/examples/autoscaling-k3s/) to set up a K3s cluster and host groups for nodes.

Make sure you are inside the `cluster-autoscaler` path of the [autoscaler repository](https://github.com/kubernetes/autoscaler).

### Run out of cluster

Start the cluster-autoscaler:

```bash
#!/bin/bash
go run . \
--cloud-provider=slicer \
--kubeconfig $HOME/k3s-cp-kubeconfig \
--scale-down-enabled=true \
--scale-down-delay-after-add=30s \
--scale-down-unneeded-time=30s \
--expendable-pods-priority-cutoff=-10 \
--cloud-config="$HOME/cloud-config.ini" \
--v=4
```

### Run in cluster

Build and publish an image:

```sh
REGISTRY=ttl.sh/openfaasltd BUILD_TAGS=slicer TAG=dev make dev-release
```

Create the cloud-config secret:

```sh
kubectl create secret generic cluster-autoscaler-cloud-config \
--from-file=cloud-config=cloud-config.ini \
-n kube-system
```

Create a `values.yaml` for the cluster-autoscaler chart:

```yaml
image:
repository: ttl.sh/openfaasltd/cluster-autoscaler-slicer-amd64
tag: dev

cloudProvider: slicer

fullnameOverride: cluster-autoscaler-slicer

autoDiscovery:
clusterName: k3s-slicer

# Mount the cluster-autoscaler-cloud-config secret
extraVolumeSecrets:
cluster-autoscaler-cloud-config:
name: cluster-autoscaler-cloud-config
mountPath: /etc/slicer/
items:
- key: cloud-config
path: cloud-config

# All your required parameters
extraArgs:
cloud-config: /etc/slicer/cloud-config
# Standard logging
logtostderr: true
stderrthreshold: info
v: 4

scale-down-enabled: true
scale-down-delay-after-add: "30s"
scale-down-unneeded-time: "30s"
expendable-pods-priority-cutoff: -10
```

Deploy with Helm:

```sh
helm install cluster-autoscaler-slicer charts/cluster-autoscaler \
--namespace=kube-system \
--values=values.yaml
```

To test the autoscaler do one of the following:

* Scale a deployment higher than can fit on the current set of control-plane nodes, then wait for the autoscaler to scale up the cluster.
* Or, create a taint / affinity / anti-affinity rule that will prevent a pod from being scheduled to the existing set of nodes, then wait for the autoscaler to scale up the cluster.
Loading