diff --git a/.github/wb-logo.png b/.github/wb-logo.png old mode 100644 new mode 100755 diff --git a/.gitignore b/.gitignore old mode 100644 new mode 100755 index c741d0e..045bab4 --- a/.gitignore +++ b/.gitignore @@ -7,4 +7,10 @@ cert-issuer.yaml *.tfvars node_modules staging/* -js/ \ No newline at end of file +js/ + +/install.sh +assets +logs +packages/ +*.gz \ No newline at end of file diff --git a/.prettierrc b/.prettierrc old mode 100644 new mode 100755 index 10d3bee..98adcf6 --- a/.prettierrc +++ b/.prettierrc @@ -1,8 +1,6 @@ { "parser": "typescript", - "trailingComma": "es5", + "trailingComma": "all", "singleQuote": true, - "bracketSpacing": false, - "jsxBracketSameLine": true, - "tabWidth": 2 + "semi": false } diff --git a/.vscode/settings.json b/.vscode/settings.json old mode 100644 new mode 100755 diff --git a/CHANGELOG.md b/CHANGELOG.md old mode 100644 new mode 100755 diff --git a/LICENSE b/LICENSE old mode 100644 new mode 100755 diff --git a/README.md b/README.md old mode 100644 new mode 100755 diff --git a/SECURITY.md b/SECURITY.md old mode 100644 new mode 100755 diff --git a/installer/build.ts b/installer/build.ts new file mode 100755 index 0000000..14a78a9 --- /dev/null +++ b/installer/build.ts @@ -0,0 +1,79 @@ +import * as fs from 'fs' +// import * as readline from 'readline' +import { spawnSync } from 'child_process' + +const INSTALLER_SCRIPT = `${__dirname}/install.sh` +const INSTALLER_SCRIPT_VARIABLE_DIR = 'DIR' + +const generateAirgapScript = Boolean(process.env.AIRGAP) + +function readBashVariables(filePath: string) { + const content = fs.readFileSync(filePath, { encoding: 'utf-8' }) + const lines = content.split('\n') + const result: Record = {} + for (const line of lines) { + const l = line.startsWith('export ') ? 
line.slice(7) : line + const match = l.match(/^\s*([\w-]+)\s*=\s*(.*)\s*/) + if (match) { + const variable = match[1] + const value = match[2].replace(/^"|^'|"$/g, '') + const output = spawnSync('bash', ['-c', `echo ${value}`]) + + result[variable] = output.stdout.toString().trim() + } + } + return result +} + +function getImportLines(filePath: string) { + const content = fs.readFileSync(filePath, { encoding: 'utf-8' }) + const lines = content.split('\n') + const imports: string[] = [] + let withinTags = false + for (const line of lines) { + if (line.startsWith('# ')) withinTags = false + if (withinTags && !line.startsWith('#')) imports.push(line) + if (line.includes('# ')) withinTags = true + } + return imports +} + +function main() { + const scriptVariables = readBashVariables(INSTALLER_SCRIPT) + const dir = scriptVariables[INSTALLER_SCRIPT_VARIABLE_DIR] + + if (dir == null) { + console.error( + `Could not find '${INSTALLER_SCRIPT_VARIABLE_DIR}' variable in bash script`, + ) + return + } + + let content = fs + .readFileSync(INSTALLER_SCRIPT, { encoding: 'utf-8' }) + .split('\n') + + if (generateAirgapScript) + content = content.map((d) => + d.startsWith('export AIRGAP=') ? 'export AIRGAP=1' : d, + ) + + for (const importLine of getImportLines(INSTALLER_SCRIPT)) { + const importScriptPath = importLine.replace( + `. $${INSTALLER_SCRIPT_VARIABLE_DIR}`, + dir, + ) + console.log(`Importing: ${importScriptPath} (${importLine})`) + const importScript = fs.readFileSync(importScriptPath, { + encoding: 'utf-8', + }) + + const i = content.findIndex((l) => l === importLine) + if (i === -1) continue + content[i] = `${importScript}\n` + } + + fs.writeFileSync('./install.sh', content.join('\n')) +} + +main() diff --git a/installer/bundle.sh b/installer/bundle.sh new file mode 100644 index 0000000..43b4afa --- /dev/null +++ b/installer/bundle.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +set -e + +DIR=. + +# +. $DIR/installer/configs/1.27.2.sh +. $DIR/installer/configs/base.sh +. 
$DIR/installer/common/kubernetes.sh +. $DIR/installer/common/addons.sh +. $DIR/installer/common/kubeadm.sh +. $DIR/installer/common/logging.sh +. $DIR/installer/common/discover.sh +. $DIR/installer/common/packages.sh +. $DIR/installer/common/semver.sh +. $DIR/installer/common/utils.sh +. $DIR/installer/common/dependencies.sh +. $DIR/installer/common/images.sh +# + +log_step "Generating installer" +pnpm build:installer:airgap + +kubernetes_packages_download +dependencies_download +images_download +addons_download + +log_step "Creating tar" +tar -czvf installer-$ARCH-$KUBERNETES_VERSION.tar.gz $DIR/install.sh $DIR/packages + +printf "\n" \ No newline at end of file diff --git a/installer/common/addons.sh b/installer/common/addons.sh new file mode 100644 index 0000000..ec845a0 --- /dev/null +++ b/installer/common/addons.sh @@ -0,0 +1,18 @@ +function addons_download() { + mkdir -p $MANIFEST + if [ -z "$AIRGAP" ] || [ "$AIRGAP" != "1" ]; then + log_step "Downloading addons" + + local manifest_path=$MANIFEST/contour.yaml + # TODO: figure out a way to lock the script down + local manifest_url="https://projectcontour.io/quickstart/contour.yaml" + package_download_url_with_retry "$manifest_url" "$manifest_path" + fi +} + +function addons_install() { + mkdir -p $MANIFEST + + local contour=$MANIFEST/contour.yaml + kubectl apply -f $contour +} diff --git a/installer/common/dependencies.sh b/installer/common/dependencies.sh new file mode 100644 index 0000000..c2cfd12 --- /dev/null +++ b/installer/common/dependencies.sh @@ -0,0 +1,13 @@ +function install_dependencies() { + dependencies_download +} + +function dependencies_download() { + mkdir -p $DEPENDENCIES + if [ -z "$AIRGAP" ] || [ "$AIRGAP" != "1" ]; then + log_step "Downloading host dependencies" + pushd $DEPENDENCIES > /dev/null 2>&1 + package_download "openssl.tar.gz" "https://www.openssl.org/source/openssl-$OPENSSL_VERSION.tar.gz" + popd > /dev/null 2>&1 + fi +} diff --git a/installer/common/discover.sh 
b/installer/common/discover.sh new file mode 100644 index 0000000..ea36781 --- /dev/null +++ b/installer/common/discover.sh @@ -0,0 +1,36 @@ +LSB_DIST= +DIST_VERSION= +PRIVATE_IP= + +function discover() { + discover_lsb + discover_private_ip + + printf "\n--- System Info ---\n" + printf "Linux: $LSB_DIST\n" + printf "Version: $DIST_VERSION\n" + printf "Private IP: $PRIVATE_IP\n" + printf "\n" +} + +function discover_private_ip() { + if [ -n "$PRIVATE_ADDRESS" ]; then + return 0 + fi + + if command_exists "ifconfig"; then + PRIVATE_IP=$(ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1') + elif command_exists "ip"; then + PRIVATE_IP=$(ip -4 addr | grep -oP '(?<=inet\s)\d+(\.\d+){3}' | grep -v '127.0.0.1') + fi +} + + +function discover_lsb() { + if [ -f /etc/os-release ] && [ -r /etc/os-release ]; then + LSB_DIST="$(. /etc/os-release && echo "$ID")" + DIST_VERSION="$(. /etc/os-release && echo "$VERSION_ID")" + else + bail "Error: Unknown operating system." 
+ fi +} diff --git a/installer/common/images.sh b/installer/common/images.sh new file mode 100644 index 0000000..678b50b --- /dev/null +++ b/installer/common/images.sh @@ -0,0 +1,46 @@ +function image_download() { + local name=$1 + local url=$2 + + printf "Downloading $name\n" + + if [ -f "$name" ]; then + printf "$name already downloaded\n" + return + fi + + if command_exists docker; then + docker pull $url > /dev/null 2>&1 + docker save $url | gzip > $name.tar.gz + elif command_exists ctr; then + ctr images pull --plain-http $url > /dev/null 2>&1 + ctr --namespace=default images export - $url | gzip > $name.tar.gz + else + log_warning "No support client installed for pulling images" + fi +} + +function images_download() { + log_step "Downloading images" + + mkdir -p $IMAGES + if [ -z "$AIRGAP" ] || [ "$AIRGAP" != "1" ]; then + pushd $IMAGES > /dev/null 2>&1 + image_download "coredns" $IMAGE_COREDNS + image_download "etcd" $IMAGE_ETCD + image_download "kube-apiserver" $IMAGE_KUBE_API + image_download "kube-controller-manager" $IMAGE_KUBE_CONTROLLER + image_download "kube-scheduler" $IMAGE_KUBE_SCHEDULER + image_download "kube-proxy" $IMAGE_KUBE_PROXY + image_download "pause" $IMAGE_PAUSE + + # ingress + image_download "contour" $IMAGE_CONTOUR + image_download "envoy" $IMAGE_ENVOY + popd > /dev/null 2>&1 + fi +} + +function images_load() { + find "$1" -type f | xargs -I {} bash -c "cat {} | gunzip | ctr -a $(kubeadm_get_containerd_sock) -n=k8s.io images import -" +} diff --git a/installer/common/kubeadm.sh b/installer/common/kubeadm.sh new file mode 100644 index 0000000..2444905 --- /dev/null +++ b/installer/common/kubeadm.sh @@ -0,0 +1,8 @@ +function kubeadm_api_is_healthy() { + addr=$PRIVATE_IP:6443 + curl --globoff --noproxy "*" --fail --silent --insecure "https://$addr/healthz" >/dev/null +} + +function kubeadm_get_containerd_sock() { + echo "/run/containerd/containerd.sock" +} \ No newline at end of file diff --git a/installer/common/kubernetes.sh 
b/installer/common/kubernetes.sh new file mode 100755 index 0000000..8cc55eb --- /dev/null +++ b/installer/common/kubernetes.sh @@ -0,0 +1,217 @@ +CNI_BIN=/opt/cni/bin + +function kubernetes_host() { + kubernetes_load_modules +} + +function kubernetes_packages_download() { + mkdir -p $PACKAGES + if [ -z "$AIRGAP" ] || [ "$AIRGAP" != "1" ]; then + log_step "Downloading packages" + pushd $PACKAGES > /dev/null 2>&1 + package_download "runc" "https://github.com/opencontainers/runc/releases/download/v$RUNC_VERSION/runc.$ARCH" + package_download "cni-plugins.tgz" "https://github.com/containernetworking/plugins/releases/download/v$CNI_PLUGINS_VERSION/cni-plugins-linux-$ARCH-v$CNI_PLUGINS_VERSION.tgz" + package_download "kubeadm" "https://dl.k8s.io/release/v$KUBERNETES_VERSION/bin/linux/$ARCH/kubeadm" + package_download "kubelet" "https://dl.k8s.io/release/v$KUBERNETES_VERSION/bin/linux/$ARCH/kubelet" + package_download "kubectl" "https://dl.k8s.io/release/v$KUBERNETES_VERSION/bin/linux/$ARCH/kubectl" + package_download "containerd.tar.gz" "https://github.com/containerd/containerd/releases/download/v$CONTAINERD_VERSION/containerd-$CONTAINERD_VERSION-linux-$ARCH.tar.gz" + package_download "crictl.tar.gz" "https://github.com/kubernetes-sigs/cri-tools/releases/download/v$CRICTL_VERSION/crictl-v$CRICTL_VERSION-linux-$ARCH.tar.gz" + popd > /dev/null 2>&1 + fi +} + +function kubernetes_install_packages() { + kubernetes_packages_download + + log_step "Installing packages" + pushd $PACKAGES > /dev/null 2>&1 + printf "Installing containerd\n" + tar -C /usr/local -xzf "$(package_filepath "containerd.tar.gz")" + kubernetes_configure_containerd_systemd + + printf "Installing runc\n" + install -m 755 $(package_filepath "runc") /usr/local/sbin/runc + + printf "Installing cni plugins\n" + mkdir -p $CNI_BIN + tar -C $CNI_BIN -xzf "$(package_filepath "cni-plugins.tgz")" + + printf "Installing crictl\n" + tar -C /usr/bin -xzf $(package_filepath "crictl.tar.gz") + chmod a+rx /usr/bin/crictl 
+ + printf "Installing kubeadm\n" + cp -f "$(package_filepath "kubeadm")" /usr/bin/ + chmod a+rx /usr/bin/kubeadm + + printf "Installing kubectl\n" + cp -f "$(package_filepath "kubectl")" /usr/bin/ + chmod a+rx /usr/bin/kubectl + + + printf "Installing kubelet\n" + cp -f "$(package_filepath "kubelet")" /usr/bin/ + chmod a+rx /usr/bin/kubelet + kubernetes_configure_kubelet_systemd + popd > /dev/null 2>&1 + + printf "Loading Kubelet\n" + systemctl daemon-reload + systemctl enable kubelet && systemctl restart kubelet + + log_success "Kubernetes packages installed" +} + +function kubernetes_configure_containerd_systemd() { + mkdir -p /usr/local/lib/systemd/system + cat > "containerd.service" < "kubelet.service" < "10-kubeadm.conf" < /etc/modules-load.d/k8s.conf +overlay +br_netfilter + +ip_tables +ip6_tables + +ip_vs +ip_vs_rr +ip_vs_wrr +ip_vs_sh + +nf_conntrack +EOF + modprobe overlay + modprobe br_netfilter + + modprobe ip_tables + modprobe ip6_tables + + modprobe ip_vs + modprobe ip_vs_rr + modprobe ip_vs_wrr + modprobe ip_vs_sh + + modprobe nf_conntrack +} + +function kubernetes_load_sysctl() { + cat < /etc/sysctl.d/k8s-ipv4.conf +net.bridge.bridge-nf-call-iptables = 1 +net.ipv4.ip_forward = 1 + +net.bridge.bridge-nf-call-ip6tables = 1 +net.ipv6.ip_forward = 1 +EOF + + sysctl --system + + if [ "$(cat /proc/sys/net/ipv4/ip_forward)" = "0" ]; then + bail "Failed to enable IP4 forwarding." + fi + + if [ "$(cat /proc/sys/net/ipv6/ip_forward)" = "0" ]; then + bail "Failed to enable IP6 forwarding." + fi +} + +function kubernetes_has_packages() { + if ! command_exists kubelet; then + printf "kubelet command missing - will install host components\n" + return 1 + fi + if ! command_exists kubeadm; then + printf "kubeadm command missing - will install host components\n" + return 1 + fi + if ! command_exists kubectl; then + printf "kubectl command missing - will install host components\n" + return 1 + fi + if ! 
command_exists crictl; then + printf "crictl command missing - will install host components\n" + return 1 + fi +} + diff --git a/installer/common/logging.sh b/installer/common/logging.sh new file mode 100755 index 0000000..cd0f7e7 --- /dev/null +++ b/installer/common/logging.sh @@ -0,0 +1,35 @@ +GREEN='\033[0;32m' +BLUE='\033[0;94m' +LIGHT_BLUE='\033[0;34m' +YELLOW='\033[0;33m' +RED='\033[0;31m' +NC='\033[0m' # No Color + +function log_success() { + printf "${GREEN}✔ $1${NC}\n" 1>&2 +} + +function log() { + printf "%s\n" "$1" 1>&2 +} + +function log_step() { + printf "\n${BLUE}⚙ $1${NC}\n" 1>&2 +} + +function log_substep() { + printf "\t${LIGHT_BLUE}- $1${NC}\n" 1>&2 +} + +function log_fail() { + printf "${RED}$1${NC}\n" 1>&2 +} + +function log_warn() { + printf "${YELLOW}$1${NC}\n" 1>&2 +} + +function bail() { + log_fail "$@" + exit 1 +} \ No newline at end of file diff --git a/installer/common/packages.sh b/installer/common/packages.sh new file mode 100755 index 0000000..07d06ba --- /dev/null +++ b/installer/common/packages.sh @@ -0,0 +1,56 @@ +function package_filepath() { + local package="$1" + mkdir -p assets + echo "assets/${package}" +} + +function package_download_url_with_retry() { + local url="$1" + local filepath="$2" + local max_retries="${3:-10}" + + local errcode= + local i=0 + while [ $i -ne "$max_retries" ]; do + errcode=0 + curl -fL -o "${filepath}" "${url}" || errcode="$?" 
+ # 18 transfer closed with outstanding read data remaining + # 56 recv failure (connection reset by peer) + if [ "$errcode" -eq "18" ] || [ "$errcode" -eq "56" ]; then + i=$(($i+1)) + continue + fi + return "$errcode" + done + return "$errcode" +} + +function package_download() { + local package="$1" + local package_url="$2" + + if [ -z "$package" ]; then + bail "package_download called with no package name" + fi + + mkdir -p assets + touch assets/Manifest + + local etag="$(grep -F "${package}" assets/Manifest | awk 'NR == 1 {print $2}')" + local checksum="$(grep -F "${package}" assets/Manifest | awk 'NR == 1 {print $3}')" + + local newetag="$(curl -IfsSL "$package_url" | grep -i 'etag:' | sed -r 's/.*"(.*)".*/\1/')" + if [ -n "${etag}" ] && [ "${etag}" = "${newetag}" ]; then + echo "Package ${package} already exists, not downloading" + return + fi + + log_step "Downloading ${package}" + + local filepath="$(package_filepath "${package}")" + package_download_url_with_retry "$package_url" "$filepath" + + checksum="$(md5sum "${filepath}" | awk '{print $1}')" + echo "${package} ${newetag} ${checksum}" >> assets/Manifest +} + diff --git a/installer/common/semver.sh b/installer/common/semver.sh new file mode 100755 index 0000000..1978667 --- /dev/null +++ b/installer/common/semver.sh @@ -0,0 +1,49 @@ + +function parse_semver() { + local semver="$1" + local prefix="$2" + + local -a parsed_ver + IFS='.' 
read -ra parsed_ver <<< "$semver" + + eval "${prefix}[0]=${parsed_ver[0]}" + eval "${prefix}[1]=${parsed_ver[1]}" + eval "${prefix}[2]=${parsed_ver[2]}" +} + +SEMVER_COMPARE_RESULT= +function semver_compare() { + semverParse "$1" + _a_major="${major:-0}" + _a_minor="${minor:-0}" + _a_patch="${patch:-0}" + semverParse "$2" + _b_major="${major:-0}" + _b_minor="${minor:-0}" + _b_patch="${patch:-0}" + if [ "$_a_major" -lt "$_b_major" ]; then + SEMVER_COMPARE_RESULT=-1 + return + fi + if [ "$_a_major" -gt "$_b_major" ]; then + SEMVER_COMPARE_RESULT=1 + return + fi + if [ "$_a_minor" -lt "$_b_minor" ]; then + SEMVER_COMPARE_RESULT=-1 + return + fi + if [ "$_a_minor" -gt "$_b_minor" ]; then + SEMVER_COMPARE_RESULT=1 + return + fi + if [ "$_a_patch" -lt "$_b_patch" ]; then + SEMVER_COMPARE_RESULT=-1 + return + fi + if [ "$_a_patch" -gt "$_b_patch" ]; then + SEMVER_COMPARE_RESULT=1 + return + fi + SEMVER_COMPARE_RESULT=0 +} diff --git a/installer/common/utils.sh b/installer/common/utils.sh new file mode 100755 index 0000000..61522bc --- /dev/null +++ b/installer/common/utils.sh @@ -0,0 +1,142 @@ +function require_root_user() { + local user="$(id -un 2>/dev/null || true)" + if [ "$user" != "root" ]; then + bail "Error: this installer needs to be run as root." + fi +} + +function path_add() { + if [ -d "$1" ] && [[ ":$PATH:" != *":$1:"* ]]; then + PATH="${PATH:+"$PATH:"}$1" + fi +} + +function command_exists() { + command -v "$@" > /dev/null 2>&1 +} + +function is_valid_ipv4() { + if echo "$1" | grep -qs '^[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*$'; then + return 0 + else + return 1 + fi +} + +function is_valid_ipv6() { + if echo "$1" | grep -qs "^\([0-9a-fA-F]\{0,4\}:\)\{1,7\}[0-9a-fA-F]\{0,4\}$"; then + return 0 + else + return 1 + fi +} + +function rm_file() { + if [ -f "$1" ]; then + rm $1 + fi +} + +function must_swap_off() { + if swap_is_on || swap_is_enabled; then + printf "\n${YELLOW}This application is incompatible with memory swapping enabled. 
Disable swap to continue?${NC} " + if confirmY ; then + printf "=> Running swapoff --all\n" + swapoff --all + if swap_fstab_enabled; then + swap_fstab_disable + fi + if swap_service_enabled; then + swap_service_disable + fi + if swap_azure_linux_agent_enabled; then + swap_azure_linux_agent_disable + fi + logSuccess "Swap disabled.\n" + else + bail "\nDisable swap with swapoff --all and remove all swap entries from /etc/fstab before re-running this script" + fi + fi +} + +function swap_is_on() { + swapon --summary | grep --quiet " " +} + +function swap_is_enabled() { + swap_fstab_enabled || swap_service_enabled || swap_azure_linux_agent_enabled +} + +function swap_fstab_enabled() { + cat /etc/fstab | grep --quiet --ignore-case --extended-regexp '^[^#]+swap' +} + +function swap_fstab_disable() { + printf "=> Commenting swap entries in /etc/fstab \n" + sed --in-place=.bak '/\bswap\b/ s/^/#/' /etc/fstab + printf "=> A backup of /etc/fstab has been made at /etc/fstab.bak\n\n" + printf "\n${YELLOW}Changes have been made to /etc/fstab. We recommend reviewing them after completing this installation to ensure mounts are correctly configured.${NC}\n\n" + sleep 5 # for emphasis of the above ^ +} + +# This is a service on some Azure VMs that just enables swap +function swap_service_enabled() { + systemctl -q is-enabled temp-disk-swapfile 2>/dev/null +} + +function swap_service_disable() { + printf "=> Disabling temp-disk-swapfile service\n" + systemctl disable temp-disk-swapfile +} + +function swap_azure_linux_agent_enabled() { + cat /etc/waagent.conf 2>/dev/null | grep -q 'ResourceDisk.EnableSwap=y' +} + +# retry a command if it fails up to $1 number of times +# Usage: cmd_retry 3 curl --globoff --noproxy "*" --fail --silent --insecure https://10.128.0.25:6443/healthz +function cmd_retry() { + local retries=$1 + shift + + local count=0 + until "$@"; do + exit=$? 
+ wait=$((2 ** $count)) + count=$(($count + 1)) + if [ $count -lt $retries ]; then + echo "Retry $count/$retries exited $exit, retrying in $wait seconds..." + sleep $wait + else + echo "Retry $count/$retries exited $exit, no more retries left." + return $exit + fi + done + return 0 +} + +function spinner_until() { + local timeoutSeconds="$1" + local cmd="$2" + local args=${@:3} + + if [ -z "$timeoutSeconds" ]; then + timeoutSeconds=-1 + fi + + local delay=1 + local elapsed=0 + local spinstr='|/-\' + + while ! $cmd $args; do + elapsed=$((elapsed + delay)) + if [ "$timeoutSeconds" -ge 0 ] && [ "$elapsed" -gt "$timeoutSeconds" ]; then + return 1 + fi + local temp=${spinstr#?} + printf " [%c] " "$spinstr" + local spinstr=$temp${spinstr%"$temp"} + sleep $delay + printf "\b\b\b\b\b\b" + done +} \ No newline at end of file diff --git a/installer/configs/1.27.2.sh b/installer/configs/1.27.2.sh new file mode 100644 index 0000000..ec61cd1 --- /dev/null +++ b/installer/configs/1.27.2.sh @@ -0,0 +1,21 @@ +export AIRGAP=0 +export ARCH="amd64" + +export KUBERNETES_VERSION="1.27.2" +export CRICTL_VERSION="1.27.0" +export CNI_PLUGINS_VERSION="1.3.0" +export CONTAINERD_VERSION="1.7.1" +export RUNC_VERSION="1.1.7" + +export OPENSSL_VERSION="3.1.1" + +export IMAGE_COREDNS=registry.k8s.io/coredns/coredns:v1.10.1 +export IMAGE_ETCD=registry.k8s.io/etcd:3.5.7-0 +export IMAGE_KUBE_API=registry.k8s.io/kube-apiserver:v$KUBERNETES_VERSION +export IMAGE_KUBE_CONTROLLER=registry.k8s.io/kube-controller-manager:v$KUBERNETES_VERSION +export IMAGE_KUBE_PROXY=registry.k8s.io/kube-proxy:v$KUBERNETES_VERSION +export IMAGE_KUBE_SCHEDULER=registry.k8s.io/kube-scheduler:v$KUBERNETES_VERSION +export IMAGE_PAUSE=registry.k8s.io/pause:3.9 +export IMAGE_CONTOUR=ghcr.io/projectcontour/contour:v1.25.0 +export IMAGE_ENVOY=docker.io/envoyproxy/envoy:v1.26.1 +export IMAGE_ENVOY=docker.io/envoyproxy/envoy:v1.26.1 diff --git a/installer/configs/base.sh b/installer/configs/base.sh new file mode 100644 index 
0000000..064f752 --- /dev/null +++ b/installer/configs/base.sh @@ -0,0 +1,5 @@ +export PACKAGES=$DIR/packages/kubernetes/$KUBERNETES_VERSION +export IMAGES=$DIR/packages/kubernetes/$KUBERNETES_VERSION/images +export MANIFEST=$DIR/packages/kubernetes/$KUBERNETES_VERSION/manifests +export DEPENDENCIES=$DIR/packages/deps +export HOSTNAME="$(hostname | tr '[:upper:]' '[:lower:]')" \ No newline at end of file diff --git a/installer/install.sh b/installer/install.sh new file mode 100755 index 0000000..d180be5 --- /dev/null +++ b/installer/install.sh @@ -0,0 +1,79 @@ +#!/bin/bash + +set -e + +DIR=. + +# +. $DIR/installer/configs/1.27.2.sh +. $DIR/installer/configs/base.sh +. $DIR/installer/common/kubernetes.sh +. $DIR/installer/common/addons.sh +. $DIR/installer/common/kubeadm.sh +. $DIR/installer/common/logging.sh +. $DIR/installer/common/discover.sh +. $DIR/installer/common/packages.sh +. $DIR/installer/common/semver.sh +. $DIR/installer/common/utils.sh +. $DIR/installer/common/dependencies.sh +. $DIR/installer/common/images.sh +# + +function setup() { + require_root_user + path_add "/usr/local/bin" + + if [ "$AIRGAP" = "1" ]; then + log_step "Running in airgapped enviroment." + fi + + kubernetes_load_modules + kubernetes_load_sysctl + must_swap_off + kubernetes_install_packages + images_download + images_load $IMAGES +} + +function init() { + set +o pipefail + + cmd_retry 3 kubeadm init \ + --ignore-preflight-errors="all" \ + | tee /tmp/kubeadm-init + + log_step "Waiting for kubernetes api health to report ok" + if ! spinner_until 120 kubeadm_api_is_healthy; then + bail "Kubernetes API failed to report healthy" + fi + + printf "\n\n" + + export KUBECONFIG=/etc/kubernetes/admin.conf + + kubectl cluster-info + log_success "Cluster initialized" +} + +# at this point kubectl should be configured. 
+function addons() { + addons_download + addons_install +} + +function main() { + log_step "Running install with the argument(s): $*" + + discover + setup + init + addons + + printf "\n" +} + +LOGS_DIR="$DIR/logs" +mkdir -p $LOGS_DIR +LOGFILE="$LOGS_DIR/install-$(date +"%Y-%m-%dT%H-%M-%S").log" + +main "$@" 2>&1 | tee $LOGFILE \ No newline at end of file diff --git a/legacy/terraform/aws/README.md b/legacy/terraform/aws/README.md old mode 100644 new mode 100755 diff --git a/legacy/terraform/aws/infra/infra.tf b/legacy/terraform/aws/infra/infra.tf old mode 100644 new mode 100755 diff --git a/legacy/terraform/aws/kube/kube.tf b/legacy/terraform/aws/kube/kube.tf old mode 100644 new mode 100755 diff --git a/legacy/terraform/aws/local.tf b/legacy/terraform/aws/local.tf old mode 100644 new mode 100755 diff --git a/legacy/terraform/azure/README.md b/legacy/terraform/azure/README.md old mode 100644 new mode 100755 diff --git a/legacy/terraform/azure/infra/main.tf b/legacy/terraform/azure/infra/main.tf old mode 100644 new mode 100755 diff --git a/legacy/terraform/azure/infra/outputs.tf b/legacy/terraform/azure/infra/outputs.tf old mode 100644 new mode 100755 diff --git a/legacy/terraform/azure/infra/variables.tf b/legacy/terraform/azure/infra/variables.tf old mode 100644 new mode 100755 diff --git a/legacy/terraform/azure/kube/main.tf b/legacy/terraform/azure/kube/main.tf old mode 100644 new mode 100755 diff --git a/legacy/terraform/azure/kube_yaml/main.tf b/legacy/terraform/azure/kube_yaml/main.tf old mode 100644 new mode 100755 diff --git a/legacy/terraform/azure/local.tf b/legacy/terraform/azure/local.tf old mode 100644 new mode 100755 diff --git a/package.json b/package.json old mode 100644 new mode 100755 index 9327f9a..fc6e7da --- a/package.json +++ b/package.json @@ -3,36 +3,21 @@ "version": "0.34.0", "description": "W&B Local is the self hosted version of Weights & Biases", "repository": "git@github.com:wandb/local.git", - "author": "Chris Van Pelt ", + "author": 
"Weights & Biases ", "license": "MIT", "private": true, - "devDependencies": { - "@octokit/rest": "^18.0.6", - "@types/lodash": "^4.14.171", - "auto-release-notes": "git://github.com/wandb/auto-release-notes.git#v0.2.0", - "lodash": "^4.17.21", - "release-it": "^14.1.0", - "typescript": "^4.3.5" + "scripts": { + "build:installer": "ts-node installer/build.ts", + "build:installer:airgap": "AIRGAP=true ts-node installer/build.ts", + "bundle:installer": "bash installer/bundle.sh" + }, + "dependencies": { + "typescript": "^5.0.4" }, - "release-it": { - "git": { - "requireCleanWorkingDir": false - }, - "github": { - "release": true, - "releaseNotes": "cat ./staging/RELEASE.md" - }, - "npm": { - "publish": false, - "release": false - }, - "hooks": { - "before:init": "rm -rf ./staging/RELEASE.md" - }, - "plugins": { - "./js/plugins/release.js": { - "legacy": false - } - } + "devDependencies": { + "@types/node": "^20.2.5", + "eslint": "^8.41.0", + "prettier": "^2.8.8", + "ts-node": "^10.9.1" } } \ No newline at end of file diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml new file mode 100755 index 0000000..c7d58b7 --- /dev/null +++ b/pnpm-lock.yaml @@ -0,0 +1,772 @@ +lockfileVersion: '6.0' + +dependencies: + typescript: + specifier: ^5.0.4 + version: 5.0.4 + +devDependencies: + '@types/node': + specifier: ^20.2.5 + version: 20.2.5 + eslint: + specifier: ^8.41.0 + version: 8.41.0 + prettier: + specifier: ^2.8.8 + version: 2.8.8 + ts-node: + specifier: ^10.9.1 + version: 10.9.1(@types/node@20.2.5)(typescript@5.0.4) + +packages: + + /@cspotcode/source-map-support@0.8.1: + resolution: {integrity: sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==} + engines: {node: '>=12'} + dependencies: + '@jridgewell/trace-mapping': 0.3.9 + dev: true + + /@eslint-community/eslint-utils@4.4.0(eslint@8.41.0): + resolution: {integrity: sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==} + engines: {node: 
^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 + dependencies: + eslint: 8.41.0 + eslint-visitor-keys: 3.4.1 + dev: true + + /@eslint-community/regexpp@4.5.1: + resolution: {integrity: sha512-Z5ba73P98O1KUYCCJTUeVpja9RcGoMdncZ6T49FCUl2lN38JtCJ+3WgIDBv0AuY4WChU5PmtJmOCTlN6FZTFKQ==} + engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + dev: true + + /@eslint/eslintrc@2.0.3: + resolution: {integrity: sha512-+5gy6OQfk+xx3q0d6jGZZC3f3KzAkXc/IanVxd1is/VIIziRqqt3ongQz0FiTUXqTk0c7aDB3OaFuKnuSoJicQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + ajv: 6.12.6 + debug: 4.3.4 + espree: 9.5.2 + globals: 13.20.0 + ignore: 5.2.4 + import-fresh: 3.3.0 + js-yaml: 4.1.0 + minimatch: 3.1.2 + strip-json-comments: 3.1.1 + transitivePeerDependencies: + - supports-color + dev: true + + /@eslint/js@8.41.0: + resolution: {integrity: sha512-LxcyMGxwmTh2lY9FwHPGWOHmYFCZvbrFCBZL4FzSSsxsRPuhrYUg/49/0KDfW8tnIEaEHtfmn6+NPN+1DqaNmA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dev: true + + /@humanwhocodes/config-array@0.11.8: + resolution: {integrity: sha512-UybHIJzJnR5Qc/MsD9Kr+RpO2h+/P1GhOwdiLPXK5TWk5sgTdu88bTD9UP+CKbPPh5Rni1u0GjAdYQLemG8g+g==} + engines: {node: '>=10.10.0'} + dependencies: + '@humanwhocodes/object-schema': 1.2.1 + debug: 4.3.4 + minimatch: 3.1.2 + transitivePeerDependencies: + - supports-color + dev: true + + /@humanwhocodes/module-importer@1.0.1: + resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==} + engines: {node: '>=12.22'} + dev: true + + /@humanwhocodes/object-schema@1.2.1: + resolution: {integrity: sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==} + dev: true + + /@jridgewell/resolve-uri@3.1.1: + resolution: {integrity: sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==} + engines: {node: '>=6.0.0'} + dev: true + + 
/@jridgewell/sourcemap-codec@1.4.15: + resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==} + dev: true + + /@jridgewell/trace-mapping@0.3.9: + resolution: {integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==} + dependencies: + '@jridgewell/resolve-uri': 3.1.1 + '@jridgewell/sourcemap-codec': 1.4.15 + dev: true + + /@nodelib/fs.scandir@2.1.5: + resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} + engines: {node: '>= 8'} + dependencies: + '@nodelib/fs.stat': 2.0.5 + run-parallel: 1.2.0 + dev: true + + /@nodelib/fs.stat@2.0.5: + resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==} + engines: {node: '>= 8'} + dev: true + + /@nodelib/fs.walk@1.2.8: + resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} + engines: {node: '>= 8'} + dependencies: + '@nodelib/fs.scandir': 2.1.5 + fastq: 1.15.0 + dev: true + + /@tsconfig/node10@1.0.9: + resolution: {integrity: sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==} + dev: true + + /@tsconfig/node12@1.0.11: + resolution: {integrity: sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==} + dev: true + + /@tsconfig/node14@1.0.3: + resolution: {integrity: sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==} + dev: true + + /@tsconfig/node16@1.0.4: + resolution: {integrity: sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==} + dev: true + + /@types/node@20.2.5: + resolution: {integrity: sha512-JJulVEQXmiY9Px5axXHeYGLSjhkZEnD+MDPDGbCbIAbMslkKwmygtZFy1X6s/075Yo94sf8GuSlFfPzysQrWZQ==} + dev: true + + /acorn-jsx@5.3.2(acorn@8.8.2): + 
resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + dependencies: + acorn: 8.8.2 + dev: true + + /acorn-walk@8.2.0: + resolution: {integrity: sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==} + engines: {node: '>=0.4.0'} + dev: true + + /acorn@8.8.2: + resolution: {integrity: sha512-xjIYgE8HBrkpd/sJqOGNspf8uHG+NOHGOw6a/Urj8taM2EXfdNAH2oFcPeIFfsv3+kz/mJrS5VuMqbNLjCa2vw==} + engines: {node: '>=0.4.0'} + hasBin: true + dev: true + + /ajv@6.12.6: + resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} + dependencies: + fast-deep-equal: 3.1.3 + fast-json-stable-stringify: 2.1.0 + json-schema-traverse: 0.4.1 + uri-js: 4.4.1 + dev: true + + /ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + dev: true + + /ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + dependencies: + color-convert: 2.0.1 + dev: true + + /arg@4.1.3: + resolution: {integrity: sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==} + dev: true + + /argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + dev: true + + /balanced-match@1.0.2: + resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + dev: true + + /brace-expansion@1.1.11: + resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + dev: true + + /callsites@3.1.0: 
+ resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} + engines: {node: '>=6'} + dev: true + + /chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: '>=10'} + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + dev: true + + /color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + dependencies: + color-name: 1.1.4 + dev: true + + /color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + dev: true + + /concat-map@0.0.1: + resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} + dev: true + + /create-require@1.1.1: + resolution: {integrity: sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==} + dev: true + + /cross-spawn@7.0.3: + resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==} + engines: {node: '>= 8'} + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + dev: true + + /debug@4.3.4: + resolution: {integrity: sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + dependencies: + ms: 2.1.2 + dev: true + + /deep-is@0.1.4: + resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + dev: true + + /diff@4.0.2: + resolution: {integrity: sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==} + engines: {node: '>=0.3.1'} + 
dev: true + + /doctrine@3.0.0: + resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==} + engines: {node: '>=6.0.0'} + dependencies: + esutils: 2.0.3 + dev: true + + /escape-string-regexp@4.0.0: + resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} + engines: {node: '>=10'} + dev: true + + /eslint-scope@7.2.0: + resolution: {integrity: sha512-DYj5deGlHBfMt15J7rdtyKNq/Nqlv5KfU4iodrQ019XESsRnwXH9KAE0y3cwtUHDo2ob7CypAnCqefh6vioWRw==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + esrecurse: 4.3.0 + estraverse: 5.3.0 + dev: true + + /eslint-visitor-keys@3.4.1: + resolution: {integrity: sha512-pZnmmLwYzf+kWaM/Qgrvpen51upAktaaiI01nsJD/Yr3lMOdNtq0cxkrrg16w64VtisN6okbs7Q8AfGqj4c9fA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dev: true + + /eslint@8.41.0: + resolution: {integrity: sha512-WQDQpzGBOP5IrXPo4Hc0814r4/v2rrIsB0rhT7jtunIalgg6gYXWhRMOejVO8yH21T/FGaxjmFjBMNqcIlmH1Q==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + hasBin: true + dependencies: + '@eslint-community/eslint-utils': 4.4.0(eslint@8.41.0) + '@eslint-community/regexpp': 4.5.1 + '@eslint/eslintrc': 2.0.3 + '@eslint/js': 8.41.0 + '@humanwhocodes/config-array': 0.11.8 + '@humanwhocodes/module-importer': 1.0.1 + '@nodelib/fs.walk': 1.2.8 + ajv: 6.12.6 + chalk: 4.1.2 + cross-spawn: 7.0.3 + debug: 4.3.4 + doctrine: 3.0.0 + escape-string-regexp: 4.0.0 + eslint-scope: 7.2.0 + eslint-visitor-keys: 3.4.1 + espree: 9.5.2 + esquery: 1.5.0 + esutils: 2.0.3 + fast-deep-equal: 3.1.3 + file-entry-cache: 6.0.1 + find-up: 5.0.0 + glob-parent: 6.0.2 + globals: 13.20.0 + graphemer: 1.4.0 + ignore: 5.2.4 + import-fresh: 3.3.0 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + is-path-inside: 3.0.3 + js-yaml: 4.1.0 + json-stable-stringify-without-jsonify: 1.0.1 + levn: 0.4.1 + lodash.merge: 4.6.2 + minimatch: 3.1.2 + natural-compare: 1.4.0 + optionator: 0.9.1 + 
strip-ansi: 6.0.1 + strip-json-comments: 3.1.1 + text-table: 0.2.0 + transitivePeerDependencies: + - supports-color + dev: true + + /espree@9.5.2: + resolution: {integrity: sha512-7OASN1Wma5fum5SrNhFMAMJxOUAbhyfQ8dQ//PJaJbNw0URTPWqIghHWt1MmAANKhHZIYOHruW4Kw4ruUWOdGw==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + acorn: 8.8.2 + acorn-jsx: 5.3.2(acorn@8.8.2) + eslint-visitor-keys: 3.4.1 + dev: true + + /esquery@1.5.0: + resolution: {integrity: sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==} + engines: {node: '>=0.10'} + dependencies: + estraverse: 5.3.0 + dev: true + + /esrecurse@4.3.0: + resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} + engines: {node: '>=4.0'} + dependencies: + estraverse: 5.3.0 + dev: true + + /estraverse@5.3.0: + resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} + engines: {node: '>=4.0'} + dev: true + + /esutils@2.0.3: + resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} + engines: {node: '>=0.10.0'} + dev: true + + /fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + dev: true + + /fast-json-stable-stringify@2.1.0: + resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + dev: true + + /fast-levenshtein@2.0.6: + resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + dev: true + + /fastq@1.15.0: + resolution: {integrity: sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==} + dependencies: + reusify: 1.0.4 + dev: true + + /file-entry-cache@6.0.1: + resolution: {integrity: 
sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==} + engines: {node: ^10.12.0 || >=12.0.0} + dependencies: + flat-cache: 3.0.4 + dev: true + + /find-up@5.0.0: + resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} + engines: {node: '>=10'} + dependencies: + locate-path: 6.0.0 + path-exists: 4.0.0 + dev: true + + /flat-cache@3.0.4: + resolution: {integrity: sha512-dm9s5Pw7Jc0GvMYbshN6zchCA9RgQlzzEZX3vylR9IqFfS8XciblUXOKfW6SiuJ0e13eDYZoZV5wdrev7P3Nwg==} + engines: {node: ^10.12.0 || >=12.0.0} + dependencies: + flatted: 3.2.7 + rimraf: 3.0.2 + dev: true + + /flatted@3.2.7: + resolution: {integrity: sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==} + dev: true + + /fs.realpath@1.0.0: + resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} + dev: true + + /glob-parent@6.0.2: + resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} + engines: {node: '>=10.13.0'} + dependencies: + is-glob: 4.0.3 + dev: true + + /glob@7.2.3: + resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + dev: true + + /globals@13.20.0: + resolution: {integrity: sha512-Qg5QtVkCy/kv3FUSlu4ukeZDVf9ee0iXLAUYX13gbR17bnejFTzr4iS9bY7kwCf1NztRNm1t91fjOiyx4CSwPQ==} + engines: {node: '>=8'} + dependencies: + type-fest: 0.20.2 + dev: true + + /graphemer@1.4.0: + resolution: {integrity: sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==} + dev: true + + /has-flag@4.0.0: + resolution: {integrity: 
sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + dev: true + + /ignore@5.2.4: + resolution: {integrity: sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==} + engines: {node: '>= 4'} + dev: true + + /import-fresh@3.3.0: + resolution: {integrity: sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==} + engines: {node: '>=6'} + dependencies: + parent-module: 1.0.1 + resolve-from: 4.0.0 + dev: true + + /imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: '>=0.8.19'} + dev: true + + /inflight@1.0.6: + resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} + dependencies: + once: 1.4.0 + wrappy: 1.0.2 + dev: true + + /inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + dev: true + + /is-extglob@2.1.1: + resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} + engines: {node: '>=0.10.0'} + dev: true + + /is-glob@4.0.3: + resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} + engines: {node: '>=0.10.0'} + dependencies: + is-extglob: 2.1.1 + dev: true + + /is-path-inside@3.0.3: + resolution: {integrity: sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==} + engines: {node: '>=8'} + dev: true + + /isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + dev: true + + /js-yaml@4.1.0: + resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} 
+ hasBin: true + dependencies: + argparse: 2.0.1 + dev: true + + /json-schema-traverse@0.4.1: + resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} + dev: true + + /json-stable-stringify-without-jsonify@1.0.1: + resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} + dev: true + + /levn@0.4.1: + resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} + engines: {node: '>= 0.8.0'} + dependencies: + prelude-ls: 1.2.1 + type-check: 0.4.0 + dev: true + + /locate-path@6.0.0: + resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} + engines: {node: '>=10'} + dependencies: + p-locate: 5.0.0 + dev: true + + /lodash.merge@4.6.2: + resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + dev: true + + /make-error@1.3.6: + resolution: {integrity: sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==} + dev: true + + /minimatch@3.1.2: + resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + dependencies: + brace-expansion: 1.1.11 + dev: true + + /ms@2.1.2: + resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==} + dev: true + + /natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + dev: true + + /once@1.4.0: + resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + dependencies: + wrappy: 1.0.2 + dev: true + + /optionator@0.9.1: + resolution: {integrity: 
sha512-74RlY5FCnhq4jRxVUPKDaRwrVNXMqsGsiW6AJw4XK8hmtm10wC0ypZBLw5IIp85NZMr91+qd1RvvENwg7jjRFw==} + engines: {node: '>= 0.8.0'} + dependencies: + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.4.1 + prelude-ls: 1.2.1 + type-check: 0.4.0 + word-wrap: 1.2.3 + dev: true + + /p-limit@3.1.0: + resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + dependencies: + yocto-queue: 0.1.0 + dev: true + + /p-locate@5.0.0: + resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} + engines: {node: '>=10'} + dependencies: + p-limit: 3.1.0 + dev: true + + /parent-module@1.0.1: + resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} + engines: {node: '>=6'} + dependencies: + callsites: 3.1.0 + dev: true + + /path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: '>=8'} + dev: true + + /path-is-absolute@1.0.1: + resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} + engines: {node: '>=0.10.0'} + dev: true + + /path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + dev: true + + /prelude-ls@1.2.1: + resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} + engines: {node: '>= 0.8.0'} + dev: true + + /prettier@2.8.8: + resolution: {integrity: sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q==} + engines: {node: '>=10.13.0'} + hasBin: true + dev: true + + /punycode@2.3.0: + resolution: {integrity: 
sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==} + engines: {node: '>=6'} + dev: true + + /queue-microtask@1.2.3: + resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} + dev: true + + /resolve-from@4.0.0: + resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} + engines: {node: '>=4'} + dev: true + + /reusify@1.0.4: + resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} + engines: {iojs: '>=1.0.0', node: '>=0.10.0'} + dev: true + + /rimraf@3.0.2: + resolution: {integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==} + hasBin: true + dependencies: + glob: 7.2.3 + dev: true + + /run-parallel@1.2.0: + resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} + dependencies: + queue-microtask: 1.2.3 + dev: true + + /shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + dependencies: + shebang-regex: 3.0.0 + dev: true + + /shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + dev: true + + /strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + dependencies: + ansi-regex: 5.0.1 + dev: true + + /strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: '>=8'} + dev: true + + /supports-color@7.2.0: + resolution: {integrity: 
sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + dependencies: + has-flag: 4.0.0 + dev: true + + /text-table@0.2.0: + resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==} + dev: true + + /ts-node@10.9.1(@types/node@20.2.5)(typescript@5.0.4): + resolution: {integrity: sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==} + hasBin: true + peerDependencies: + '@swc/core': '>=1.2.50' + '@swc/wasm': '>=1.2.50' + '@types/node': '*' + typescript: '>=2.7' + peerDependenciesMeta: + '@swc/core': + optional: true + '@swc/wasm': + optional: true + dependencies: + '@cspotcode/source-map-support': 0.8.1 + '@tsconfig/node10': 1.0.9 + '@tsconfig/node12': 1.0.11 + '@tsconfig/node14': 1.0.3 + '@tsconfig/node16': 1.0.4 + '@types/node': 20.2.5 + acorn: 8.8.2 + acorn-walk: 8.2.0 + arg: 4.1.3 + create-require: 1.1.1 + diff: 4.0.2 + make-error: 1.3.6 + typescript: 5.0.4 + v8-compile-cache-lib: 3.0.1 + yn: 3.1.1 + dev: true + + /type-check@0.4.0: + resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} + engines: {node: '>= 0.8.0'} + dependencies: + prelude-ls: 1.2.1 + dev: true + + /type-fest@0.20.2: + resolution: {integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==} + engines: {node: '>=10'} + dev: true + + /typescript@5.0.4: + resolution: {integrity: sha512-cW9T5W9xY37cc+jfEnaUvX91foxtHkza3Nw3wkoF4sSlKn0MONdkdEndig/qPBWXNkmplh3NzayQzCiHM4/hqw==} + engines: {node: '>=12.20'} + hasBin: true + + /uri-js@4.4.1: + resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + dependencies: + punycode: 2.3.0 + dev: true + + /v8-compile-cache-lib@3.0.1: + resolution: {integrity: 
sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==} + dev: true + + /which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + dependencies: + isexe: 2.0.0 + dev: true + + /word-wrap@1.2.3: + resolution: {integrity: sha512-Hz/mrNwitNRh/HUAtM/VT/5VH+ygD6DV7mYKZAtHOrbs8U7lvPS6xf7EJKMF0uW1KJCl0H701g3ZGus+muE5vQ==} + engines: {node: '>=0.10.0'} + dev: true + + /wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + dev: true + + /yn@3.1.1: + resolution: {integrity: sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==} + engines: {node: '>=6'} + dev: true + + /yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + dev: true diff --git a/test.sh b/test.sh new file mode 100755 index 0000000..79ab72b --- /dev/null +++ b/test.sh @@ -0,0 +1,9761 @@ +#!/bin/bash + +set -e +set -E # cause 'trap funcname ERR' to be inherited by child commands, see https://stackoverflow.com/questions/35800082/how-to-trap-err-when-using-set-e-in-bash + +MASTER=1 +DIR=. 
+ + +KURL_URL="https://kurl.sh" +DIST_URL="https://s3.kurl.sh/dist" +FALLBACK_URL="https://kurl-sh.s3.amazonaws.com/dist" +INSTALLER_ID="latest" +KURL_VERSION="v2023.05.30-0" +CRICTL_VERSION=1.20.0 +REPLICATED_APP_URL="https://replicated.app" +KURL_UTIL_IMAGE="replicated/kurl-util:v2023.05.30-0" +KURL_BIN_UTILS_FILE="kurl-bin-utils-v2023.05.30-0.tar.gz" +# STEP_VERSIONS array is generated by the server and injected at runtime based on supported k8s versions +STEP_VERSIONS=(0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 1.16.4 1.17.13 1.18.20 1.19.16 1.20.15 1.21.14 1.22.17 1.23.17 1.24.14 1.25.10 1.26.5 1.27.2) +# ROOK_STEP_VERSIONS array is generated by the server and injected at runtime based on supported rook versions +ROOK_STEP_VERSIONS=(1.0.4-14.2.21 0.0.0 0.0.0 0.0.0 1.4.9 1.5.12 1.6.11 1.7.11 1.8.10 1.9.12 1.10.11 1.11.6) +# CONTAINERD_STEP_VERSIONS array is generated by the server and injected at runtime based on supported containerd versions +CONTAINERD_STEP_VERSIONS=(0.0.0 0.0.0 1.2.13 1.3.9 1.4.13 1.5.11 1.6.21) +INSTALLER_YAML="apiVersion: cluster.kurl.sh/v1beta1 +kind: Installer +metadata: + name: latest +spec: + kubernetes: + version: 1.27.2 + flannel: + version: 0.21.5 + openebs: + version: 3.6.0 + isLocalPVEnabled: true + localPVStorageClassName: local + minio: + version: 2023-05-18T00-05-36Z + contour: + version: 1.25.0 + registry: + version: 2.8.2 + prometheus: + version: 0.65.1-45.28.0 + ekco: + version: 0.27.1 + kurl: + installerVersion: v2023.05.30-0 + additionalNoProxyAddresses: [] + containerd: + version: 1.6.21 +" + +# shellcheck disable=SC2148 +# no shebang as this is a composite script + +function kurl_init_config() { + if kubernetes_resource_exists kurl configmap kurl-current-config; then + kubectl delete configmap -n kurl kurl-last-config || true + kubectl get configmap -n kurl -o json kurl-current-config | sed 's/kurl-current-config/kurl-last-config/g' | kubectl apply -f - + kubectl 
delete configmap -n kurl kurl-current-config || true + else + kubectl create configmap -n kurl kurl-last-config + fi + + kubectl create configmap -n kurl kurl-current-config + + kurl_set_current_version +} + +function kurl_set_current_version() { + if [ -z "${KURL_VERSION}" ]; then + return + fi + kubectl patch configmaps -n kurl kurl-current-config --type merge -p "{\"data\":{\"kurl-version\":\"${KURL_VERSION}\"}}" +} + +function kurl_get_current_version() { + kubectl get configmap -n kurl kurl-current-config -o jsonpath="{.data.kurl-version}" +} + +function kurl_get_last_version() { + kubectl get configmap -n kurl kurl-last-config -o jsonpath="{.data.kurl-version}" +} + + +#!/bin/bash + +ADDONS_HAVE_HOST_COMPONENTS=0 +function addon_install() { + local name=$1 + local version=$2 + + if [ -z "$version" ]; then + return 0 + fi + + logStep "Addon $name $version" + + report_addon_start "$name" "$version" + + rm -rf $DIR/kustomize/$name + mkdir -p $DIR/kustomize/$name + + export REPORTING_CONTEXT_INFO="addon $name $version" + + # shellcheck disable=SC1090 + addon_source "$name" "$version" + + # containerd is a special case because there is also a binary named containerd on the host + if [ "$name" = "containerd" ]; then + containerd_install + else + $name + fi + export REPORTING_CONTEXT_INFO="" + + addon_set_has_been_applied $name + + if commandExists ${name}_join; then + ADDONS_HAVE_HOST_COMPONENTS=1 + fi + if [ "$name" = "containerd" ]; then + ADDONS_HAVE_HOST_COMPONENTS=1 + fi + + report_addon_success "$name" "$version" +} + +function addon_fetch() { + local name=$1 + local version=$2 + local s3Override=$3 + + if [ -z "$version" ]; then + return 0 + fi + + if [ "$AIRGAP" != "1" ]; then + if [ -n "$s3Override" ]; then + rm -rf $DIR/addons/$name/$version # Cleanup broken/incompatible addons from failed runs + addon_fetch_cache "$name-$version.tar.gz" "$s3Override" + elif [ -n "$DIST_URL" ]; then + rm -rf $DIR/addons/$name/$version # Cleanup broken/incompatible addons 
from failed runs + addon_fetch_cache "$name-$version.tar.gz" + fi + fi + + addon_source "$name" "$version" +} + +function addon_pre_init() { + local name=$1 + + if commandExists ${name}_pre_init; then + ${name}_pre_init + fi +} + +function addon_post_init() { + local name=$1 + + if commandExists "${name}_post_init"; then + "${name}_post_init" + fi +} + +function addon_preflight() { + local name=$1 + local version=$2 # will be unset if addon is not part of the installer + + if [ -z "$name" ] || [ -z "$version" ]; then + return + fi + + local addonRoot="${DIR}/addons/${name}/${version}" + if [ ! -d "$addonRoot" ]; then + return + fi + + local src="${addonRoot}/host-preflight.yaml" + if [ -f "$src" ]; then + echo "$src" + fi + + if [ "${SKIP_SYSTEM_PACKAGE_INSTALL}" == "1" ]; then + preflights_system_packages "$name" "$version" + fi +} + +function addon_join() { + local name=$1 + local version=$2 + + addon_load "$name" "$version" + + if commandExists ${name}_join; then + logStep "Addon $name $version" + ${name}_join + fi +} + +function addon_exists() { + local name=$1 + local version=$2 + [ -d "$DIR/addons/$name/$version" ] +} + +function addon_load() { + local name=$1 + local version=$2 + + if [ -z "$version" ]; then + return 0 + fi + + load_images $DIR/addons/$name/$version/images +} + +function addon_fetch_no_cache() { + local url=$1 + + local archiveName=$(basename $url) + + echo "Fetching $archiveName" + curl -LO "$url" + tar xf $archiveName + rm $archiveName +} + +function addon_fetch_cache() { + local package=$1 + local url_override=$2 + + package_download "${package}" "${url_override}" + + tar xf "$(package_filepath "${package}")" + + # rm $archiveName +} + +# addon_fetch_airgap checks if the files are already present - if they are, use that +# if they are not, prompt the user to provide them +# if the user does not provide the files, bail +function addon_fetch_airgap() { + local name=$1 + local version=$2 + local package_name="$name-$version.tar.gz" + local 
package_path= + package_path="$(package_filepath "$package_name")" + + if [ -f "$package_path" ]; then + # the package already exists, no need to download it + printf "The package %s %s is already available locally.\n" "$name" "$version" + else + package_path="$(find assets/*"$name-$version"*.tar.gz 2>/dev/null | head -n 1)" + if [ -n "$package_path" ]; then + # the package already exists, no need to download it + printf "The package %s is already available locally.\n" "$(basename "$package_path")" + else + # prompt the user to give us the package + printf "The package %s %s is not available locally, and is required.\n" "$name" "$version" + printf "\nYou can download it with the following command:\n" + printf "\n${GREEN} curl -LO %s${NC}\n\n" "$(get_dist_url)/$package_name" + + addon_fetch_airgap_prompt_for_package "$package_name" + fi + fi + + printf "Unpacking %s %s...\n" "$name" "$version" + tar xf "$package_path" + + # do not source the addon here as the kubernetes "addon" uses this function but is not an addon +} + +# addon_fetch_multiple_airgap checks if the files are already present - if they are, use that +# if they are not, prompt the user to provide them as a single package +# if the user does not provide the files, bail +# exports the package filepath for later cleanup +function addon_fetch_multiple_airgap() { + local addon_versions=( "$@" ) + local missing_addon_versions=() + export AIRGAP_MULTI_ADDON_PACKAGE_PATH= + for addon_version in "${addon_versions[@]}"; do + local name=, version= + name=$(echo "$addon_version" | cut -d- -f1) + version=$(echo "$addon_version" | cut -d- -f2) + local package_name="$name-$version.tar.gz" + local package_path= + package_path="$(package_filepath "$package_name")" + if [ -f "$package_path" ]; then + # the package already exists, no need to download it + printf "The package %s %s is already available locally.\n" "$name" "$version" + + printf "Unpacking %s...\n" "$package_name" + if ! 
tar xf "$package_path" ; then + bail "Failed to unpack $package_name" + fi + else + # the package does not exist, add it to the list of missing packages + missing_addon_versions+=("$addon_version") + fi + done + + if [ "${#missing_addon_versions[@]}" -gt 0 ]; then + local package_list= + package_list=$(printf ",%s" "${missing_addon_versions[@]}") # join with commas + package_list="${package_list:1}" + local package_name="$package_list.tar.gz" + local package_path= + package_path="$(package_filepath "$package_name")" + AIRGAP_MULTI_ADDON_PACKAGE_PATH="$package_path" + + if [ -f "$package_path" ]; then + # the package already exists, no need to download it + printf "The package %s is already available locally.\n" "$package_name" + else + local bundle_url="$KURL_URL/bundle" + if [ -n "$KURL_VERSION" ]; then + bundle_url="$bundle_url/version/$KURL_VERSION" + fi + bundle_url="$bundle_url/$INSTALLER_ID/packages/$package_name" + + printf "The following packages are not available locally, and are required:\n" + # prompt the user to give us the packages + for addon_version in "${missing_addon_versions[@]}"; do + printf " %s\n" "$addon_version.tar.gz" + done + printf "\nYou can download them with the following command:\n" + printf "\n${GREEN} curl -LO %s${NC}\n\n" "$bundle_url" + + addon_fetch_airgap_prompt_for_package "$package_name" + fi + + printf "Unpacking %s...\n" "$package_name" + if ! tar xf "$package_path" ; then + bail "Failed to unpack $package_name" + fi + + # do not source the addon here as we are loading multiple addons that may conflict + # also the kubernetes "addon" uses this function but is not an addon + fi +} + +# addon_fetch_airgap_prompt_for_package prompts the user do download a package +function addon_fetch_airgap_prompt_for_package() { + local package_name="$1" + local package_path= + package_path=$(package_filepath "$package_name") + + if ! 
prompts_can_prompt; then + # we can't ask the user to give us the file because there are no prompts, but we can say where to put it for a future run + bail "Please move this file to $KURL_INSTALL_DIRECTORY/$package_path before rerunning the installer." + fi + + printf "Please provide the path to the file on the server.\n" + printf "Absolute path to file: " + prompt + if [ -n "$PROMPT_RESULT" ]; then + local loaded_package_path="$PROMPT_RESULT" + if [ ! -f "$loaded_package_path" ]; then + bail "The file $loaded_package_path does not exist." + fi + mkdir -p "$(dirname "$package_path")" + log "Copying $loaded_package_path to $package_path" + cp "$loaded_package_path" "$package_path" + else + logFail "Package $package_name not provided." + logFail "You can provide the path to this file the next time the installer is run," + bail "or move it to $KURL_INSTALL_DIRECTORY/$package_path to be detected automatically.\n" + fi +} + +function addon_outro() { + if [ -n "$PROXY_ADDRESS" ]; then + ADDONS_HAVE_HOST_COMPONENTS=1 + fi + + if [ "$ADDONS_HAVE_HOST_COMPONENTS" = "1" ] && kubernetes_has_remotes; then + local common_flags + common_flags="${common_flags}$(get_docker_registry_ip_flag "${DOCKER_REGISTRY_IP}")" + + local no_proxy_addresses="" + [ -n "$ADDITIONAL_NO_PROXY_ADDRESSES" ] && no_proxy_addresses="$ADDITIONAL_NO_PROXY_ADDRESSES" + [ -n "${SERVICE_CIDR}" ] && no_proxy_addresses="${no_proxy_addresses:+$no_proxy_addresses,}${SERVICE_CIDR}" + [ -n "${POD_CIDR}" ] && no_proxy_addresses="${no_proxy_addresses:+$no_proxy_addresses,}${POD_CIDR}" + [ -n "$no_proxy_addresses" ] && common_flags="${common_flags}$(get_additional_no_proxy_addresses_flag 1 "$no_proxy_addresses")" + + common_flags="${common_flags}$(get_kurl_install_directory_flag "${KURL_INSTALL_DIRECTORY_FLAG}")" + common_flags="${common_flags}$(get_skip_system_package_install_flag)" + common_flags="${common_flags}$(get_exclude_builtin_host_preflights_flag)" + common_flags="${common_flags}$(get_remotes_flags)" + + 
printf "\n${YELLOW}Run this script on all remote nodes to apply changes${NC}\n" + if [ "$AIRGAP" = "1" ]; then + printf "\n\t${GREEN}cat ./upgrade.sh | sudo bash -s airgap${common_flags}${NC}\n\n" + else + local prefix= + prefix="$(build_installer_prefix "${INSTALLER_ID}" "${KURL_VERSION}" "${KURL_URL}" "${PROXY_ADDRESS}")" + + printf "\n\t${GREEN}${prefix}upgrade.sh | sudo bash -s${common_flags}${NC}\n\n" + fi + + if [ "${KURL_IGNORE_REMOTE_UPGRADE_PROMPT}" != "1" ]; then + if prompts_can_prompt ; then + echo "Press enter to proceed" + prompt + fi + else + logWarn "Remote upgrade script prompt explicitly ignored" + fi + fi + + while read -r name; do + if commandExists ${name}_outro; then + ${name}_outro + fi + done < <(find addons/ -mindepth 1 -maxdepth 1 -type d -printf '%f\n') +} + +function addon_cleanup() { + rm -rf "${DIR}/addons" +} + +function addon_has_been_applied() { + local name=$1 + + if [ "$name" = "containerd" ]; then + if [ -f $DIR/containerd-last-applied ]; then + last_applied=$(cat $DIR/containerd-last-applied) + fi + else + last_applied=$(kubectl get configmap -n kurl kurl-last-config -o jsonpath="{.data.addons-$name}") + fi + + current=$(get_addon_config "$name" | base64 -w 0) + + if [[ "$current" == "" ]] ; then + # current should never be the empty string - it should at least contain the version - so this indicates an error + # it would be better to reinstall unnecessarily rather than skip installing, so we report that the addon has not been applied + return 1 + fi + + if [[ "$last_applied" == "$current" ]] ; then + return 0 + fi + + return 1 +} + +function addon_set_has_been_applied() { + local name=$1 + current=$(get_addon_config "$name" | base64 -w 0) + + if [ "$name" = "containerd" ]; then + echo "$current" > $DIR/containerd-last-applied + else + kubectl patch configmaps -n kurl kurl-current-config --type merge -p "{\"data\":{\"addons-$name\":\"$current\"}}" + fi +} + +function addon_source() { + local name=$1 + local version=$2 + # 
shellcheck disable=SC1090 + . "$DIR/addons/$name/$version/install.sh" +} + +GREEN='\033[0;32m' +BLUE='\033[0;94m' +LIGHT_BLUE='\033[0;34m' +YELLOW='\033[0;33m' +RED='\033[0;31m' +NC='\033[0m' # No Color + +KUBEADM_CONF_DIR=/opt/replicated +KUBEADM_CONF_FILE="$KUBEADM_CONF_DIR/kubeadm.conf" + +function commandExists() { + command -v "$@" > /dev/null 2>&1 +} + +function get_dist_url() { + local url="$DIST_URL" + if [ -n "${KURL_VERSION}" ]; then + url="${DIST_URL}/${KURL_VERSION}" + fi + echo "$url" +} + +# default s3 endpoint does not have AAAA records so IPv6 installs have to choose +# an arbitrary regional dualstack endpoint. If S3 transfer acceleration is ever +# enabled on the kurl-sh bucket the s3.accelerate.amazonaws.com endpoint can be +# used for both IPv4 and IPv6. +# this is not required for get_dist_url as *.kurl.sh endpoints have IPv6 addresses. +function get_dist_url_fallback() { + local url="$FALLBACK_URL" + if [ -n "${KURL_VERSION}" ]; then + url="${FALLBACK_URL}/${KURL_VERSION}" + fi + + if [ "$IPV6_ONLY" = "1" ]; then + echo "$url" | sed 's/s3\.amazonaws\.com/s3.dualstack.us-east-1.amazonaws.com/' + else + echo "$url" + fi +} + +function package_download() { + local package="$1" + local url_override="$2" + + if [ -z "$package" ]; then + bail "package_download called with no package name" + fi + + if [ -z "$url_override" ] && [ -z "${DIST_URL}" ]; then + logWarn "DIST_URL not set, will not download $1" + return + fi + + mkdir -p assets + touch assets/Manifest + + local etag= + local checksum= + etag="$(grep -F "${package}" assets/Manifest | awk 'NR == 1 {print $2}')" + checksum="$(grep -F "${package}" assets/Manifest | awk 'NR == 1 {print $3}')" + + if [ -n "${etag}" ] && ! 
package_matches_checksum "${package}" "${checksum}" ; then + etag= + fi + + local package_url= + if [ -z "$url_override" ]; then + package_url="$(get_dist_url)/${package}" + else + package_url="${url_override}" + fi + + local newetag= + newetag="$(curl -IfsSL "$package_url" | grep -i 'etag:' | sed -r 's/.*"(.*)".*/\1/')" + if [ -n "${etag}" ] && [ "${etag}" = "${newetag}" ]; then + echo "Package ${package} already exists, not downloading" + return + fi + + local filepath= + filepath="$(package_filepath "${package}")" + + sed -i "/^$(printf '%s' "${package}").*/d" assets/Manifest # remove from manifest + rm -f "${filepath}" # remove the file + + echo "Downloading package ${package}" + if [ -z "$url_override" ]; then + if [ -z "$FALLBACK_URL" ]; then + package_download_url_with_retry "$package_url" "${filepath}" + else + package_download_url_with_retry "$package_url" "${filepath}" || package_download_url_with_retry "$(get_dist_url_fallback)/${package}" "${filepath}" + fi + else + package_download_url_with_retry "${url_override}" "${filepath}" + fi + + checksum="$(md5sum "${filepath}" | awk '{print $1}')" + echo "${package} ${newetag} ${checksum}" >> assets/Manifest +} + +function package_download_url_with_retry() { + local url="$1" + local filepath="$2" + local max_retries="${3:-10}" + + local errcode= + local i=0 + while [ $i -ne "$max_retries" ]; do + errcode=0 + curl -fL -o "${filepath}" "${url}" || errcode="$?" + # 18 transfer closed with outstanding read data remaining + # 56 recv failure (connection reset by peer) + if [ "$errcode" -eq "18" ] || [ "$errcode" -eq "56" ]; then + i=$(($i+1)) + continue + fi + return "$errcode" + done + return "$errcode" +} + +function package_filepath() { + local package="$1" + echo "assets/${package}" +} + +function package_matches_checksum() { + local package="$1" + local checksum="$2" + + local filepath="$(package_filepath "${package}")" + + if [ -z "${checksum}" ]; then + return 1 + elif [ ! -f "${filepath}" ] || [ ! 
-s "${filepath}" ]; then # if not exists or empty + return 1 + elif ! md5sum "${filepath}" | grep -Fq "${checksum}" ; then + echo "Package ${package} checksum does not match" + return 1 + fi + return 0 +} + +function package_cleanup() { + if [ -z "${DIST_URL}" ] || [ "${AIRGAP}" = "1" ]; then + return + fi + addon_cleanup + rm -rf "${DIR}/packages" +} + +function insertOrReplaceJsonParam() { + if ! [ -f "$1" ]; then + # If settings file does not exist + mkdir -p "$(dirname "$1")" + echo "{\"$2\": \"$3\"}" > "$1" + else + # Settings file exists + if grep -q -E "\"$2\" *: *\"[^\"]*\"" "$1"; then + # If settings file contains named setting, replace it + sed -i -e "s/\"$2\" *: *\"[^\"]*\"/\"$2\": \"$3\"/g" "$1" + else + # Insert into settings file (with proper commas) + if [ $(wc -c <"$1") -ge 5 ]; then + # File long enough to actually have an entry, insert "name": "value",\n after first { + _commonJsonReplaceTmp="$(awk "NR==1,/^{/{sub(/^{/, \"{\\\"$2\\\": \\\"$3\\\", \")} 1" "$1")" + echo "$_commonJsonReplaceTmp" > "$1" + else + # file not long enough to actually have contents, replace wholesale + echo "{\"$2\": \"$3\"}" > "$1" + fi + fi + fi +} + +function semverParse() { + major="${1%%.*}" + minor="${1#$major.}" + minor="${minor%%.*}" + patch="${1#$major.$minor.}" + patch="${patch%%[-.]*}" +} + +SEMVER_COMPARE_RESULT= +function semverCompare() { + semverParse "$1" + _a_major="${major:-0}" + _a_minor="${minor:-0}" + _a_patch="${patch:-0}" + semverParse "$2" + _b_major="${major:-0}" + _b_minor="${minor:-0}" + _b_patch="${patch:-0}" + if [ "$_a_major" -lt "$_b_major" ]; then + SEMVER_COMPARE_RESULT=-1 + return + fi + if [ "$_a_major" -gt "$_b_major" ]; then + SEMVER_COMPARE_RESULT=1 + return + fi + if [ "$_a_minor" -lt "$_b_minor" ]; then + SEMVER_COMPARE_RESULT=-1 + return + fi + if [ "$_a_minor" -gt "$_b_minor" ]; then + SEMVER_COMPARE_RESULT=1 + return + fi + if [ "$_a_patch" -lt "$_b_patch" ]; then + SEMVER_COMPARE_RESULT=-1 + return + fi + if [ "$_a_patch" -gt 
"$_b_patch" ]; then + SEMVER_COMPARE_RESULT=1 + return + fi + SEMVER_COMPARE_RESULT=0 +} + +function log() { + printf "%s\n" "$1" 1>&2 +} + +function logSuccess() { + printf "${GREEN}✔ $1${NC}\n" 1>&2 +} + +function logStep() { + printf "${BLUE}⚙ $1${NC}\n" 1>&2 +} + +function logSubstep() { + printf "\t${LIGHT_BLUE}- $1${NC}\n" 1>&2 +} + +function logFail() { + printf "${RED}$1${NC}\n" 1>&2 +} + +function logWarn() { + printf "${YELLOW}$1${NC}\n" 1>&2 +} + +function bail() { + logFail "$@" + exit 1 +} + +function wait_for_nodes() { + if ! spinner_until 120 get_nodes_succeeds ; then + # this should exit script on non-zero exit code and print error message + kubectl get nodes 1>/dev/null + fi +} + +function get_nodes_succeeds() { + kubectl get nodes >/dev/null 2>&1 +} + +function wait_for_default_namespace() { + if ! spinner_until 120 has_default_namespace ; then + kubectl get ns + bail "No default namespace detected" + fi +} + +function has_default_namespace() { + kubectl get ns | grep -q '^default' 2>/dev/null +} + +# Label nodes as provisioned by kurl installation +# (these labels should have been added by kurl installation. +# See kubeadm-init and kubeadm-join yaml files. +# This bit will ensure the labels are added for pre-existing cluster +# during a kurl upgrade.) +function labelNodes() { + for NODE in $(kubectl get nodes --no-headers | awk '{print $1}');do + kurl_label=$(kubectl describe nodes $NODE | grep "kurl.sh\/cluster=true") || true + if [[ -z $kurl_label ]];then + kubectl label node --overwrite $NODE kurl.sh/cluster=true; + fi + done +} + +# warning - this only waits for the pod to be running, not for it to be 1/1 or otherwise accepting connections +function spinnerPodRunning() { + namespace=$1 + podPrefix=$2 + + local delay=0.75 + local spinstr='|/-\' + while ! 
kubectl -n "$namespace" get pods 2>/dev/null | grep "^$podPrefix" | awk '{ print $3}' | grep '^Running$' > /dev/null ; do + local temp=${spinstr#?} + printf " [%c] " "$spinstr" + local spinstr=$temp${spinstr%"$temp"} + sleep $delay + printf "\b\b\b\b\b\b" + done + printf " \b\b\b\b" +} + +COMPARE_DOCKER_VERSIONS_RESULT= +function compareDockerVersions() { + # reset + COMPARE_DOCKER_VERSIONS_RESULT= + compareDockerVersionsIgnorePatch "$1" "$2" + if [ "$COMPARE_DOCKER_VERSIONS_RESULT" -ne "0" ]; then + return + fi + parseDockerVersion "$1" + _a_patch="$DOCKER_VERSION_PATCH" + parseDockerVersion "$2" + _b_patch="$DOCKER_VERSION_PATCH" + if [ "$_a_patch" -lt "$_b_patch" ]; then + COMPARE_DOCKER_VERSIONS_RESULT=-1 + return + fi + if [ "$_a_patch" -gt "$_b_patch" ]; then + COMPARE_DOCKER_VERSIONS_RESULT=1 + return + fi + COMPARE_DOCKER_VERSIONS_RESULT=0 +} + +COMPARE_DOCKER_VERSIONS_RESULT= +function compareDockerVersionsIgnorePatch() { + # reset + COMPARE_DOCKER_VERSIONS_RESULT= + parseDockerVersion "$1" + _a_major="$DOCKER_VERSION_MAJOR" + _a_minor="$DOCKER_VERSION_MINOR" + parseDockerVersion "$2" + _b_major="$DOCKER_VERSION_MAJOR" + _b_minor="$DOCKER_VERSION_MINOR" + if [ "$_a_major" -lt "$_b_major" ]; then + COMPARE_DOCKER_VERSIONS_RESULT=-1 + return + fi + if [ "$_a_major" -gt "$_b_major" ]; then + COMPARE_DOCKER_VERSIONS_RESULT=1 + return + fi + if [ "$_a_minor" -lt "$_b_minor" ]; then + COMPARE_DOCKER_VERSIONS_RESULT=-1 + return + fi + if [ "$_a_minor" -gt "$_b_minor" ]; then + COMPARE_DOCKER_VERSIONS_RESULT=1 + return + fi + COMPARE_DOCKER_VERSIONS_RESULT=0 +} + +DOCKER_VERSION_MAJOR= +DOCKER_VERSION_MINOR= +DOCKER_VERSION_PATCH= +DOCKER_VERSION_RELEASE= +function parseDockerVersion() { + # reset + DOCKER_VERSION_MAJOR= + DOCKER_VERSION_MINOR= + DOCKER_VERSION_PATCH= + DOCKER_VERSION_RELEASE= + if [ -z "$1" ]; then + return + fi + + OLD_IFS="$IFS" && IFS=. 
&& set -- $1 && IFS="$OLD_IFS" + DOCKER_VERSION_MAJOR=$1 + DOCKER_VERSION_MINOR=$2 + OLD_IFS="$IFS" && IFS=- && set -- $3 && IFS="$OLD_IFS" + DOCKER_VERSION_PATCH=$1 + DOCKER_VERSION_RELEASE=$2 +} + +function exportKubeconfig() { + local kubeconfig + kubeconfig="$(${K8S_DISTRO}_get_kubeconfig)" + + # To meet KUBERNETES_CIS_COMPLIANCE, the ${kubeconfig} needs to be owned by root:root + # and permissions set to 600 so users other than root cannot have access to kubectl + if [ "$KUBERNETES_CIS_COMPLIANCE" == "1" ]; then + chown root:root ${kubeconfig} + chmod 400 ${kubeconfig} + else + current_user_sudo_group + if [ -n "$FOUND_SUDO_GROUP" ]; then + chown root:$FOUND_SUDO_GROUP ${kubeconfig} + fi + chmod 440 ${kubeconfig} + fi + + if ! grep -q "kubectl completion bash" /etc/profile; then + if [ "$KUBERNETES_CIS_COMPLIANCE" != "1" ]; then + echo "export KUBECONFIG=${kubeconfig}" >> /etc/profile + fi + echo "if type _init_completion >/dev/null 2>&1; then source <(kubectl completion bash); fi" >> /etc/profile + fi +} + +function kubernetes_resource_exists() { + local namespace=$1 + local kind=$2 + local name=$3 + + kubectl -n "$namespace" get "$kind" "$name" &>/dev/null +} + +function install_cri() { + # In the event someone changes the installer spec from docker to containerd, maintain backward compatibility with old installs + if [ -n "$DOCKER_VERSION" ] ; then + export REPORTING_CONTEXT_INFO="docker $DOCKER_VERSION" + report_install_docker + export REPORTING_CONTEXT_INFO="" + elif [ -n "$CONTAINERD_VERSION" ]; then + export REPORTING_CONTEXT_INFO="containerd $CONTAINERD_VERSION" + report_install_containerd + export REPORTING_CONTEXT_INFO="" + fi +} + +function report_install_docker() { + report_addon_start "docker" "$DOCKER_VERSION" + install_docker + apply_docker_config + report_addon_success "docker" "$DOCKER_VERSION" +} + +function report_install_containerd() { + # if we haven't installed kubernetes yet we don't need to worry about containerd upgrades. 
+ if [ -z "$CURRENT_KUBERNETES_VERSION" ] ; then + addon_install "containerd" "$CONTAINERD_VERSION" + return 0 + fi + + # if the node we are running this script is leveraging docker we also don't need to worry + # about the version of containerd we are installing, it won't be an upgrade anyways. + if node_is_using_docker ; then + addon_install "containerd" "$CONTAINERD_VERSION" + return 0 + fi + + # if we can't find containerd in the local filesystem then we can also install regardless + # of version. + if [ ! -f "/usr/bin/containerd" ]; then + addon_install "containerd" "$CONTAINERD_VERSION" + return 0 + fi + + # from now on we are migrating from one containerd version to another, restrictions apply. + local current_containerd_version + current_containerd_version=$(/usr/bin/containerd --version | cut -d " " -f3 | tr -d 'v') + containerd_evaluate_upgrade "$current_containerd_version" "$CONTAINERD_VERSION" + for version in "${CONTAINERD_INSTALL_VERSIONS[@]}"; do + logStep "Moving containerd to version v$version." + if [ "$version" != "$CONTAINERD_VERSION" ] && [ "$AIRGAP" != "1" ] ; then + log "Downloading containerd v$version." + addon_fetch "containerd" "$version" + fi + addon_install "containerd" "$version" + done +} + +function load_images() { + if [ -n "$DOCKER_VERSION" ]; then + find "$1" -type f | xargs -I {} bash -c "docker load < {}" + else + find "$1" -type f | xargs -I {} bash -c "cat {} | gunzip | ctr -a $(${K8S_DISTRO}_get_containerd_sock) -n=k8s.io images import -" + fi + + retag_gcr_images +} + +# try a command every 2 seconds until it succeeds, up to 30 tries max; useful for kubectl commands +# where the Kubernetes API could be restarting +function try_1m() { + local fn="$1" + local args=${@:2} + + n=0 + while ! 
$fn $args 2>/dev/null ; do + n="$(( $n + 1 ))" + if [ "$n" -ge "30" ]; then + # for the final try print the error and let it exit + echo "" + try_output="$($fn $args 2>&1)" || true + echo "$try_output" + bail "spent 1m attempting to run \"$fn $args\" without success" + fi + sleep 2 + done +} + +# try a command every 2 seconds until it succeeds, up to 150 tries max; useful for kubectl commands +# where the Kubernetes API could be restarting +function try_5m() { + local fn="$1" + local args=${@:2} + + n=0 + while ! $fn $args 2>/dev/null ; do + n="$(( $n + 1 ))" + if [ "$n" -ge "150" ]; then + # for the final try print the error and let it exit + echo "" + try_output="$($fn $args 2>&1)" || true + echo "$try_output" + bail "spent 5m attempting to run \"$fn $args\" without success" + fi + sleep 2 + done +} + +# try a command every 2 seconds until it succeeds, up to 30 tries max; useful for kubectl commands +# where the Kubernetes API could be restarting +# does not redirect stderr to /dev/null +function try_1m_stderr() { + local fn="$1" + local args=${@:2} + + n=0 + while ! $fn $args ; do + n="$(( $n + 1 ))" + if [ "$n" -ge "30" ]; then + # for the final try print the error and let it exit + echo "" + try_output="$($fn $args 2>&1)" || true + echo "$try_output" + bail "spent 1m attempting to run \"$fn $args\" without success" + fi + sleep 2 + done +} + +# Run a test every second with a spinner until it succeeds +function spinner_until() { + local timeoutSeconds="$1" + local cmd="$2" + local args=${@:3} + + if [ -z "$timeoutSeconds" ]; then + timeoutSeconds=-1 + fi + + local delay=1 + local elapsed=0 + local spinstr='|/-\' + + while ! 
$cmd $args; do + elapsed=$((elapsed + delay)) + if [ "$timeoutSeconds" -ge 0 ] && [ "$elapsed" -gt "$timeoutSeconds" ]; then + return 1 + fi + local temp=${spinstr#?} + printf " [%c] " "$spinstr" + local spinstr=$temp${spinstr%"$temp"} + sleep $delay + printf "\b\b\b\b\b\b" + done +} + +function sleep_spinner() { + local sleepSeconds="${1:-0}" + + local delay=1 + local elapsed=0 + local spinstr='|/-\' + + while true ; do + elapsed=$((elapsed + delay)) + if [ "$elapsed" -gt "$sleepSeconds" ]; then + return 0 + fi + local temp=${spinstr#?} + printf " [%c] " "$spinstr" + local spinstr=$temp${spinstr%"$temp"} + sleep $delay + printf "\b\b\b\b\b\b" + done +} + +function get_common() { + if [ "$AIRGAP" != "1" ] && [ -n "$DIST_URL" ]; then + if [ -z "$FALLBACK_URL" ]; then + curl -sSOL "$(get_dist_url)/common.tar.gz" + else + curl -sSOL "$(get_dist_url)/common.tar.gz" || curl -sSOL "$(get_dist_url_fallback)/common.tar.gz" + fi + tar xf common.tar.gz + rm common.tar.gz + fi +} + +function get_shared() { + if [ -f shared/kurl-util.tar ]; then + if [ -n "$DOCKER_VERSION" ]; then + docker load < shared/kurl-util.tar + else + ctr -a "$(${K8S_DISTRO}_get_containerd_sock)" -n=k8s.io images import shared/kurl-util.tar + fi + fi +} + +function all_sudo_groups() { + # examples of lines we're looking for in any sudo config files to find group with root privileges + # %wheel ALL = (ALL) ALL + # %google-sudoers ALL=(ALL:ALL) NOPASSWD:ALL + # %admin ALL=(ALL) ALL + cat /etc/sudoers | grep -Eo '^%\S+\s+ALL\s?=.*ALL\b' | awk '{print $1 }' | sed 's/%//' + find /etc/sudoers.d/ -type f | xargs cat | grep -Eo '^%\S+\s+ALL\s?=.*ALL\b' | awk '{print $1 }' | sed 's/%//' +} + +# if the sudo group cannot be detected default to root +FOUND_SUDO_GROUP= +function current_user_sudo_group() { + if [ -z "$SUDO_UID" ]; then + return 0 + fi + # return the first sudo group the current user belongs to + while read -r groupName; do + if id "$SUDO_UID" -Gn | grep -q "\b${groupName}\b"; then + 
FOUND_SUDO_GROUP="$groupName" + return 0 + fi + done < <(all_sudo_groups) +} + +function kubeconfig_setup_outro() { + current_user_sudo_group + local owner="$SUDO_UID" + if [ -z "$owner" ]; then + # not currently running via sudo + owner="$USER" + else + # running via sudo - automatically create ~/.kube/config if it does not exist + ownerdir=$(eval echo "~$(id -un "$owner")") + + if [ ! -f "$ownerdir/.kube/config" ]; then + mkdir -p $ownerdir/.kube + cp "$(${K8S_DISTRO}_get_kubeconfig)" $ownerdir/.kube/config + chown -R $owner $ownerdir/.kube + + printf "To access the cluster with kubectl:\n" + printf "\n" + printf "${GREEN} bash -l${NC}\n" + printf "Kurl uses "$(${K8S_DISTRO}_get_kubeconfig)", you might want to unset KUBECONFIG to use .kube/config:\n" + printf "\n" + printf "${GREEN} echo unset KUBECONFIG >> ~/.bash_profile${NC}\n" + return + fi + fi + + printf "To access the cluster with kubectl:\n" + printf "\n" + printf "${GREEN} bash -l${NC}\n" + printf "\n" + printf "Kurl uses "$(${K8S_DISTRO}_get_kubeconfig)", you might want to copy kubeconfig to your home directory:\n" + printf "\n" + printf "${GREEN} cp "$(${K8S_DISTRO}_get_kubeconfig)" ~/.kube/config${NC}\n" + printf "${GREEN} chown -R ${owner} ~/.kube${NC}\n" + printf "${GREEN} echo unset KUBECONFIG >> ~/.bash_profile${NC}\n" + printf "${GREEN} bash -l${NC}\n" + printf "\n" + printf "You will likely need to use sudo to copy and chown "$(${K8S_DISTRO}_get_kubeconfig)".\n" +} + +function splitHostPort() { + oIFS="$IFS"; IFS=":" read -r HOST PORT <<< "$1"; IFS="$oIFS" +} + +function isValidIpv4() { + if echo "$1" | grep -qs '^[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*$'; then + return 0 + else + return 1 + fi +} + +function isValidIpv6() { + if echo "$1" | grep -qs "^\([0-9a-fA-F]\{0,4\}:\)\{1,7\}[0-9a-fA-F]\{0,4\}$"; then + return 0 + else + return 1 + fi +} + +function cert_has_san() { + local address=$1 + local san=$2 + + echo "Q" | openssl s_client -connect "$address" 2>/dev/null | openssl x509 
-noout -text 2>/dev/null | grep --after-context=1 'X509v3 Subject Alternative Name' | grep -q "$2" +} + +# By default journald persists logs if the directory /var/log/journal exists so create it if it's +# not found. Sysadmins may still disable persistent logging with /etc/systemd/journald.conf. +function journald_persistent() { + if [ -d /var/log/journal ]; then + return 0 + fi + mkdir -p /var/log/journal + systemd-tmpfiles --create --prefix /var/log/journal + systemctl restart systemd-journald + journalctl --flush +} + +function rm_file() { + if [ -f "$1" ]; then + rm $1 + fi +} + +# Checks if the provided param is in the current path, and if it is not adds it +# this is useful for systems where /usr/local/bin is not in the path for root +function path_add() { + if [ -d "$1" ] && [[ ":$PATH:" != *":$1:"* ]]; then + PATH="${PATH:+"$PATH:"}$1" + fi +} + +function install_host_dependencies() { + install_host_dependencies_openssl +} + +function install_host_dependencies_openssl() { + if commandExists "openssl"; then + return + fi + + if is_rhel_9_variant ; then + yum_ensure_host_package openssl + return + fi + + if [ "$AIRGAP" != "1" ] && [ -n "$DIST_URL" ]; then + local package="host-openssl.tar.gz" + package_download "${package}" + tar xf "$(package_filepath "${package}")" + fi + install_host_archives "${DIR}/packages/host/openssl" openssl +} + +function maybe_read_kurl_config_from_cluster() { + # if KURL_INSTALL_DIRECTORY_FLAG is set, use the value from the flag + if [ -z "$KURL_INSTALL_DIRECTORY_FLAG" ]; then + local kurl_install_directory_flag + # we don't yet have KUBECONFIG when this is called from the top of install.sh + kurl_install_directory_flag="$(KUBECONFIG="$(kubeadm_get_kubeconfig)" kubectl -n kube-system get cm kurl-config -ojsonpath='{ .data.kurl_install_directory }' 2>/dev/null || echo "")" + if [ -n "$kurl_install_directory_flag" ]; then + KURL_INSTALL_DIRECTORY_FLAG="$kurl_install_directory_flag" + KURL_INSTALL_DIRECTORY="$(realpath 
"$kurl_install_directory_flag")/kurl" + fi + fi + + # this function currently only sets KURL_INSTALL_DIRECTORY + # there are many other settings in kurl-config +} + +KURL_INSTALL_DIRECTORY=/var/lib/kurl +function pushd_install_directory() { + local dir= + dir="$(dirname "$KURL_INSTALL_DIRECTORY")" + if [ ! -e "$dir" ] ; then + bail "kURL installation directory $dir does not exist." + fi + if [ ! -d "$dir" ] ; then + bail "kURL installation directory $dir is not a directory." + fi + + KURL_INSTALL_DIRECTORY="$(realpath "$KURL_INSTALL_DIRECTORY")" + + local tmpfile= + tmpfile="$KURL_INSTALL_DIRECTORY/tmpfile" + if ! mkdir -p "$KURL_INSTALL_DIRECTORY" || ! touch "$tmpfile" ; then + bail "Directory $KURL_INSTALL_DIRECTORY is not writeable by this script. +Please either change the directory permissions or override the +installation directory with the flag \"kurl-install-directory\"." + fi + rm "$tmpfile" + pushd "$KURL_INSTALL_DIRECTORY" 1>/dev/null +} + +function popd_install_directory() { + popd 1>/dev/null +} + +function move_airgap_assets() { + local cwd + cwd="$(pwd)" + + if [ "$(readlink -f $KURL_INSTALL_DIRECTORY)" = "${cwd}/kurl" ]; then + return + fi + + pushd_install_directory # make sure we have access + popd_install_directory + + # The airgap bundle will extract everything into ./kurl directory. + # Move all assets except the scripts into the $KURL_INSTALL_DIRECTORY to emulate the online install experience. 
+ if [ "$(ls -A "${cwd}"/kurl)" ]; then + for file in "${cwd}"/kurl/*; do + rm -rf "${KURL_INSTALL_DIRECTORY}/$(basename ${file})" + mv "${file}" "${KURL_INSTALL_DIRECTORY}/" + done + fi +} + +function get_docker_registry_ip_flag() { + local docker_registry_ip="$1" + if [ -z "${docker_registry_ip}" ]; then + return + fi + echo " docker-registry-ip=${docker_registry_ip}" +} + +function get_skip_system_package_install_flag() { + if [ "${SKIP_SYSTEM_PACKAGE_INSTALL}" != "1" ]; then + return + fi + echo " skip-system-package-install" +} + +function get_exclude_builtin_host_preflights_flag() { + if [ "${EXCLUDE_BUILTIN_HOST_PREFLIGHTS}" != "1" ]; then + return + fi + echo " exclude-builtin-host-preflights" +} + +function get_additional_no_proxy_addresses_flag() { + local has_proxy="$1" + local no_proxy_addresses="$2" + if [ -z "${has_proxy}" ]; then + return + fi + echo " additional-no-proxy-addresses=${no_proxy_addresses}" +} + +function get_kurl_install_directory_flag() { + local kurl_install_directory="$1" + if [ -z "${kurl_install_directory}" ] || [ "${kurl_install_directory}" = "/var/lib/kurl" ]; then + return + fi + echo " kurl-install-directory=$(echo "${kurl_install_directory}")" +} + +function get_remotes_flags() { + local control_plane_label= + control_plane_label="$(kubernetes_get_control_plane_label)" + while read -r primary; do + printf " primary-host=$primary" + done < <(kubectl get nodes --no-headers --selector="$control_plane_label" -owide | awk '{ print $6 }') + + while read -r secondary; do + printf " secondary-host=$secondary" + done < <(kubectl get node --no-headers --selector='!'"$control_plane_label" -owide | awk '{ print $6 }') +} + +function get_ipv6_flag() { + if [ "$IPV6_ONLY" = "1" ]; then + echo " ipv6" + fi +} + +function systemd_restart_succeeded() { + local oldPid=$1 + local serviceName=$2 + + if ! 
systemctl is-active --quiet $serviceName; then + return 1 + fi + + local newPid="$(systemctl show --property MainPID $serviceName | cut -d = -f2)" + if [ "$newPid" = "$oldPid" ]; then + return 1 + fi + + if ps -p $oldPid >/dev/null 2>&1; then + return 1 + fi + + return 0 +} + +function restart_systemd_and_wait() { + local serviceName=$1 + + local pid="$(systemctl show --property MainPID $serviceName | cut -d = -f2)" + + echo "Restarting $serviceName..." + systemctl restart $serviceName + + if ! spinner_until 120 systemd_restart_succeeded $pid $serviceName; then + journalctl -xe + bail "Could not successfully restart systemd service $serviceName" + fi + + echo "Service $serviceName restarted." +} + +# returns true when a job has completed +function job_is_completed() { + local namespace="$1" + local jobName="$2" + kubectl get jobs -n "$namespace" "$jobName" | grep -q '1/1' +} + +function maybe() { + local cmd="$1" + local args=( "${@:2}" ) + + $cmd "${args[@]}" 2>/dev/null || true +} + +MACHINE_ID= +KURL_INSTANCE_UUID= +function get_machine_id() { + MACHINE_ID="$(${DIR}/bin/kurl host protectedid || true)" + if [ -f /etc/kurl/uuid ]; then + KURL_INSTANCE_UUID="$(cat /etc/kurl/uuid)" + else + if [ -f "${KURL_INSTALL_DIRECTORY}/uuid" ]; then + KURL_INSTANCE_UUID="$(cat ${KURL_INSTALL_DIRECTORY}/uuid)" + rm -f "${KURL_INSTALL_DIRECTORY}/uuid" + else + KURL_INSTANCE_UUID=$(< /dev/urandom tr -dc a-z0-9 | head -c32) + fi + # use /etc/kurl to persist the instance id "machine id" across cluster reset + mkdir -p /etc/kurl + echo "$KURL_INSTANCE_UUID" > /etc/kurl/uuid + fi +} + +function kebab_to_camel() { + echo "$1" | sed -E 's/-(.)/\U\1/g' +} + +function build_installer_prefix() { + local installer_id="$1" + local kurl_version="$2" + local kurl_url="$3" + local proxy_address="$4" + + if [ -z "${kurl_url}" ]; then + echo "cat " + return + fi + + local curl_flags= + if [ -n "${proxy_address}" ]; then + curl_flags=" -x ${proxy_address}" + fi + + if [ -n "${kurl_version}" ]; 
then + echo "curl -fsSL${curl_flags} ${kurl_url}/version/${kurl_version}/${installer_id}/" + else + echo "curl -fsSL${curl_flags} ${kurl_url}/${installer_id}/" + fi +} + +# get_local_node_name returns the name of the current node. +function get_local_node_name() { + echo "$HOSTNAME" +} + +# this waits for a deployment to have all replicas up-to-date and available +function deployment_fully_updated() { + x_fully_updated "$1" deployment "$2" +} + +# this waits for a statefulset to have all replicas up-to-date and available +function statefulset_fully_updated() { + x_fully_updated "$1" statefulset "$2" +} + +# this waits for a resource type (deployment or statefulset) to have all replicas up-to-date and available +function x_fully_updated() { + local namespace=$1 + local resourcetype=$2 + local name=$3 + + local desiredReplicas + desiredReplicas=$(kubectl get $resourcetype -n "$namespace" "$name" -o jsonpath='{.status.replicas}') + + local availableReplicas + availableReplicas=$(kubectl get $resourcetype -n "$namespace" "$name" -o jsonpath='{.status.availableReplicas}') + + local readyReplicas + readyReplicas=$(kubectl get $resourcetype -n "$namespace" "$name" -o jsonpath='{.status.readyReplicas}') + + local updatedReplicas + updatedReplicas=$(kubectl get $resourcetype -n "$namespace" "$name" -o jsonpath='{.status.updatedReplicas}') + + if [ "$desiredReplicas" != "$availableReplicas" ] ; then + return 1 + fi + + if [ "$desiredReplicas" != "$readyReplicas" ] ; then + return 1 + fi + + if [ "$desiredReplicas" != "$updatedReplicas" ] ; then + return 1 + fi + + return 0 +} + +# this waits for a daemonset to have all replicas up-to-date and available +function daemonset_fully_updated() { + local namespace=$1 + local daemonset=$2 + + local desiredNumberScheduled + desiredNumberScheduled=$(kubectl get daemonset -n "$namespace" "$daemonset" -o jsonpath='{.status.desiredNumberScheduled}') + + local currentNumberScheduled + currentNumberScheduled=$(kubectl get daemonset -n 
"$namespace" "$daemonset" -o jsonpath='{.status.currentNumberScheduled}') + + local numberAvailable + numberAvailable=$(kubectl get daemonset -n "$namespace" "$daemonset" -o jsonpath='{.status.numberAvailable}') + + local numberReady + numberReady=$(kubectl get daemonset -n "$namespace" "$daemonset" -o jsonpath='{.status.numberReady}') + + local updatedNumberScheduled + updatedNumberScheduled=$(kubectl get daemonset -n "$namespace" "$daemonset" -o jsonpath='{.status.updatedNumberScheduled}') + + if [ "$desiredNumberScheduled" != "$numberAvailable" ] ; then + return 1 + fi + + if [ "$desiredNumberScheduled" != "$currentNumberScheduled" ] ; then + return 1 + fi + + if [ "$desiredNumberScheduled" != "$numberAvailable" ] ; then + return 1 + fi + + if [ "$desiredNumberScheduled" != "$numberReady" ] ; then + return 1 + fi + + if [ "$desiredNumberScheduled" != "$updatedNumberScheduled" ] ; then + return 1 + fi + + return 0 +} + +# pods_gone_by_selector returns true if there are no pods matching the given selector +function pods_gone_by_selector() { + local namespace=$1 + local selector=$2 + [ "$(pod_count_by_selector "$namespace" "$selector")" = "0" ] +} + +# pod_count_by_selector returns the number of pods matching the given selector or -1 if the command fails +function pod_count_by_selector() { + local namespace=$1 + local selector=$2 + + local pods= + if ! 
pods="$(kubectl -n "$namespace" get pods --no-headers -l "$selector" 2>/dev/null)" ; then + echo -1 + fi + + echo -n "$pods" | wc -l +} + +# retag_gcr_images takes every k8s.gcr.io image and adds a registry.k8s.io alias if it does not already exist +# and vice versa +function retag_gcr_images() { + local images= + local image= + local new_image= + if [ -n "$DOCKER_VERSION" ]; then + images=$(docker images --format '{{.Repository}}:{{.Tag}}' | { grep -F k8s.gcr.io || true; }) + for image in $images ; do + new_image="${image//k8s.gcr.io/registry.k8s.io}" + docker tag "$image" "$new_image" 2>/dev/null || true + done + images=$(docker images --format '{{.Repository}}:{{.Tag}}' | { grep -F registry.gcr.io || true; }) + for image in $images ; do + new_image="${image//registry.k8s.io/k8s.gcr.io}" + docker tag "$image" "$new_image" 2>/dev/null || true + done + else + images=$(ctr -n=k8s.io images list --quiet | { grep -F k8s.gcr.io || true; }) + for image in $images ; do + new_image="${image//k8s.gcr.io/registry.k8s.io}" + ctr -n k8s.io images tag "$image" "$new_image" 2>/dev/null || true + done + images=$(ctr -n=k8s.io images list --quiet | { grep -F registry.gcr.io || true; }) + for image in $images ; do + new_image="${image//registry.k8s.io/k8s.gcr.io}" + ctr -n k8s.io images tag "$image" "$new_image" 2>/dev/null || true + done + fi +} + +function canonical_image_name() { + local image="$1" + if echo "$image" | grep -vq '/' ; then + image="library/$image" + fi + if echo "$image" | awk -F'/' '{print $1}' | grep -vq '\.' 
; then + image="docker.io/$image" + fi + if echo "$image" | grep -vq ':' ; then + image="$image:latest" + fi + echo "$image" +} + +# check_for_running_pods scans for pod(s) in a namespace and checks whether their status is running/completed +# note: Evicted pods are exempt from this check +function check_for_running_pods() { + local namespace=$1 + local is_job_controller=0 + local ns_pods= + local status= + local containers= + + ns_pods=$(kubectl get pods -n "$namespace" -o jsonpath='{.items[*].metadata.name}') + + if [ -z "$ns_pods" ]; then + return 0 + fi + + for pod in $ns_pods; do + # reset per pod: a Job-owned pod must not suppress the readiness check for later pods + is_job_controller=0 + status=$(kubectl get pod "$pod" -n "$namespace" -o jsonpath='{.status.phase}') + + # determine if pod is managed by a Job + if kubectl get pod "$pod" -n "$namespace" -o jsonpath='{.metadata.ownerReferences[*].kind}' | grep -q "Job"; then + is_job_controller=1 + fi + + # ignore pods that have been Evicted + if [ "$status" == "Failed" ] && [[ $(kubectl get pod "$pod" -n "$namespace" -o jsonpath='{.status.reason}') == "Evicted" ]]; then + continue + fi + + if [ "$status" != "Running" ] && [ "$status" != "Succeeded" ]; then + return 1 + fi + + containers=$(kubectl get pod "$pod" -n "$namespace" -o jsonpath="{.spec.containers[*].name}") + for container in $containers; do + container_status=$(kubectl get pod "$pod" -n "$namespace" -o jsonpath="{.status.containerStatuses[?(@.name==\"$container\")].ready}") + + # ignore container ready status for pods managed by the Job controller + if [ "$container_status" != "true" ] && [ "$is_job_controller" = "0" ]; then + return 1 + fi + done + done + + return 0 +} + +# retry a command if it fails up to $1 number of times +# Usage: cmd_retry 3 curl --globoff --noproxy "*" --fail --silent --insecure https://10.128.0.25:6443/healthz +function cmd_retry() { + local retries=$1 + shift + + local count=0 + until "$@"; do + exit=$? 
+ wait=$((2 ** $count)) + count=$(($count + 1)) + if [ $count -lt $retries ]; then + echo "Retry $count/$retries exited $exit, retrying in $wait seconds..." + sleep $wait + else + echo "Retry $count/$retries exited $exit, no more retries left." + return $exit + fi + done + return 0 +} + +# common_upgrade_step_versions returns a list of upgrade steps that need to be performed, based on +# the supplied space-delimited set of step versions, for use by other functions. +# e.g. "1.5.12\n1.6.11\n1.7.11" +function common_upgrade_step_versions() { + local step_versions= + read -ra step_versions <<< "$1" + local from_version=$2 + local desired_version=$3 + + local to_version= + to_version=$(common_upgrade_version_to_major_minor "$desired_version") + + # check that major versions are the same + local first_major= + first_major=$(common_upgrade_major_minor_to_major "$from_version") + local last_major= + last_major=$(common_upgrade_major_minor_to_major "$to_version") + if [ "$first_major" != "$last_major" ]; then + bail "Upgrade accross major version from $from_version to $to_version is not supported." + fi + + local first_minor= + local last_minor= + first_minor=$(common_upgrade_major_minor_to_minor "$from_version") + first_minor=$((first_minor + 1)) # exclusive of from_version + last_minor=$(common_upgrade_major_minor_to_minor "$to_version") + + if [ "${#step_versions[@]}" -le "$last_minor" ]; then + bail "Upgrade from $from_version to $to_version is not supported." 
+ fi + + # if there are no steps to perform, return + if [ "$first_minor" -gt "$last_minor" ]; then + return + fi + + if [ "$desired_version" != "$to_version" ]; then + last_minor=$((last_minor - 1)) # last version is the desired version + fi + + local step= + for (( step=first_minor ; step<=last_minor ; step++ )); do + echo "${step_versions[$step]}" + done + if [ "$desired_version" != "$to_version" ]; then + echo "$desired_version" + fi +} + +# common_upgrade_compare_versions prints 0 if the versions are equal, 1 if the first is greater, +# and -1 if the second is greater. +function common_upgrade_compare_versions() { + local a="$1" + local b="$2" + + local a_major= + local b_major= + a_major=$(common_upgrade_major_minor_to_major "$a") + b_major=$(common_upgrade_major_minor_to_major "$b") + + if [ "$a_major" -lt "$b_major" ]; then + echo "-1" + return + elif [ "$a_major" -gt "$b_major" ]; then + echo "1" + return + fi + + local a_minor= + local b_minor= + a_minor=$(common_upgrade_major_minor_to_minor "$a") + b_minor=$(common_upgrade_major_minor_to_minor "$b") + + if [ "$a_minor" -lt "$b_minor" ]; then + echo "-1" + return + elif [ "$a_minor" -gt "$b_minor" ]; then + echo "1" + return + fi + + echo "0" +} + +# common_upgrade_is_version_included returns 0 if the version is included in the range. +function common_upgrade_is_version_included() { + local from_version="$1" + local to_version="$2" + local current_version="$3" + # if current_version is greater than from_version and current_version is less than or equal to to_version + [ "$(common_upgrade_compare_versions "$current_version" "$from_version")" = "1" ] && \ + [ "$(common_upgrade_compare_versions "$current_version" "$to_version")" != "1" ] +} + +# common_upgrade_max_version will return the greater of the two versions. 
+function common_upgrade_max_version() { + local a="$1" + local b="$2" + if [ "$(common_upgrade_compare_versions "$a" "$b")" = "1" ]; then + echo "$a" + else + echo "$b" + fi +} + +# common_upgrade_print_list_of_minor_upgrades prints message of minor versions that will be +# upgraded. e.g. "1.0.x to 1.1, 1.1 to 1.2, 1.2 to 1.3, and 1.3 to 1.4" +function common_upgrade_print_list_of_minor_upgrades() { + local from_version="$1" + local to_version="$2" + + printf "This involves upgrading from " + local first_minor= + local last_minor= + first_minor=$(common_upgrade_major_minor_to_minor "$from_version") + last_minor=$(common_upgrade_major_minor_to_minor "$to_version") + + local minor= + for (( minor=first_minor ; minor/dev/null 2>&1 ; then + SKIP_DOCKER_INSTALL=1 + if [ -n "$DOCKER_VERSION" ]; then + echo "Docker already exists on this machine so no docker install will be performed" + fi + fi + + discover_public_ip + discover_private_ip + + KERNEL_MAJOR=$(uname -r | cut -d'.' -f1) + KERNEL_MINOR=$(uname -r | cut -d'.' -f2) +} + +LSB_DIST= +DIST_VERSION= +DIST_VERSION_MAJOR= +DIST_VERSION_MINOR= +detectLsbDist() { + _dist= + _error_msg="We have checked /etc/os-release and /etc/centos-release files." + if [ -f /etc/centos-release ] && [ -r /etc/centos-release ]; then + # CentOS 6 example: CentOS release 6.9 (Final) + # CentOS 7 example: CentOS Linux release 7.5.1804 (Core) + _dist="$(cat /etc/centos-release | cut -d" " -f1)" + _version="$(cat /etc/centos-release | sed 's/Linux //' | sed 's/Stream //' | cut -d" " -f3 | cut -d "." -f1-2)" + elif [ -f /etc/os-release ] && [ -r /etc/os-release ]; then + _dist="$(. /etc/os-release && echo "$ID")" + _version="$(. /etc/os-release && echo "$VERSION_ID")" + elif [ -f /etc/redhat-release ] && [ -r /etc/redhat-release ]; then + # this is for RHEL6 + _dist="rhel" + _major_version=$(cat /etc/redhat-release | cut -d" " -f7 | cut -d "." -f1) + _minor_version=$(cat /etc/redhat-release | cut -d" " -f7 | cut -d "." 
-f2) + _version=$_major_version + elif [ -f /etc/system-release ] && [ -r /etc/system-release ]; then + if grep --quiet "Amazon Linux" /etc/system-release; then + # Special case for Amazon 2014.03 + _dist="amzn" + _version=$(awk '/Amazon Linux/{print $NF}' /etc/system-release) + fi + else + _error_msg="$_error_msg\nDistribution cannot be determined because neither of these files exist." + fi + + if [ -n "$_dist" ]; then + _error_msg="$_error_msg\nDetected distribution is ${_dist}." + _dist="$(echo "$_dist" | tr '[:upper:]' '[:lower:]')" + case "$_dist" in + ubuntu) + _error_msg="$_error_msg\nHowever detected version $_version is less than 12." + oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS"; + [ $1 -ge 12 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1 + ;; + debian) + _error_msg="$_error_msg\nHowever detected version $_version is less than 7." + oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS"; + [ $1 -ge 7 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1 + ;; + fedora) + _error_msg="$_error_msg\nHowever detected version $_version is less than 21." + oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS"; + [ $1 -ge 21 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1 + ;; + rhel) + _error_msg="$_error_msg\nHowever detected version $_version is less than 7." + oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS"; + [ $1 -ge 6 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1 && DIST_VERSION_MINOR="${DIST_VERSION#$DIST_VERSION_MAJOR.}" && DIST_VERSION_MINOR="${DIST_VERSION_MINOR%%.*}" + ;; + rocky) + _error_msg="$_error_msg\nHowever detected version $_version is less than 7." 
+ oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS"; + [ $1 -ge 6 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1 && DIST_VERSION_MINOR="${DIST_VERSION#$DIST_VERSION_MAJOR.}" && DIST_VERSION_MINOR="${DIST_VERSION_MINOR%%.*}" + ;; + centos) + _error_msg="$_error_msg\nHowever detected version $_version is less than 6." + oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS"; + [ $1 -ge 6 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1 && DIST_VERSION_MINOR="${DIST_VERSION#$DIST_VERSION_MAJOR.}" && DIST_VERSION_MINOR="${DIST_VERSION_MINOR%%.*}" + ;; + amzn) + _error_msg="$_error_msg\nHowever detected version $_version is not one of\n 2, 2.0, 2018.03, 2017.09, 2017.03, 2016.09, 2016.03, 2015.09, 2015.03, 2014.09, 2014.03." + [ "$_version" = "2" ] || [ "$_version" = "2.0" ] || \ + [ "$_version" = "2018.03" ] || \ + [ "$_version" = "2017.03" ] || [ "$_version" = "2017.09" ] || \ + [ "$_version" = "2016.03" ] || [ "$_version" = "2016.09" ] || \ + [ "$_version" = "2015.03" ] || [ "$_version" = "2015.09" ] || \ + [ "$_version" = "2014.03" ] || [ "$_version" = "2014.09" ] && \ + LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$_version + ;; + sles) + _error_msg="$_error_msg\nHowever detected version $_version is less than 12." + oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS"; + [ $1 -ge 12 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1 + ;; + ol) + _error_msg="$_error_msg\nHowever detected version $_version is less than 6." + oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS"; + [ $1 -ge 6 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1 && DIST_VERSION_MINOR="${DIST_VERSION#$DIST_VERSION_MAJOR.}" && DIST_VERSION_MINOR="${DIST_VERSION_MINOR%%.*}" + ;; + *) + _error_msg="$_error_msg\nThat is an unsupported distribution." 
+ ;; + esac + fi + + if [ -z "$LSB_DIST" ]; then + echo >&2 "$(echo | sed "i$_error_msg")" + echo >&2 "" + echo >&2 "Please visit the following URL for more detailed installation instructions:" + echo >&2 "" + echo >&2 " https://help.replicated.com/docs/distributing-an-application/installing/" + exit 1 + fi +} + +export CURRENT_KUBERNETES_VERSION= + +export KUBERNETES_UPGRADE=0 + +function discoverCurrentKubernetesVersion() { + local fullCluster="$1" + + CURRENT_KUBERNETES_VERSION=$(maybe discover_local_kubernetes_version) + + if [ -z "$CURRENT_KUBERNETES_VERSION" ]; then + # This is a new install and no upgrades are required + return 0 + fi + + if [ -z "$fullCluster" ]; then + return 0 + fi + + # Populate arrays with versions of remote nodes + kubernetes_get_remote_primaries + kubernetes_get_secondaries + + semverCompare "$CURRENT_KUBERNETES_VERSION" "$KUBERNETES_VERSION" + if [ "$SEMVER_COMPARE_RESULT" = "-1" ]; then + KUBERNETES_UPGRADE=1 + elif [ "$SEMVER_COMPARE_RESULT" = "1" ]; then + bail "The current Kubernetes version $CURRENT_KUBERNETES_VERSION is greater than target version $KUBERNETES_VERSION" + fi + + # Check for upgrades required on remote primaries + for node in "${!KUBERNETES_REMOTE_PRIMARIES[@]}"; do + semverCompare "${KUBERNETES_REMOTE_PRIMARY_VERSIONS[$node]}" "$KUBERNETES_VERSION" + if [ "$SEMVER_COMPARE_RESULT" = "-1" ]; then + KUBERNETES_UPGRADE=1 + elif [ "$SEMVER_COMPARE_RESULT" = "1" ]; then + bail "The current Kubernetes version $CURRENT_KUBERNETES_VERSION is greater than target version $KUBERNETES_VERSION on remote primary $node" + fi + done + + # Check for upgrades required on remote secondaries + for node in "${!KUBERNETES_SECONDARIES[@]}"; do + semverCompare "${KUBERNETES_SECONDARY_VERSIONS[$node]}" "$KUBERNETES_VERSION" + if [ "$SEMVER_COMPARE_RESULT" = "-1" ]; then + KUBERNETES_UPGRADE=1 + elif [ "$SEMVER_COMPARE_RESULT" = "1" ]; then + bail "The current Kubernetes version $CURRENT_KUBERNETES_VERSION is greater than target version 
$KUBERNETES_VERSION on remote worker $node" + fi + done +} + +function discover_local_kubernetes_version() { + grep -s ' image: ' /etc/kubernetes/manifests/kube-apiserver.yaml | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' +} + +function get_docker_version() { + if ! commandExists "docker" ; then + return + fi + docker -v | awk '{gsub(/,/, "", $3); print $3}' +} + +discover_public_ip() { + if [ "$AIRGAP" == "1" ]; then + return + fi + + # gce + set +e + _out=$(curl --noproxy "*" --max-time 5 --connect-timeout 2 -qSfs -H 'Metadata-Flavor: Google' http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip 2>/dev/null) + _status=$? + set -e + if [ "$_status" -eq "0" ] && [ -n "$_out" ]; then + if isValidIpv4 "$_out" || isValidIpv6 "$_out"; then + PUBLIC_ADDRESS=$_out + fi + return + fi + + # ec2 + set +e + _out=$(curl --noproxy "*" --max-time 5 --connect-timeout 2 -qSfs http://169.254.169.254/latest/meta-data/public-ipv4 2>/dev/null) + _status=$? + set -e + if [ "$_status" -eq "0" ] && [ -n "$_out" ]; then + if isValidIpv4 "$_out" || isValidIpv6 "$_out"; then + PUBLIC_ADDRESS=$_out + fi + return + fi + + # azure + set +e + _out=$(curl --noproxy "*" --max-time 5 --connect-timeout 2 -qSfs -H Metadata:true "http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/publicIpAddress?api-version=2017-08-01&format=text" 2>/dev/null) + _status=$? 
+ set -e + if [ "$_status" -eq "0" ] && [ -n "$_out" ]; then + if isValidIpv4 "$_out" || isValidIpv6 "$_out"; then + PUBLIC_ADDRESS=$_out + fi + return + fi +} + +function discover_private_ip() { + if [ -n "$PRIVATE_ADDRESS" ]; then + return 0 + fi + PRIVATE_ADDRESS="$(${K8S_DISTRO}_discover_private_ip)" +} + +function discover_non_loopback_nameservers() { + local resolvConf=/etc/resolv.conf + # https://github.com/kubernetes/kubernetes/blob/v1.19.3/cmd/kubeadm/app/componentconfigs/kubelet.go#L211 + if systemctl is-active -q systemd-resolved; then + resolvConf=/run/systemd/resolve/resolv.conf + fi + cat $resolvConf | grep -E '^nameserver\s+' | grep -Eqv '^nameserver\s+127' +} + + +function init_daemon_json() { + if [ -f /etc/docker/daemon.json ]; then + return + fi + + mkdir -p /etc/docker + + # Change cgroup driver to systemd + # Docker uses cgroupfs by default to manage cgroup. On distributions using systemd, + # i.e. RHEL and Ubuntu, this causes issues because there are now 2 seperate ways + # to manage resources. For more info see the link below. + # https://github.com/kubernetes/kubeadm/issues/1394#issuecomment-462878219 + # + if [ ! -f /var/lib/kubelet/kubeadm-flags.env ]; then + cat > /etc/docker/daemon.json < /etc/docker/daemon.json </dev/null | grep 'Storage Driver' | awk '{print $3}' | awk -F- '{print $1}') + if [ "$_driver" = "devicemapper" ] && docker info 2>/dev/null | grep -Fqs 'Data loop file:' ; then + printf "${RED}The running Docker daemon is configured to use the 'devicemapper' storage driver \ +in loopback mode.\nThis is not recommended for production use. Please see to the following URL for more \ +information.\n\nhttps://help.replicated.com/docs/kb/developer-resources/devicemapper-warning/.${NC}\n\n\ +" + # HARD_FAIL_ON_LOOPBACK + if [ -n "$1" ]; then + printf "${RED}Please configure a recommended storage driver and try again.${NC}\n\n" + exit 1 + fi + + printf "Do you want to proceed anyway? " + if ! 
confirmN; then
            exit 0
        fi
    fi
}

# docker_configure_proxy writes an HTTP(S) proxy drop-in for the docker systemd unit when the
# desired proxy settings differ from what the running daemon reports, then restarts docker.
docker_configure_proxy() {
    log "Configuring docker proxy"
    local previous_proxy=
    local previous_no_proxy=
    previous_proxy=$(docker info 2>/dev/null | grep -i 'Http Proxy:' | awk '{ print $NF }')
    previous_no_proxy=$(docker info 2>/dev/null | grep -i 'No Proxy:' | awk '{ print $NF }')
    log "Previous proxy: ($previous_proxy)"
    log "Previous no proxy: ($previous_no_proxy)"
    if [ "$PROXY_ADDRESS" = "$previous_proxy" ] && [ "$NO_PROXY_ADDRESSES" = "$previous_no_proxy" ]; then
        log "No changes were found. Proxy configuration still the same"
        return
    fi

    log "Updating proxy configuration: HTTP_PROXY=${PROXY_ADDRESS} NO_PROXY=${NO_PROXY_ADDRESSES}"
    mkdir -p /etc/systemd/system/docker.service.d
    local file=/etc/systemd/system/docker.service.d/http-proxy.conf

    echo "# Generated by kURL" > $file
    echo "[Service]" >> $file

    # use HTTPS_PROXY when the proxy address itself is an https URL
    if echo "$PROXY_ADDRESS" | grep -q "^https"; then
        echo "Environment=\"HTTPS_PROXY=${PROXY_ADDRESS}\" \"NO_PROXY=${NO_PROXY_ADDRESSES}\"" >> $file
    else
        echo "Environment=\"HTTP_PROXY=${PROXY_ADDRESS}\" \"NO_PROXY=${NO_PROXY_ADDRESSES}\"" >> $file
    fi

    restart_docker
}

# docker_get_host_packages_online downloads and extracts the docker host package bundle for the
# given version when running online.
function docker_get_host_packages_online() {
    local version="$1"

    if [ "$AIRGAP" != "1" ] && [ -n "$DIST_URL" ]; then
        # Cleanup broken/incompatible packages from failed runs
        # (quoted to avoid word-splitting of $DIR)
        rm -rf "$DIR/packages/docker/${version}"

        local package="docker-${version}.tar.gz"
        package_download "${package}"
        tar xf "$(package_filepath "${package}")"
        # rm docker-${version}.tar.gz
    fi
}

# It will only uninstall docker if it is a new installation
# and the installer has containerd set, to work around the bug:
# `dpkg: no, cannot proceed with removal of containerd ... docker.io
# depends on containerd (>= 1.2.6-0ubuntu1~) containerd is to be removed.`
# More info: https://bugs.launchpad.net/ubuntu/+source/docker.io/+bug/1940920
# https://bugs.launchpad.net/ubuntu/+source/docker.io/+bug/1939140
function uninstall_docker_new_installs_with_containerd() {

    # If docker is not installed OR if containerd is not in the spec
    # then docker should not be uninstalled
    if ! commandExists docker || [ -z "$CONTAINERD_VERSION" ]; then
        return
    fi

    # if k8s is installed already then docker should not be uninstalled
    # so that it can be properly migrated to containerd
    if kubernetes_resource_exists kube-system configmap kurl-config; then
        return
    fi

    logStep "Uninstalling Docker to avoid conflicts with containerd package.\n"

    if [ "$(docker ps -aq | wc -l)" != "0" ] ; then
        docker ps -aq | xargs docker rm -f || true
    fi
    # The rm -rf /var/lib/docker command below may fail with device busy error, so remove as much
    # data as possible now
    docker system prune --all --volumes --force || true
    systemctl disable docker.service --now || true

    # Note that docker.io can only be removed because it was installed prior to containerd and
    # this is a new install. Otherwise, this dep is required.
    # Important: The conflict is only removed when we uninstall docker.io
    case "$LSB_DIST" in
    ubuntu)
        export DEBIAN_FRONTEND=noninteractive
        dpkg --purge docker.io docker-ce docker-ce-cli
        ;;

    centos|rhel|amzn|ol)
        local dockerPackages=("docker.io" "docker-ce" "docker-ce-cli")
        if rpm -qa | grep -q 'docker-ce-rootless-extras'; then
            dockerPackages+=("docker-ce-rootless-extras")
        fi
        if rpm -qa | grep -q 'docker-scan-plugin'; then
            dockerPackages+=("docker-scan-plugin")
        fi
        # Fix: quote the array expansion so each package name reaches rpm as one argument
        rpm --erase "${dockerPackages[@]}"
        ;;
    esac

    rm -rf /var/lib/docker /var/lib/dockershim || true
    rm -f /var/run/dockershim.sock || true
    rm -f /var/run/docker.sock || true
    echo "Docker successfully uninstalled to allow to install containerd."
}


# install_helm copies the bundled helm and helmfile binaries into $DIR/bin on primary nodes
# when a helmfile spec is present.
function install_helm() {
    if [ -n "$HELM_HELMFILE_SPEC" ] && kubernetes_is_master; then

        BIN_HELM=$DIR/bin/helm
        BIN_HELMFILE=$DIR/bin/helmfile

        cp -f $DIR/helm/helm $DIR/bin
        cp -f $DIR/helm/helmfile $DIR/bin

    fi
}

# helmfile_sync renders the embedded helmfile spec to a temp file and runs `helmfile sync`,
# fetching chart dependencies first when online.
function helmfile_sync() {

    if [ -z "$HELM_HELMFILE_SPEC" ]; then
        return 0
    fi

    logStep "Installing Helm Charts using the Helmfile Spec"

    # TODO (dan): add reporting for helm
    # report_helm_start

    # Fix: do not use the spec itself as the printf format string; a '%' or '\' sequence in
    # the YAML would otherwise be interpreted by printf and corrupt the rendered file (SC2059)
    printf '%s' "${HELM_HELMFILE_SPEC}" > helmfile-tmp.yaml

    if [ "$AIRGAP" != "1" ]; then
        $BIN_HELMFILE -b $BIN_HELM --file helmfile-tmp.yaml deps # || report_helm_failure #TODO (dan): add reporting
    fi
    # TODO (dan): To support air gap case, we might need to modify the helmfile to always run the local chart

    $BIN_HELMFILE -b $BIN_HELM --file helmfile-tmp.yaml sync # || report_helm_failure #TODO (dan): add reporting

    rm helmfile-tmp.yaml

    # TODO (dan): add reporting for helm
    # report_helm_success
}

# helm_load would load helm images in airgap mode; currently unsupported.
function helm_load() {
    if [ "$AIRGAP" = "1" ] && [ -n "$HELM_HELMFILE_SPEC" ] ; then
        # TODO (dan): Implement airgapped loading after bundler is updated
        bail "Airgap Installation with Helm is currently not supported"
        #load_images $DIR/helm-bundle/images
    fi
}

+#!/bin/bash + +function install_host_archives() { + local dir="$1" + local dir_prefix="/archives" + local packages=("${@:2}") + _install_host_packages "$dir" "$dir_prefix" "${packages[@]}" +} + +function install_host_packages() { + local dir="$1" + local dir_prefix="" + local packages=("${@:2}") + _install_host_packages "$dir" "$dir_prefix" "${packages[@]}" +} + +function rpm_force_install_host_archives() { + local dir="$1" + local dir_prefix="/archives" + local packages=("${@:2}") + _rpm_force_install_host_packages "$dir" "$dir_prefix" "${packages[@]}" +} + +function rpm_force_install_host_packages() { + local dir="$1" + local dir_prefix="" + local packages=("${@:2}") + _rpm_force_install_host_packages "$dir" "$dir_prefix" "${packages[@]}" +} + +function _install_host_packages() { + local dir="$1" + local dir_prefix="$2" + local packages=("${@:3}") + + case "$LSB_DIST" in + ubuntu) + _dpkg_install_host_packages "$dir" "$dir_prefix" "${packages[@]}" + ;; + + centos|rhel|ol|rocky) + if [ "$DIST_VERSION_MAJOR" = "9" ]; then + _yum_install_host_packages_el9 "$dir" "$dir_prefix" "${packages[@]}" + else + _yum_install_host_packages "$dir" "$dir_prefix" "${packages[@]}" + fi + ;; + + amzn) + local fullpath= + fullpath="$(realpath "${dir}")/rhel-7-force${dir_prefix}" + if test -n "$(shopt -s nullglob; echo "${fullpath}"/*.rpm)" ; then + _rpm_force_install_host_packages "$dir" "$dir_prefix" "${packages[@]}" + else + _yum_install_host_packages "$dir" "$dir_prefix" "${packages[@]}" + fi + ;; + + *) + bail "Host package install is not supported on ${LSB_DIST} ${DIST_MAJOR}" + ;; + esac +} + +function _rpm_force_install_host_packages() { + if [ "${SKIP_SYSTEM_PACKAGE_INSTALL}" == "1" ]; then + logStep "Skipping installation of host packages: ${packages[*]}" + return + fi + + local dir="$1" + local dir_prefix="$2" + local packages=("${@:3}") + + logStep "Installing host packages ${packages[*]}" + + local fullpath= + fullpath="$(realpath "${dir}")/rhel-7-force${dir_prefix}" + 
if ! test -n "$(shopt -s nullglob; echo "${fullpath}"/*.rpm)" ; then + echo "Will not install host packages ${packages[*]}, no packages found." + return 0 + fi + + rpm --upgrade --force --nodeps --nosignature "${fullpath}"/*.rpm + + logSuccess "Host packages ${packages[*]} installed" +} + +function dpkg_install_host_archives() { + local dir="$1" + local dir_prefix="/archives" + local packages=("${@:2}") + _dpkg_install_host_packages "$dir" "$dir_prefix" "${packages[@]}" +} + +function dpkg_install_host_packages() { + local dir="$1" + local dir_prefix="" + local packages=("${@:2}") + _dpkg_install_host_packages "$dir" "$dir_prefix" "${packages[@]}" +} + +function _dpkg_apt_get_status_and_maybe_fix_broken_pkgs() { + logStep "Checking package manager status" + if apt-get check status ; then + logSuccess "Status checked successfully. No broken packages were found." + return + fi + + logWarn "Attempting to correct broken packages by running 'apt-get install --fix-broken --no-remove --yes'" + # Let's use || true here for when be required to remove the packages we properly should the error message + # with the steps to get it fix manually + apt-get install --fix-broken --no-remove --yes || true + if apt-get check status ; then + logSuccess "Broken packages fixed successfully" + return + fi + logFail "Unable to fix broken packages. Manual intervention is required." + logFail "Run the command 'apt-get check status' to get further information." +} + +function _dpkg_install_host_packages() { + if [ "${SKIP_SYSTEM_PACKAGE_INSTALL}" == "1" ]; then + logStep "Skipping installation of host packages: ${packages[*]}" + return + fi + + local dir="$1" + local dir_prefix="$2" + local packages=("${@:3}") + + logStep "Installing host packages ${packages[*]}" + + local fullpath= + fullpath="${dir}/ubuntu-${DIST_VERSION}${dir_prefix}" + if ! test -n "$(shopt -s nullglob; echo "${fullpath}"/*.deb)" ; then + echo "Will not install host packages ${packages[*]}, no packages found." 
+ return 0 + fi + + DEBIAN_FRONTEND=noninteractive dpkg --install --force-depends-version --force-confold --auto-deconfigure "${fullpath}"/*.deb + + logSuccess "Host packages ${packages[*]} installed" + + _dpkg_apt_get_status_and_maybe_fix_broken_pkgs +} + +function yum_install_host_archives() { + local dir="$1" + local dir_prefix="/archives" + local packages=("${@:2}") + if [ "$DIST_VERSION_MAJOR" = "9" ]; then + _yum_install_host_packages_el9 "$dir" "$dir_prefix" "${packages[@]}" + else + _yum_install_host_packages "$dir" "$dir_prefix" "${packages[@]}" + fi +} + +function yum_install_host_packages() { + local dir="$1" + local dir_prefix="" + local packages=("${@:2}") + if [ "$DIST_VERSION_MAJOR" = "9" ]; then + _yum_install_host_packages_el9 "$dir" "$dir_prefix" "${packages[@]}" + else + _yum_install_host_packages "$dir" "$dir_prefix" "${packages[@]}" + fi +} + +function _yum_install_host_packages() { + if [ "${SKIP_SYSTEM_PACKAGE_INSTALL}" == "1" ]; then + logStep "Skipping installation of host packages: ${packages[*]}" + return + fi + + local dir="$1" + local dir_prefix="$2" + local packages=("${@:3}") + + logStep "Installing host packages ${packages[*]}" + + local fullpath= + fullpath="$(_yum_get_host_packages_path "${dir}" "${dir_prefix}")" + if ! test -n "$(shopt -s nullglob; echo "${fullpath}"/*.rpm)" ; then + echo "Will not install host packages ${packages[*]}, no packages found." 
+ return 0 + fi + cat > /etc/yum.repos.d/kurl.local.repo < $next_version" + if semverCompare "$next_version" "$previous_version" && [ "$SEMVER_COMPARE_RESULT" -lt "0" ]; then + if uname -r | grep -q "el8" ; then + yum --disablerepo=* --enablerepo=kurl.local downgrade --allowerasing -y "${packages[@]}" + else + yum --disablerepo=* --enablerepo=kurl.local downgrade -y "${packages[@]}" + fi + fi + logSuccess "Downgraded containerd" + fi + # shellcheck disable=SC2086 + if [[ "${packages[*]}" == *"containerd.io"* && -n $(uname -r | grep "el8") ]]; then + yum --disablerepo=* --enablerepo=kurl.local install --allowerasing -y "${packages[@]}" + else + yum --disablerepo=* --enablerepo=kurl.local install -y "${packages[@]}" + fi + yum clean metadata --disablerepo=* --enablerepo=kurl.local + rm /etc/yum.repos.d/kurl.local.repo + + reset_dnf_module_kurl_local + + logSuccess "Host packages ${packages[*]} installed" +} + +function _yum_install_host_packages_el9() { + if [ "${SKIP_SYSTEM_PACKAGE_INSTALL}" == "1" ]; then + logStep "Skipping installation of host packages: ${packages[*]}" + return + fi + + local dir="$1" + local dir_prefix="$2" + local packages=("${@:3}") + + logStep "Installing host packages ${packages[*]}" + + local fullpath= + fullpath="$(_yum_get_host_packages_path "$dir" "$dir_prefix")" + if ! test -n "$(shopt -s nullglob; echo "$fullpath"/*.rpm)" ; then + echo "Will not install host packages ${packages[*]}, no packages found." 
+ return 0 + fi + + local repoprefix= + repoprefix="$(echo "${dir%"/"}" | awk -F'/' '{ print $(NF-1) "-" $NF }')" + if [ -n "$dir_prefix" ]; then + repoprefix="$repoprefix.${dir_prefix/#"/"}" + fi + + local reponame="$repoprefix.kurl.local" + local repopath="$KURL_INSTALL_DIRECTORY.repos/$repoprefix" + + mkdir -p "$KURL_INSTALL_DIRECTORY.repos" + rm -rf "$repopath" + cp -r "$fullpath" "$repopath" + + cat > "/etc/yum.repos.d/$reponame.repo" </dev/null || true +} + +# is_rhel_9_variant returns 0 if the current distro is RHEL 9 or a derivative +function is_rhel_9_variant() { + if [ "$DIST_VERSION_MAJOR" != "9" ]; then + return 1 + fi + + case "$LSB_DIST" in + centos|rhel|ol|rocky) + return 0 + ;; + *) + return 1 + ;; + esac +} + +# yum_ensure_host_package ensures that a package is installed on the host +function yum_ensure_host_package() { + local package="$1" + + if ! yum_is_host_package_installed "$package" ; then + logStep "Installing host package $package" + if ! yum install -y "$package" ; then + logFail "Failed to install host package $package." + logFail "Please install $package and try again." + bail " yum install $package" + fi + logSuccess "Host package $package installed" + fi +} + +# preflights_require_host_packages ensures that all required host packages are installed or +# available. +function preflights_require_host_packages() { + if ! is_rhel_9_variant ; then + return # only rhel 9 requires this + fi + + logStep "Checking required host packages" + + local distro=rhel-9 + + local fail=0 + + local dir= + for dir in addons/*/ packages/*/ ; do + local addon= + addon=$(basename "$dir") + if [ "$addon" = "*" ]; then + # the directory is empty. this is likely a bug + logWarn "No add-ons found in $(dirname "$dir")" + continue + fi + local varname="${addon^^}_VERSION" + varname="${varname//-/_}" + local addon_version="${!varname}" + if [ -z "$addon_version" ]; then + continue + fi + local deps_file="${dir}$addon_version/$distro/Deps" + if [ ! 
-f "$deps_file" ]; then + continue + fi + local dep= + while read -r dep ; do + if ! yum_is_host_package_installed_or_available "$dep" ; then + if [ "$fail" = "0" ]; then + echo "" + fail=1 + fi + logFail "Host package $dep is required by $addon $addon_version" + fi + done <"$deps_file" + done + + if [ "$fail" = "1" ]; then + echo "" + log "Host packages are missing. Please install them and re-run the install script." + if [ "$KURL_DISMISS_HOST_PACKAGES_PREFLIGHT" != "1" ]; then + log "Run the script again with flag \"dismiss-host-packages-preflight\" to continue." + exit 1 + fi + else + logSuccess "Required host packages are installed or available" + fi +} + +# yum_is_host_package_installed returns 0 if the package is installed on the host +function yum_is_host_package_installed() { + local package="$1" + + yum list installed "$package" >/dev/null 2>&1 +} + +# yum_is_host_package_installed_or_available returns 0 if the package is installed or available +function yum_is_host_package_installed_or_available() { + local package="$1" + + if yum list installed "$package" >/dev/null 2>&1 ; then + return 0 + fi + + if yum list available "$package" >/dev/null 2>&1 ; then + return 0 + fi + + return 1 +} + +#!/bin/bash + +function kubernetes_pre_init() { + if is_rhel_9_variant ; then + # git is packaged in the bundle and installed in other oses by + # kubernetes_install_host_packages + yum_ensure_host_package git + fi +} + +function kubernetes_host() { + kubernetes_load_modules + kubernetes_load_ipv4_modules + kubernetes_load_ipv6_modules + kubernetes_load_ipvs_modules + + if [ "$SKIP_KUBERNETES_HOST" = "1" ]; then + return 0 + fi + + kubernetes_install_host_packages "$KUBERNETES_VERSION" + + kubernetes_load_images "$KUBERNETES_VERSION" + + install_plugins + + install_kustomize +} + +function kubernetes_load_images() { + local version="$1" + + local varname="KUBERNETES_IMAGES_LOADED_${version//./_}" + if [ "${!varname:-}" = "1" ]; then + # images already loaded for this 
version + return 0 + fi + + load_images "$DIR/packages/kubernetes/$version/images" + if [ -n "$SONOBUOY_VERSION" ] && [ -d "$DIR/packages/kubernetes-conformance/$version/images" ]; then + load_images "$DIR/packages/kubernetes-conformance/$version/images" + fi + + declare -g "$varname"=1 +} + +function kubernetes_get_packages() { + if [ "$AIRGAP" != "1" ] && [ -n "$DIST_URL" ]; then + kubernetes_get_host_packages_online "$KUBERNETES_VERSION" + kubernetes_get_conformance_packages_online "$KUBERNETES_VERSION" + fi +} + +# kubernetes_maybe_get_packages_airgap downloads kubernetes packages if they are not already present +function kubernetes_maybe_get_packages_airgap() { + if [ "$AIRGAP" != "1" ]; then + return + fi + if [ -d "$DIR/packages/kubernetes/$KUBERNETES_VERSION/assets" ]; then + return + fi + addon_fetch_airgap "kubernetes" "$KUBERNETES_VERSION" +} + +function kubernetes_load_ipvs_modules() { + if lsmod | grep -q ip_vs ; then + return + fi + + if [ "$KERNEL_MAJOR" -gt "4" ] || \ + { [ "$KERNEL_MAJOR" -eq "4" ] && [ "$KERNEL_MINOR" -ge "19" ]; } || \ + { + { [ "$LSB_DIST" = "ol" ] || [ "$LSB_DIST" = "rhel" ] || [ "$LSB_DIST" = "centos" ] || [ "$LSB_DIST" = "rocky" ]; } && \ + { [ "$DIST_VERSION_MAJOR" = "8" ] || [ "$DIST_VERSION_MAJOR" = "9" ] || [ "$DIST_VERSION_MINOR" -gt "2" ]; }; \ + }; then + modprobe nf_conntrack + else + modprobe nf_conntrack_ipv4 + fi + + rm -f /etc/modules-load.d/replicated-ipvs.conf + + echo "Adding kernel modules ip_vs, ip_vs_rr, ip_vs_wrr, and ip_vs_sh" + modprobe ip_vs + modprobe ip_vs_rr + modprobe ip_vs_wrr + modprobe ip_vs_sh + + echo "nf_conntrack_ipv4" > /etc/modules-load.d/99-replicated-ipvs.conf + # shellcheck disable=SC2129 + echo "ip_vs" >> /etc/modules-load.d/99-replicated-ipvs.conf + echo "ip_vs_rr" >> /etc/modules-load.d/99-replicated-ipvs.conf + echo "ip_vs_wrr" >> /etc/modules-load.d/99-replicated-ipvs.conf + echo "ip_vs_sh" >> /etc/modules-load.d/99-replicated-ipvs.conf +} + +function kubernetes_load_modules() { + if 
! lsmod | grep -Fq br_netfilter ; then + echo "Adding kernel module br_netfilter" + modprobe br_netfilter + fi + echo "br_netfilter" > /etc/modules-load.d/99-replicated.conf +} + +function kubernetes_load_ipv4_modules() { + if [ "$IPV6_ONLY" = "1" ]; then + return 0 + fi + + if ! lsmod | grep -q ^ip_tables ; then + echo "Adding kernel module ip_tables" + modprobe ip_tables + fi + echo "ip_tables" > /etc/modules-load.d/99-replicated-ipv4.conf + + echo "net.bridge.bridge-nf-call-iptables = 1" > /etc/sysctl.d/99-replicated-ipv4.conf + echo "net.ipv4.conf.all.forwarding = 1" >> /etc/sysctl.d/99-replicated-ipv4.conf + + sysctl --system + + if [ "$(cat /proc/sys/net/ipv4/ip_forward)" = "0" ]; then + bail "Failed to enable IP forwarding." + fi +} + +function kubernetes_load_ipv6_modules() { + if [ "$IPV6_ONLY" != "1" ]; then + return 0 + fi + + if ! lsmod | grep -q ^ip6_tables ; then + echo "Adding kernel module ip6_tables" + modprobe ip6_tables + fi + echo "ip6_tables" > /etc/modules-load.d/99-replicated-ipv6.conf + + echo "net.bridge.bridge-nf-call-ip6tables = 1" > /etc/sysctl.d/99-replicated-ipv6.conf + echo "net.ipv6.conf.all.forwarding = 1" >> /etc/sysctl.d/99-replicated-ipv6.conf + + sysctl --system + + if [ "$(cat /proc/sys/net/ipv6/conf/all/forwarding)" = "0" ]; then + bail "Failed to enable IPv6 forwarding." + fi +} + +# k8sVersion is an argument because this may be used to install step versions of K8s during an upgrade +# to the target version +function kubernetes_install_host_packages() { + k8sVersion=$1 + + logStep "Install kubelet, kubectl and cni host packages" + + if kubernetes_host_commands_ok "$k8sVersion"; then + logSuccess "Kubernetes host packages already installed" + + kubernetes_cis_chmod_kubelet_service_file + + # less command is broken if libtinfo.so.5 is missing in amazon linux 2 + if [ "$LSB_DIST" == "amzn" ] && [ "$AIRGAP" != "1" ] && ! 
file_exists "/usr/lib64/libtinfo.so.5"; then + if [ -d "$DIR/packages/kubernetes/${k8sVersion}/assets" ]; then + install_host_packages "${DIR}/packages/kubernetes/${k8sVersion}" ncurses-compat-libs + fi + fi + + return + fi + + cat > "$DIR/tmp-kubeadm.conf" < /dev/null +} + +function spinner_kubernetes_api_healthy() { + if ! spinner_until 120 kubernetes_api_is_healthy; then + bail "Kubernetes API failed to report healthy" + fi +} + +function spinner_containerd_is_healthy() { + if ! spinner_until 120 containerd_is_healthy; then + bail "Containerd failed to restart" + fi +} + +# With AWS NLB kubectl commands may fail to connect to the Kubernetes API immediately after a single +# successful health check +function spinner_kubernetes_api_stable() { + echo "Waiting for kubernetes api health to report ok" + for i in {1..10}; do + sleep 1 + spinner_kubernetes_api_healthy + done +} + +function kubernetes_drain() { + local deleteEmptydirDataFlag="--delete-emptydir-data" + local k8sVersion= + k8sVersion=$(grep ' image: ' /etc/kubernetes/manifests/kube-apiserver.yaml | grep -oE '[0-9]+\.[0-9]+\.[0-9]+') + local k8sVersionMinor= + k8sVersionMinor=$(kubernetes_version_minor "$k8sVersion") + if [ "$k8sVersionMinor" -lt "20" ]; then + deleteEmptydirDataFlag="--delete-local-data" + fi + # --pod-selector='app!=csi-attacher,app!=csi-provisioner' + # https://longhorn.io/docs/1.3.2/volumes-and-nodes/maintenance/#updating-the-node-os-or-container-runtime + if kubernetes_has_remotes ; then + kubectl drain "$1" \ + "$deleteEmptydirDataFlag" \ + --ignore-daemonsets \ + --force \ + --grace-period=30 \ + --timeout=120s \ + --pod-selector 'app notin (rook-ceph-mon,rook-ceph-osd,rook-ceph-osd-prepare,rook-ceph-operator,rook-ceph-agent),k8s-app!=kube-dns, name notin (restic)' || true + else + # On single node installs force drain to delete pods or + # else the command will timeout when evicting pods with pod disruption budgets + kubectl drain "$1" \ + "$deleteEmptydirDataFlag" \ + 
--ignore-daemonsets \ + --force \ + --grace-period=30 \ + --timeout=120s \ + --disable-eviction \ + --pod-selector 'app notin (rook-ceph-mon,rook-ceph-osd,rook-ceph-osd-prepare,rook-ceph-operator,rook-ceph-agent),k8s-app!=kube-dns, name notin (restic)' || true + fi +} + +function kubernetes_node_has_version() { + local name="$1" + local version="$2" + + local actual_version="$(try_1m kubernetes_node_kubelet_version $name)" + + [ "$actual_version" = "v${version}" ] +} + +function kubernetes_node_kubelet_version() { + local name="$1" + + kubectl get node "$name" -o=jsonpath='{@.status.nodeInfo.kubeletVersion}' +} + +function kubernetes_any_remote_master_unupgraded() { + while read -r master; do + local name=$(echo $master | awk '{ print $1 }') + if ! kubernetes_node_has_version "$name" "$KUBERNETES_VERSION"; then + return 0 + fi + done < <(kubernetes_remote_masters) + return 1 +} + +function kubernetes_any_worker_unupgraded() { + while read -r worker; do + local name=$(echo $worker | awk '{ print $1 }') + if ! kubernetes_node_has_version "$name" "$KUBERNETES_VERSION"; then + return 0 + fi + done < <(kubernetes_workers) + return 1 +} + +function kubelet_version() { + kubelet --version | cut -d ' ' -f 2 | sed 's/v//' +} + +function kubernetes_scale_down() { + local ns="$1" + local kind="$2" + local name="$3" + + kubernetes_scale "$ns" "$kind" "$name" "0" +} + +function kubernetes_scale() { + local ns="$1" + local kind="$2" + local name="$3" + local replicas="$4" + + if ! 
kubernetes_resource_exists "$ns" "$kind" "$name"; then + return 0 + fi + + kubectl -n "$ns" scale "$kind" "$name" --replicas="$replicas" +} + +function kubernetes_secret_value() { + local ns="$1" + local name="$2" + local key="$3" + + kubectl -n "$ns" get secret "$name" -ojsonpath="{ .data.$key }" 2>/dev/null | base64 --decode +} + +function kubernetes_is_master() { + if [ "$MASTER" = "1" ]; then + return 0 + elif [ -f /etc/kubernetes/manifests/kube-apiserver.yaml ]; then + return 0 + else + return 1 + fi +} + +function discover_pod_subnet() { + # TODO check ipv6 cidr for overlaps + if [ "$IPV6_ONLY" = "1" ]; then + if [ -z "$POD_CIDR" ]; then + POD_CIDR="fd00:c00b:1::/112" + fi + return 0 + fi + + local excluded="" + if ! ip route show src "$PRIVATE_ADDRESS" | awk '{ print $1 }' | grep -q '/'; then + excluded="--exclude-subnet=${PRIVATE_ADDRESS}/16" + fi + + if [ -n "$POD_CIDR" ]; then + local podCidrSize=$(echo $POD_CIDR | awk -F'/' '{ print $2 }') + + # if pod-cidr flag and pod-cidr-range are both set, validate pod-cidr is as large as pod-cidr-range + if [ -n "$POD_CIDR_RANGE" ]; then + if [ "$podCidrSize" -gt "$POD_CIDR_RANGE" ]; then + bail "Pod cidr must be at least /$POD_CIDR_RANGE" + fi + fi + + # if pod cidr flag matches existing weave pod cidr don't validate + if [ "$POD_CIDR" = "$EXISTING_POD_CIDR" ]; then + return 0 + elif [ -n "$EXISTING_POD_CIDR" ]; then + bail "Pod cidr cannot be changed to $POD_CIDR because existing cidr is $EXISTING_POD_CIDR" + fi + + if $DIR/bin/subnet --subnet-alloc-range "$POD_CIDR" --cidr-range "$podCidrSize" "$excluded" 1>/dev/null; then + return 0 + fi + + printf "${RED}Pod cidr ${POD_CIDR} overlaps with existing route. Continue? ${NC}" + if ! 
confirmY ; then + exit 1 + fi + return 0 + fi + # detected from weave device + if [ -n "$EXISTING_POD_CIDR" ]; then + echo "Using existing pod network ${EXISTING_POD_CIDR}" + POD_CIDR="$EXISTING_POD_CIDR" + return 0 + fi + local size="$POD_CIDR_RANGE" + if [ -z "$size" ]; then + size="20" + fi + # find a network for the Pods, preferring start at 10.32.0.0 + if podnet=$($DIR/bin/subnet --subnet-alloc-range "10.32.0.0/16" --cidr-range "$size" "$excluded"); then + echo "Found pod network: $podnet" + POD_CIDR="$podnet" + return 0 + fi + + if podnet=$($DIR/bin/subnet --subnet-alloc-range "10.0.0.0/8" --cidr-range "$size" "$excluded"); then + echo "Found pod network: $podnet" + POD_CIDR="$podnet" + return 0 + fi + + bail "Failed to find available subnet for pod network. Use the pod-cidr flag to set a pod network" +} + +# This must run after discover_pod_subnet since it excludes the pod cidr +function discover_service_subnet() { + # TODO check ipv6 cidr for overlaps + if [ "$IPV6_ONLY" = "1" ]; then + if [ -z "$SERVICE_CIDR" ]; then + SERVICE_CIDR="fd00:c00b:2::/112" + fi + return 0 + fi + local excluded="--exclude-subnet=$POD_CIDR" + if ! 
ip route show src "$PRIVATE_ADDRESS" | awk '{ print $1 }' | grep -q '/'; then + excluded="$excluded,${PRIVATE_ADDRESS}/16" + fi + + EXISTING_SERVICE_CIDR=$(maybe kubeadm_cluster_configuration | grep serviceSubnet | awk '{ print $2 }') + + if [ -n "$SERVICE_CIDR" ]; then + local serviceCidrSize=$(echo $SERVICE_CIDR | awk -F'/' '{ print $2 }') + + # if service-cidr flag and service-cidr-range are both set, validate service-cidr is as large as service-cidr-range + if [ -n "$SERVICE_CIDR_RANGE" ]; then + if [ "$serviceCidrSize" -gt "$SERVICE_CIDR_RANGE" ]; then + bail "Service cidr must be at least /$SERVICE_CIDR_RANGE" + fi + fi + + # if service-cidr flag matches existing service cidr don't validate + if [ "$SERVICE_CIDR" = "$EXISTING_SERVICE_CIDR" ]; then + return 0 + elif [ -n "$EXISTING_SERVICE_CIDR" ]; then + bail "Service cidr cannot be changed to $SERVICE_CIDR because existing cidr is $EXISTING_SERVICE_CIDR" + fi + + if $DIR/bin/subnet --subnet-alloc-range "$SERVICE_CIDR" --cidr-range "$serviceCidrSize" "$excluded" 1>/dev/null; then + return 0 + fi + + printf "${RED}Service cidr ${SERVICE_CIDR} overlaps with existing route. Continue? ${NC}" + if ! confirmY ; then + exit 1 + fi + return 0 + fi + + if [ -n "$EXISTING_SERVICE_CIDR" ]; then + echo "Using existing service network ${EXISTING_SERVICE_CIDR}" + SERVICE_CIDR="$EXISTING_SERVICE_CIDR" + return 0 + fi + + local size="$SERVICE_CIDR_RANGE" + if [ -z "$size" ]; then + size="22" + fi + + # find a network for the services, preferring start at 10.96.0.0 + if servicenet=$($DIR/bin/subnet --subnet-alloc-range "10.96.0.0/16" --cidr-range "$size" "$excluded"); then + echo "Found service network: $servicenet" + SERVICE_CIDR="$servicenet" + return 0 + fi + + if servicenet=$($DIR/bin/subnet --subnet-alloc-range "10.0.0.0/8" --cidr-range "$size" "$excluded"); then + echo "Found service network: $servicenet" + SERVICE_CIDR="$servicenet" + return 0 + fi + + bail "Failed to find available subnet for service network. 
Use the service-cidr flag to set a service network" +} + +function kubernetes_node_images() { + local nodeName="$1" + + kubectl get node "$nodeName" -ojsonpath="{range .status.images[*]}{ range .names[*] }{ @ }{'\n'}{ end }{ end }" +} + +function list_all_required_images() { + echo "$KURL_UTIL_IMAGE" + + find packages/kubernetes/$KUBERNETES_VERSION -type f -name Manifest 2>/dev/null | xargs cat | grep -E '^image' | grep -v no_remote_load | awk '{ print $3 }' + + if [ -n "$DOCKER_VERSION" ]; then + find packages/docker/$DOCKER_VERSION -type f -name Manifest 2>/dev/null | xargs cat | grep -E '^image' | grep -v no_remote_load | awk '{ print $3 }' + fi + + for dir in addons/*/ ; do + local addon= + addon=$(basename "$dir") + local varname="${addon^^}_VERSION" + varname="${varname//-/_}" + local addon_version="${!varname}" + if [ -z "$addon_version" ]; then + continue + fi + local manifest_file="addons/$addon/$addon_version/Manifest" + if [ ! -f "$manifest_file" ]; then + continue + fi + grep -E '^image' "$manifest_file" | grep -v no_remote_load | awk '{ print $3 }' + done +} + +function kubernetes_node_has_all_images() { + local node_name="$1" + + local image_list= + while read -r image; do + if ! 
kubernetes_node_has_image "$node_name" "$image"; then + image_list="$image_list $image" + fi + done < <(list_all_required_images) + + image_list=$(echo "$image_list" | xargs) # strip leading and trailing whitespace + + if [ -n "$image_list" ]; then + log "" + logWarn "Node $node_name missing image(s) $image_list" + return 1 + fi +} + +# kubernetes_nodes_missing_images will return a list of nodes that are missing any of the images in +# the provided list +function kubernetes_nodes_missing_images() { + local images_list="$1" + local target_host="$2" + local exclude_hosts="$3" + + if [ -z "$images_list" ]; then + return + fi + + # shellcheck disable=SC2086 + "$DIR"/bin/kurl cluster nodes-missing-images --image="$KURL_UTIL_IMAGE" --target-host="$target_host" --exclude-host="$exclude_hosts" $images_list +} + +function kubernetes_node_has_image() { + local node_name="$1" + local image="$2" + + while read -r node_image; do + if [ "$(canonical_image_name "$node_image")" = "$(canonical_image_name "$image")" ]; then + return 0 + fi + done < <(kubernetes_node_images "$node_name") + + return 1 +} + +KUBERNETES_REMOTE_PRIMARIES=() +KUBERNETES_REMOTE_PRIMARY_VERSIONS=() +function kubernetes_get_remote_primaries() { + local primary= + while read -r primary ; do + local name= + name=$(echo "$primary" | awk '{ print $1 }') + local version= + version="$(try_1m kubernetes_node_kubelet_version "$name")" + + KUBERNETES_REMOTE_PRIMARIES+=( "$name" ) + KUBERNETES_REMOTE_PRIMARY_VERSIONS+=( "${version#v}" ) # strip leading v + done < <(kubernetes_remote_masters) +} + +KUBERNETES_SECONDARIES=() +KUBERNETES_SECONDARY_VERSIONS=() +function kubernetes_get_secondaries() { + local secondary= + while read -r secondary ; do + local name= + name=$(echo "$secondary" | awk '{ print $1 }') + local version= + version="$(try_1m kubernetes_node_kubelet_version "$name")" + + KUBERNETES_SECONDARIES+=( "$name" ) + KUBERNETES_SECONDARY_VERSIONS+=( "${version#v}" ) # strip leading v + done < 
<(kubernetes_workers) +} + +function kubernetes_load_balancer_address() { + maybe kubeadm_cluster_configuration | grep 'controlPlaneEndpoint:' | sed 's/controlPlaneEndpoint: \|"//g' +} + +function kubernetes_pod_started() { + local name=$1 + local namespace=$2 + + local phase=$(kubectl -n $namespace get pod $name -ojsonpath='{ .status.phase }') + case "$phase" in + Running|Failed|Succeeded) + return 0 + ;; + esac + + return 1 +} + +function kubernetes_pod_completed() { + local name=$1 + local namespace=$2 + + local phase=$(kubectl -n $namespace get pod $name -ojsonpath='{ .status.phase }') + case "$phase" in + Failed|Succeeded) + return 0 + ;; + esac + + return 1 +} + +function kubernetes_pod_succeeded() { + local name="$1" + local namespace="$2" + + local phase=$(kubectl -n $namespace get pod $name -ojsonpath='{ .status.phase }') + [ "$phase" = "Succeeded" ] +} + +function kubernetes_is_current_cluster() { + local api_service_address="$1" + if grep -sq "${api_service_address}" /etc/kubernetes/kubelet.conf ; then + return 0 + fi + if grep -sq "${api_service_address}" "$KUBEADM_CONF_FILE" ; then + return 0 + fi + return 1 +} + +function kubernetes_is_join_node() { + if grep -sq 'kind: JoinConfiguration' "$KUBEADM_CONF_FILE" ; then + return 0 + fi + return 1 +} + +function kubernetes_is_installed() { + if kubectl cluster-info >/dev/null 2>&1 ; then + return 0 + fi + if ps aux | grep '[k]ubelet' ; then + return 0 + fi + if commandExists kubelet ; then + return 0 + fi + return 1 +} + +function kubeadm_cluster_configuration() { + kubectl get cm -o yaml -n kube-system kubeadm-config -ojsonpath='{ .data.ClusterConfiguration }' +} + +function kubeadm_cluster_status() { + kubectl get cm -o yaml -n kube-system kubeadm-config -ojsonpath='{ .data.ClusterStatus }' +} + +function check_network() { + logStep "Checking cluster networking" + + if [ -n "$WEAVE_VERSION" ]; then + log "Checking if weave-net binary can be found in the path /opt/cni/bin/" + if ! 
ls -la /opt/cni/bin/ | grep weave-net; then + logWarn "Unable to find weave-net binary, deleting weave-net pod so that the binary will be recreated" + kubectl delete pods --selector=name=weave-net --field-selector="spec.nodeName=$(get_local_node_name)" -n kube-system --ignore-not-found=true + fi + fi + + if ! kubernetes_any_node_ready; then + echo "Waiting up to 10 minutes for node to report Ready" + if ! spinner_until 600 kubernetes_any_node_ready ; then + # Output the nodes for we know more about the problem + kubectl get nodes + bail "Any Node failed to report Ready" + fi + fi + + kubectl delete pods kurlnet-client kurlnet-server --force --grace-period=0 &>/dev/null || true + + cat </dev/null +} + +function kubernetes_service_exists() { + kubectl -n default get service kubernetes &>/dev/null +} + +function kubernetes_all_nodes_ready() { + local node_statuses= + node_statuses="$(kubectl get nodes --no-headers 2>/dev/null | awk '{ print $2 }')" + # no nodes are not ready and at least one node is ready + if echo "${node_statuses}" | grep -q 'NotReady' && \ + echo "${node_statuses}" | grep -v 'NotReady' | grep -q 'Ready' ; then + return 1 + fi + return 0 +} + +function kubernetes_any_node_ready() { + if kubectl get nodes --no-headers 2>/dev/null | awk '{ print $2 }' | grep -v 'NotReady' | grep -q 'Ready' ; then + return 0 + fi + return 1 +} + +# Helper function which calculates the amount of the given resource (either CPU or memory) +# to reserve in a given resource range, specified by a start and end of the range and a percentage +# of the resource to reserve. Note that we return zero if the start of the resource range is +# greater than the total resource capacity on the node. Additionally, if the end range exceeds the total +# resource capacity of the node, we use the total resource capacity as the end of the range. 
# Args:
#   $1 total available resource on the worker node in input unit (either millicores for CPU or Mi for memory)
#   $2 start of the resource range in input unit
#   $3 end of the resource range in input unit
#   $4 percentage of range to reserve in percent*100 (to allow for two decimal digits)
# Return:
#   amount of resource to reserve in input unit
function get_resource_to_reserve_in_range() {
    local total=$1
    local range_start=$2
    local range_end=$3
    local pct=$4
    # NOTE: intentionally not declared 'local' -- the original exposes this
    # variable at global scope as a side effect; callers may rely on it.
    resources_to_reserve="0"
    if (( total > range_start )); then
        # clamp the end of the range to the node's total capacity
        local capped=$range_end
        if (( total < range_end )); then
            capped=$total
        fi
        # pct is scaled by 100 to carry two decimal digits, hence the /100/100
        resources_to_reserve=$(( (capped - range_start) * pct / 100 / 100 ))
    fi
    echo $resources_to_reserve
}

# Calculates the amount of memory to reserve for the kubelet in mebibytes from the total memory available on the instance.
# From the total memory capacity of this worker node, we calculate the memory resources to reserve
# by reserving a percentage of the memory in each range up to the total memory available on the instance.
# We are using these memory ranges from GKE (https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture#node_allocatable):
# 255 Mi of memory for machines with less than 1024Mi of memory
# 25% of the first 4096Mi of memory
# 20% of the next 4096Mi of memory (up to 8192Mi)
# 10% of the next 8192Mi of memory (up to 16384Mi)
# 6% of the next 114688Mi of memory (up to 131072Mi)
# 2% of any memory above 131072Mi
# Args:
#   $1 total available memory on the machine in Mi
# Return:
#   memory to reserve in Mi for the kubelet
function get_memory_mebibytes_to_reserve() {
    local total_mi=$1
    # band boundaries; the final bound is the node's own capacity so the last
    # band covers "everything above 131072Mi"
    local range_bounds=(0 4096 8192 16384 131072 $total_mi)
    # percent*100 reserved within each band (2500 == 25%)
    local range_pcts=(2500 2000 1000 600 200)
    if (( total_mi <= 1024 )); then
        # flat reservation for small machines
        memory_to_reserve="255"
    else
        # NOTE: intentionally global, matching the original's side effect
        memory_to_reserve="0"
        local idx
        for (( idx = 0; idx < ${#range_pcts[@]}; idx++ )); do
            local band_lo=${range_bounds[$idx]}
            local band_hi=${range_bounds[$((idx + 1))]}
            local band_pct=${range_pcts[$idx]}
            memory_to_reserve=$(( memory_to_reserve + \
                $(get_resource_to_reserve_in_range $total_mi $band_lo $band_hi $band_pct) ))
        done
    fi
    echo $memory_to_reserve
}

# Calculates the amount of CPU to reserve for the kubelet in millicores from the total number of vCPUs available on the instance.
# From the total core capacity of this worker node, we calculate the CPU resources to reserve by reserving a percentage
# of the available cores in each range up to the total number of cores available on the instance.
# We are using these CPU ranges from GKE (https://cloud.google.com/kubernetes-engine/docs/concepts/cluster-architecture#node_allocatable):
# 6% of the first core
# 1% of the next core (up to 2 cores)
# 0.5% of the next 2 cores (up to 4 cores)
# 0.25% of any cores above 4 cores
# Args:
#   $1 total number of millicores on the instance (number of vCPUs * 1000)
# Return:
#   CPU resources to reserve in millicores (m)
function get_cpu_millicores_to_reserve() {
    local total_cpu_on_instance=$1
    # band boundaries; final bound is the node's own capacity so the last band
    # covers "anything above 4 cores"
    local cpu_ranges=(0 1000 2000 4000 $total_cpu_on_instance)
    # percent*100 reserved within each band (600 == 6%)
    local cpu_percentage_reserved_for_ranges=(600 100 50 25)
    # intentionally global (matches get_memory_mebibytes_to_reserve)
    cpu_to_reserve="0"
    for i in ${!cpu_percentage_reserved_for_ranges[@]}; do
        local start_range=${cpu_ranges[$i]}
        local end_range=${cpu_ranges[(($i+1))]}
        local percentage_to_reserve_for_range=${cpu_percentage_reserved_for_ranges[$i]}
        cpu_to_reserve=$(($cpu_to_reserve + \
            $(get_resource_to_reserve_in_range $total_cpu_on_instance $start_range $end_range $percentage_to_reserve_for_range)))
    done
    echo $cpu_to_reserve
}

# file_exists returns 0 when the given path exists and is a regular file,
# 1 otherwise.
function file_exists() {
    local filename=$1

    if ! test -f "$filename"; then
        return 1
    fi
}

# checks if the service in ns $1 with name $2 has endpoints
function kubernetes_service_healthy() {
    local namespace=$1
    local name=$2

    # kubectl prints "<none>" in the ENDPOINTS column when the service has no
    # ready addresses; filtering those lines out leaves output (exit 0) only
    # when at least one real endpoint exists.
    # BUGFIX: was 'grep -v ""' (inverted empty pattern matches every line),
    # which produced no output and made this function always report unhealthy.
    kubectl -n "$namespace" get endpoints "$name" --no-headers | grep -v "<none>" &>/dev/null
}

# kubernetes_version_minor extracts the minor component from a semver-ish
# version string, with or without a leading "v" (e.g. "v1.19.3" -> "19").
function kubernetes_version_minor() {
    local k8sVersion="$1"
    # shellcheck disable=SC2001
    echo "$k8sVersion" | sed 's/v\?[0-9]*\.\([0-9]*\)\.[0-9]*/\1/'
}

# kubernetes_configure_pause_image will make kubelet aware of the pause (sandbox) image used by
# containerd. This will prevent the kubelet from garbage collecting the pause image.
# This flag will be removed in kubernetes 1.27.
# NOTE: this configures the kubelet to use the pause image used by containerd rather than the other
# way around.
# kubernetes_configure_pause_image renders a kubelet-args kustomize patch so the
# kubelet is told about containerd's sandbox (pause) image, preventing the
# kubelet from garbage collecting it. No-op on k8s >= 1.27 (flag removed) or
# when containerd's pause image cannot be determined.
# Args:
#   $1 directory containing kustomization.yaml and the patch template
function kubernetes_configure_pause_image() {
    local dir="$1"
    #shellcheck disable=SC2153
    if [ "$KUBERNETES_TARGET_VERSION_MAJOR" != 1 ] || [ "$KUBERNETES_TARGET_VERSION_MINOR" -ge 27 ]; then
        return
    fi

    local CONTAINERD_PAUSE_IMAGE=
    #shellcheck disable=SC2034
    CONTAINERD_PAUSE_IMAGE="$(kubernetes_containerd_pause_image)"
    if [ -z "$CONTAINERD_PAUSE_IMAGE" ]; then
        return
    fi

    insert_patches_strategic_merge "$dir/kustomization.yaml" "kubelet-args-pause-image.patch.yaml"
    render_yaml_file_2 "$dir/kubelet-args-pause-image.patch.tmpl.yaml" > "$dir/kubelet-args-pause-image.patch.yaml"

    # templatize the kubeadm version field as this will be rendered (again) when generating the final kubeadm conf file
    # shellcheck disable=SC2016
    sed -i 's|kubeadm.k8s.io/v1beta.*|kubeadm.k8s.io/$(kubeadm_conf_api_version)|' "$dir/kubelet-args-pause-image.patch.yaml"
}

# path of the kubelet's kubeadm-managed environment (flags) file
KUBELET_FLAGS_FILE="/var/lib/kubelet/kubeadm-flags.env"

# KURL_HOSTNAME_OVERRIDE can be used to override the node name used by kURL
KURL_HOSTNAME_OVERRIDE=${KURL_HOSTNAME_OVERRIDE:-}

# kubernetes_init_hostname sets the HOSTNAME variable to equal the hostname binary output. If
# KURL_HOSTNAME_OVERRIDE is set, it will be used instead. Otherwise, if the kubelet flags file
# contains a --hostname-override flag, it will be used instead.
function kubernetes_init_hostname() {
    export HOSTNAME
    # default: output of the hostname binary, lowercased (k8s node names are lowercase)
    HOSTNAME="$(hostname | tr '[:upper:]' '[:lower:]')"
    # a --hostname-override already recorded in the kubelet flags file beats the default
    local hostname_override=
    hostname_override="$(kubernetes_get_kubelet_hostname_override)"
    if [ -n "$hostname_override" ] ; then
        HOSTNAME="$hostname_override"
    fi
    # KURL_HOSTNAME_OVERRIDE takes precedence over everything, per the contract above.
    # BUGFIX: previously the unconditional "$(hostname ...)" assignment ran LAST,
    # clobbering both overrides and making them dead code.
    if [ -n "$KURL_HOSTNAME_OVERRIDE" ]; then
        HOSTNAME="$KURL_HOSTNAME_OVERRIDE"
    fi
}

# kubernetes_get_kubelet_hostname_override returns the value of the --hostname-override flag in the
# kubelet env flags file.
# kubernetes_get_kubelet_hostname_override prints the value of the
# --hostname-override flag found in the kubelet env flags file, or nothing
# when the file is absent or the flag is not set.
function kubernetes_get_kubelet_hostname_override() {
    if [ -f "$KUBELET_FLAGS_FILE" ]; then
        # extract the flag value; [^" ]* stops at the closing quote or a space
        grep -o '\--hostname-override=[^" ]*' "$KUBELET_FLAGS_FILE" | awk -F'=' '{ print $2 }'
    fi
}

# kubernetes_configure_pause_image_upgrade will check if the pause image used by containerd has
# changed. If it has, it will update the kubelet flags to use the new pause image and restart the
# kubelet.
function kubernetes_configure_pause_image_upgrade() {
    local CONTAINERD_PAUSE_IMAGE=
    #shellcheck disable=SC2034
    CONTAINERD_PAUSE_IMAGE="$(kubernetes_containerd_pause_image)"
    if [ -z "$CONTAINERD_PAUSE_IMAGE" ]; then
        # containerd not managed here (or config unreadable) -- nothing to sync
        return
    fi

    if [ ! -f "$KUBELET_FLAGS_FILE" ]; then
        # kubelet flags file absent: node not yet initialized by kubeadm
        return
    fi

    # current pause image the kubelet was started with
    local old_pause_image=
    old_pause_image="$(grep -o '\--pod-infra-container-image=[^" ]*' "$KUBELET_FLAGS_FILE" | awk -F'=' '{ print $2 }')"

    # if the pause image is not set this may be a version of kubelet that does not support the flag
    if [ -z "$old_pause_image" ] || [ "$old_pause_image" = "$CONTAINERD_PAUSE_IMAGE" ]; then
        return
    fi

    # rewrite the flag in place, then restart the kubelet so it takes effect
    sed -i "s|$old_pause_image|$CONTAINERD_PAUSE_IMAGE|" "$KUBELET_FLAGS_FILE"

    systemctl daemon-reload
    systemctl restart kubelet
}

# kubernetes_containerd_pause_image will return the pause image used by containerd.
function kubernetes_containerd_pause_image() {
    if [ -z "$CONTAINERD_VERSION" ] || [ !
-f /etc/containerd/config.toml ] ; then + return + fi + grep sandbox_image /etc/containerd/config.toml | sed 's/[=\"]//g' | awk '{ print $2 }' +} + +# kubernetes_kustomize_config_migrate fixes missing and deprecated fields in kustomization file +function kubernetes_kustomize_config_migrate() { + local kustomize_dir=$1 + if [ "$KUBERNETES_TARGET_VERSION_MINOR" -ge "27" ]; then + # TODO: Currently this is using kustomize 3.5.4 to migrate the config due to a bug in + # kustomize v5: https://github.com/kubernetes-sigs/kustomize/issues/5149 + ( cd "$kustomize_dir" && kustomize edit fix ) + fi +} + +# kubernetes_configure_coredns is a workaround to reset the custom nameserver config in the coredns +# configmap. This runs after kubeadm init or upgrade which will reset the coredns configmap if it +# finds that it is the default. the issue is that it does a fuzzy match and if only the nameserver +# is set kubeadm determines that it is the default and it replaces the configmap. +function kubernetes_configure_coredns() { + if [ -z "$NAMESERVER" ]; then + return 0 + fi + kubectl -n kube-system get configmap coredns -oyaml > /tmp/Corefile + # Example lines to replace from k8s 1.17 and 1.19 + # "forward . /etc/resolv.conf" => "forward . 8.8.8.8" + # "forward . /etc/resolv.conf {" => "forward . 8.8.8.8 {" + sed -i "s/forward \. \/etc\/resolv\.conf/forward \. 
${NAMESERVER}/" /tmp/Corefile + kubectl -n kube-system replace configmap coredns -f /tmp/Corefile + kubectl -n kube-system rollout restart deployment/coredns +} + +# shellcheck disable=SC2148 +function object_store_exists() { + if [ -n "$OBJECT_STORE_ACCESS_KEY" ] && \ + [ -n "$OBJECT_STORE_SECRET_KEY" ] && \ + [ -n "$OBJECT_STORE_CLUSTER_IP" ]; then + return 0 + else + return 1 + fi +} + +function object_store_running() { + if kubernetes_resource_exists rook-ceph secret rook-ceph-object-user-rook-ceph-store-kurl || kubernetes_resource_exists minio get secret minio-credentials; then + return 0 + fi + return 1 +} + +function object_store_create_bucket() { + if object_store_bucket_exists "$1" ; then + echo "object store bucket $1 exists" + return 0 + fi + if ! _object_store_create_bucket "$1" ; then + if object_store_exists; then + return 1 + fi + bail "attempted to create bucket $1 but no object store configured" + fi + echo "object store bucket $1 created" +} + +function _object_store_create_bucket() { + local bucket=$1 + local acl="x-amz-acl:private" + local d=$(LC_TIME="en_US.UTF-8" TZ="UTC" date +"%a, %d %b %Y %T %z") + local string="PUT\n\n\n${d}\n${acl}\n/$bucket" + local sig=$(echo -en "${string}" | openssl dgst -sha1 -hmac "${OBJECT_STORE_SECRET_KEY}" -binary | base64) + + local addr=$($DIR/bin/kurl netutil format-ip-address "$OBJECT_STORE_CLUSTER_IP") + curl -fsSL -X PUT \ + --globoff \ + --noproxy "*" \ + -H "Host: $OBJECT_STORE_CLUSTER_IP" \ + -H "Date: $d" \ + -H "$acl" \ + -H "Authorization: AWS $OBJECT_STORE_ACCESS_KEY:$sig" \ + "http://$addr/$bucket" >/dev/null 2>&1 +} + +function object_store_bucket_exists() { + local bucket=$1 + local acl="x-amz-acl:private" + local d=$(LC_TIME="en_US.UTF-8" TZ="UTC" date +"%a, %d %b %Y %T %z") + local string="HEAD\n\n\n${d}\n${acl}\n/$bucket" + local sig=$(echo -en "${string}" | openssl dgst -sha1 -hmac "${OBJECT_STORE_SECRET_KEY}" -binary | base64) + + local addr=$($DIR/bin/kurl netutil format-ip-address 
"$OBJECT_STORE_CLUSTER_IP") + curl -fsSL -I \ + --globoff \ + --noproxy "*" \ + -H "Host: $OBJECT_STORE_CLUSTER_IP" \ + -H "Date: $d" \ + -H "$acl" \ + -H "Authorization: AWS $OBJECT_STORE_ACCESS_KEY:$sig" \ + "http://$addr/$bucket" >/dev/null 2>&1 +} + +# migrate_object_store creates a pod that migrates data between two different object stores. receives +# the namespace, the source and destination addresses, access keys and secret keys. returns once the +# pos has been finished or a timeout of 30 minutes has been reached. +function migrate_object_store() { + local namespace=$1 + local source_addr=$2 + local source_access_key=$3 + local source_secret_key=$4 + local destination_addr=$5 + local destination_access_key=$6 + local destination_secret_key=$7 + + kubectl -n "$namespace" delete pod sync-object-store --force --grace-period=0 --ignore-not-found + + cat < /dev/null + return 0 + fi + + return 1 +} + +function migrate_between_object_stores() { + local source_host=$1 + local source_access_key=$2 + local source_secret_key=$3 + local destination_host=$4 + local destination_addr=$5 + local destination_access_key=$6 + local destination_secret_key=$7 + + if kubernetes_resource_exists kurl deployment ekc-operator; then + kubectl -n kurl scale deploy ekc-operator --replicas=0 + log "Waiting for ekco pods to be removed" + if ! spinner_until 120 ekco_pods_gone; then + logFail "Unable to scale down ekco operator" + return 1 + fi + fi + + get_shared + + if ! 
migrate_object_store "default" "$source_host" "$source_access_key" "$source_secret_key" "$destination_host" "$destination_access_key" "$destination_secret_key" ; then + # even if the migration failed, we need to ensure ekco is running again + if kubernetes_resource_exists kurl deployment ekc-operator; then + kubectl -n kurl scale deploy ekc-operator --replicas=1 + fi + bail "sync-object-store pod failed" + fi + + # ensure ekco is running again + if kubernetes_resource_exists kurl deployment ekc-operator; then + kubectl -n kurl scale deploy ekc-operator --replicas=1 + fi + + # Update kotsadm to use new object store + if kubernetes_resource_exists default secret kotsadm-s3; then + echo "Updating kotsadm to use $destination_host" + kubectl patch secret kotsadm-s3 -p "{\"stringData\":{\"access-key-id\":\"${destination_access_key}\",\"secret-access-key\":\"${destination_secret_key}\",\"endpoint\":\"http://${destination_host}\",\"object-store-cluster-ip\":\"${destination_addr}\"}}" + + if kubernetes_resource_exists default deployment kotsadm; then + kubectl rollout restart deployment kotsadm + elif kubernetes_resource_exists default statefulset kotsadm; then + kubectl rollout restart statefulset kotsadm + fi + fi + + local newIP=$($DIR/bin/kurl netutil format-ip-address "$destination_addr") + # Update registry to use new object store + if kubernetes_resource_exists kurl configmap registry-config; then + echo "Updating registry to use $destination_host" + local temp_file= + temp_file=$(mktemp) + kubectl -n kurl get configmap registry-config -ojsonpath='{ .data.config\.yml }' | sed "s/regionendpoint: http.*/regionendpoint: http:\/\/${newIP}/" > "$temp_file" + kubectl -n kurl delete configmap registry-config + kubectl -n kurl create configmap registry-config --from-file=config.yml="$temp_file" + rm "$temp_file" + fi + if kubernetes_resource_exists kurl secret registry-s3-secret; then + kubectl -n kurl patch secret registry-s3-secret -p 
"{\"stringData\":{\"access-key-id\":\"${destination_access_key}\",\"secret-access-key\":\"${destination_secret_key}\",\"object-store-cluster-ip\":\"${destination_addr}\",\"object-store-hostname\":\"http://${destination_host}\"}}" + fi + if kubernetes_resource_exists kurl deployment registry; then + kubectl -n kurl rollout restart deployment registry + fi + + # Update velero to use new object store only if currently using object store since velero may have already been + # updated to use an off-cluster object store. + if kubernetes_resource_exists velero backupstoragelocation default; then + echo "Updating velero to use new object store $destination_host" + s3Url=$(kubectl -n velero get backupstoragelocation default -ojsonpath='{ .spec.config.s3Url }') + if [ "$s3Url" = "http://${source_host}" ]; then + kubectl -n velero patch backupstoragelocation default --type=merge -p "{\"spec\":{\"config\":{\"s3Url\":\"http://${destination_host}\",\"publicUrl\":\"http://${newIP}\"}}}" + + while read -r resticrepo; do + oldResticIdentifier=$(kubectl -n velero get resticrepositories "$resticrepo" -ojsonpath="{ .spec.resticIdentifier }") + newResticIdentifier=$(echo "$oldResticIdentifier" | sed "s/${source_host}/${destination_host}/") + kubectl -n velero patch resticrepositories "$resticrepo" --type=merge -p "{\"spec\":{\"resticIdentifier\":\"${newResticIdentifier}\"}}" + done < <(kubectl -n velero get resticrepositories --selector=velero.io/storage-location=default --no-headers | awk '{ print $1 }') + else + echo "The Velero default backupstoragelocation was not $source_host, not updating to use $destination_host" + fi + fi + if kubernetes_resource_exists velero secret cloud-credentials; then + if kubectl -n velero get secret cloud-credentials -ojsonpath='{ .data.cloud }' | base64 -d | grep -q "$source_access_key"; then + local temp_file= + temp_file=$(mktemp) + kubectl -n velero get secret cloud-credentials -ojsonpath='{ .data.cloud }' | base64 -d > "$temp_file" + sed -i 
"s/aws_access_key_id=.*/aws_access_key_id=${destination_access_key}/" "$temp_file" + sed -i "s/aws_secret_access_key=.*/aws_secret_access_key=${destination_secret_key}/" "$temp_file" + cloud=$(cat "$temp_file" | base64 -w 0) + kubectl -n velero patch secret cloud-credentials -p "{\"data\":{\"cloud\":\"${cloud}\"}}" + rm "$temp_file" + else + echo "The Velero cloud-credentials secret did not contain credentials for $source_host, not updating to use $destination_host credentials" + fi + fi + if kubernetes_resource_exists velero daemonset restic; then + kubectl -n velero rollout restart daemonset restic + fi + if kubernetes_resource_exists velero deployment velero; then + kubectl -n velero rollout restart deployment velero + fi + + printf "\n${GREEN}Object store migration completed successfully${NC}\n" + + return 0 +} + +function migrate_rgw_to_minio_checks() { + logStep "Running Rook Ceph Object Store to Minio migration checks ..." + + if ! rook_is_healthy_to_upgrade; then + bail "Cannot upgrade from Rook Ceph Object Store to Minio. Rook Ceph is unhealthy." + fi + + log "Wating for Rook Ceph Object Store health ..." + if ! spinner_until 300 rook_rgw_check_if_is_healthy ; then + logFail "Failed to detect healthy Rook Ceph Object Store" + bail "Cannot upgrade from Rook Ceph Object Store to Minio. Rook Ceph is unhealthy." + fi + + log "Awaiting 2 minutes to check MinIO Pod(s) are Running" + if ! spinner_until 120 check_for_running_pods "$MINIO_NAMESPACE"; then + logFail "MinIO has unhealthy Pod(s). Check the namespace $MINIO_NAMESPACE " + bail "Cannot upgrade from Rook to MinIO. MinIO is unhealthy." + fi + + logSuccess "Rook Ceph Object Store to Minio migration checks completed successfully." 
+} + +function rook_rgw_check_if_is_healthy() { + local IP=$(kubectl -n rook-ceph get service rook-ceph-rgw-rook-ceph-store | tail -n1 | awk '{ print $3}') + curl --globoff --noproxy "*" --fail --silent --insecure "http://${IP}" > /dev/null +} + +function migrate_rgw_to_minio() { + report_addon_start "rook-ceph-to-minio" "v1.1" + + migrate_rgw_to_minio_checks + + RGW_HOST="rook-ceph-rgw-rook-ceph-store.rook-ceph" + RGW_ACCESS_KEY_ID=$(kubectl -n rook-ceph get secret rook-ceph-object-user-rook-ceph-store-kurl -o yaml | grep AccessKey | head -1 | awk '{print $2}' | base64 --decode) + RGW_ACCESS_KEY_SECRET=$(kubectl -n rook-ceph get secret rook-ceph-object-user-rook-ceph-store-kurl -o yaml | grep SecretKey | head -1 | awk '{print $2}' | base64 --decode) + + MINIO_HOST="minio.${MINIO_NAMESPACE}" + MINIO_CLUSTER_IP=$(kubectl -n ${MINIO_NAMESPACE} get service minio | tail -n1 | awk '{ print $3}') + MINIO_ACCESS_KEY_ID=$(kubectl -n ${MINIO_NAMESPACE} get secret minio-credentials -ojsonpath='{ .data.MINIO_ACCESS_KEY }' | base64 --decode) + MINIO_ACCESS_KEY_SECRET=$(kubectl -n ${MINIO_NAMESPACE} get secret minio-credentials -ojsonpath='{ .data.MINIO_SECRET_KEY }' | base64 --decode) + + migrate_between_object_stores "$RGW_HOST" "$RGW_ACCESS_KEY_ID" "$RGW_ACCESS_KEY_SECRET" "$MINIO_HOST" "$MINIO_CLUSTER_IP" "$MINIO_ACCESS_KEY_ID" "$MINIO_ACCESS_KEY_SECRET" + + report_addon_success "rook-ceph-to-minio" "v1.1" +} + +function migrate_minio_to_rgw() { + local minio_ns="$MINIO_NAMESPACE" + if [ -z "$minio_ns" ]; then + minio_ns=minio + fi + + if ! kubernetes_resource_exists $minio_ns deployment minio && ! 
kubernetes_resource_exists $minio_ns statefulset ha-minio; then + return 0 + fi + + report_addon_start "minio-to-rook-ceph" "v1.1" + + MINIO_HOST="minio.${minio_ns}" + MINIO_ACCESS_KEY_ID=$(kubectl -n ${minio_ns} get secret minio-credentials -ojsonpath='{ .data.MINIO_ACCESS_KEY }' | base64 --decode) + MINIO_ACCESS_KEY_SECRET=$(kubectl -n ${minio_ns} get secret minio-credentials -ojsonpath='{ .data.MINIO_SECRET_KEY }' | base64 --decode) + + RGW_HOST="rook-ceph-rgw-rook-ceph-store.rook-ceph" + RGW_CLUSTER_IP=$(kubectl -n rook-ceph get service rook-ceph-rgw-rook-ceph-store | tail -n1 | awk '{ print $3}') + RGW_ACCESS_KEY_ID=$(kubectl -n rook-ceph get secret rook-ceph-object-user-rook-ceph-store-kurl -o yaml | grep AccessKey | head -1 | awk '{print $2}' | base64 --decode) + RGW_ACCESS_KEY_SECRET=$(kubectl -n rook-ceph get secret rook-ceph-object-user-rook-ceph-store-kurl -o yaml | grep SecretKey | head -1 | awk '{print $2}' | base64 --decode) + + migrate_between_object_stores "$MINIO_HOST" "$MINIO_ACCESS_KEY_ID" "$MINIO_ACCESS_KEY_SECRET" "$RGW_HOST" "$RGW_CLUSTER_IP" "$RGW_ACCESS_KEY_ID" "$RGW_ACCESS_KEY_SECRET" + + report_addon_success "minio-to-rook-ceph" "v1.1" +} + +export KUBECTL_PLUGINS_PATH=/usr/local/bin + +function install_plugins() { + pushd "$DIR/krew" + tar xzf outdated.tar.gz && chown root:root outdated && mv outdated /usr/local/bin/kubectl-outdated + tar xzf preflight.tar.gz && chown root:root preflight && mv preflight /usr/local/bin/kubectl-preflight + tar xzf support-bundle.tar.gz && chown root:root support-bundle && mv support-bundle /usr/local/bin/kubectl-support_bundle + popd + + # uninstall system-wide krew from old versions of kurl + rm -rf /opt/replicated/krew + sed -i '/^export KUBECTL_PLUGINS_PATH.*KREW_ROOT/d' /etc/profile + sed -i '/^export KREW_ROOT.*replicated/d' /etc/profile +} + +function install_kustomize() { + if ! kubernetes_is_master; then + return 0 + elif [ ! 
-d "$DIR/packages/kubernetes/${k8sVersion}/assets" ]; then + echo "Kustomize package is missing in your distribution. Skipping." + return 0 + fi + + kustomize_dir=/usr/local/bin + + pushd "$DIR/packages/kubernetes/${k8sVersion}/assets" + for file in $(ls kustomize-*);do + if [ "${file: -6}" == "tar.gz" ];then + tar xf ${file} + chmod a+x kustomize + mv kustomize /usr/local/bin/${file%%.tar*} + else + # Earlier versions of kustomize weren't archived/compressed + chmod a+x ${file} + cp ${file} ${kustomize_dir} + fi + done + popd + + if ls ${kustomize_dir}/kustomize-* 1>/dev/null 2>&1;then + latest_binary=$(basename $(ls ${kustomize_dir}/kustomize-* | sort -V | tail -n 1)) + + # Link to the latest version + ln -s -f ${kustomize_dir}/${latest_binary} ${kustomize_dir}/kustomize + fi +} + + +# preflights are run on all nodes for init.sh, join.sh, and upgrade.sh +function preflights() { + require64Bit + bailIfUnsupportedOS + mustSwapoff + prompt_if_docker_unsupported_os + check_docker_k8s_version + checkFirewalld + checkUFW + must_disable_selinux + apply_iptables_config + cri_preflights + host_nameservers_reachable + allow_remove_docker_new_install + return 0 +} + +# init_preflights are only run on the first node init.sh +function init_preflights() { + kotsadm_prerelease + bail_when_no_object_store_and_s3_enabled + bail_if_kurl_pods_are_unhealthy + bail_if_unsupported_migration_from_rook_to_openebs + bail_if_unsupported_migration_from_longhorn_to_openebs + bail_if_kurl_version_is_lower_than_previous_config + return 0 +} + +# if kurl pods like ekco not be running then we should bail +function bail_if_kurl_pods_are_unhealthy() { + if commandExists kubectl; then + log "Awaiting 2 minutes to check kURL Pod(s) are Running" + if ! spinner_until 120 check_for_running_pods kurl; then + bail "Kurl has unhealthy Pod(s). Check the namespace kurl. Restarting the pod may fix the issue." 
+ fi + fi +} + +function join_preflights() { + preflights_require_no_kubernetes_or_current_node + + return 0 +} + +function require_root_user() { + local user="$(id -un 2>/dev/null || true)" + if [ "$user" != "root" ]; then + bail "Error: this installer needs to be run as root." + fi +} + + +function require64Bit() { + case "$(uname -m)" in + *64) + ;; + *) + echo >&2 'Error: you are not using a 64bit platform.' + echo >&2 'This installer currently only supports 64bit platforms.' + exit 1 + ;; + esac +} + +function bailIfUnsupportedOS() { + case "$LSB_DIST$DIST_VERSION" in + ubuntu16.04) + logWarn "Install is not supported on Ubuntu 16.04. Installation of Kubernetes will be best effort." + ;; + ubuntu18.04|ubuntu20.04|ubuntu22.04) + ;; + rhel7.4|rhel7.5|rhel7.6|rhel7.7|rhel7.8|rhel7.9|rhel8.0|rhel8.1|rhel8.2|rhel8.3|rhel8.4|rhel8.5|rhel8.6|rhel8.7|rhel9.0|rhel9.1) + ;; + rocky9.0|rocky9.1|rocky9.2) + ;; + centos7.4|centos7.5|centos7.6|centos7.7|centos7.8|centos7.9|centos8|centos8.0|centos8.1|centos8.2|centos8.3|centos8.4) + ;; + amzn2) + ;; + ol7.4|ol7.5|ol7.6|ol7.7|ol7.8|ol7.9|ol8.0|ol8.1|ol8.2|ol8.3|ol8.4|ol8.5|ol8.6|ol8.7|ol8.8) + ;; + *) + bail "Kubernetes install is not supported on ${LSB_DIST} ${DIST_VERSION}. The list of supported operating systems can be viewed at https://kurl.sh/docs/install-with-kurl/system-requirements." + ;; + esac +} + +function mustSwapoff() { + if swap_is_on || swap_is_enabled; then + printf "\n${YELLOW}This application is incompatible with memory swapping enabled. 
Disable swap to continue?${NC} " + if confirmY ; then + printf "=> Running swapoff --all\n" + swapoff --all + if swap_fstab_enabled; then + swap_fstab_disable + fi + if swap_service_enabled; then + swap_service_disable + fi + if swap_azure_linux_agent_enabled; then + swap_azure_linux_agent_disable + fi + logSuccess "Swap disabled.\n" + else + bail "\nDisable swap with swapoff --all and remove all swap entries from /etc/fstab before re-running this script" + fi + fi +} + +function swap_is_on() { + swapon --summary | grep --quiet " " # todo this could be more specific, swapon -s returns nothing if its off +} + +function swap_is_enabled() { + swap_fstab_enabled || swap_service_enabled || swap_azure_linux_agent_enabled +} + +function swap_fstab_enabled() { + cat /etc/fstab | grep --quiet --ignore-case --extended-regexp '^[^#]+swap' +} + +function swap_fstab_disable() { + printf "=> Commenting swap entries in /etc/fstab \n" + sed --in-place=.bak '/\bswap\b/ s/^/#/' /etc/fstab + printf "=> A backup of /etc/fstab has been made at /etc/fstab.bak\n\n" + printf "\n${YELLOW}Changes have been made to /etc/fstab. 
We recommend reviewing them after completing this installation to ensure mounts are correctly configured.${NC}\n\n" + sleep 5 # for emphasis of the above ^ +} + +# This is a service on some Azure VMs that just enables swap +function swap_service_enabled() { + systemctl -q is-enabled temp-disk-swapfile 2>/dev/null +} + +function swap_service_disable() { + printf "=> Disabling temp-disk-swapfile service\n" + systemctl disable temp-disk-swapfile +} + +function swap_azure_linux_agent_enabled() { + cat /etc/waagent.conf 2>/dev/null | grep -q 'ResourceDisk.EnableSwap=y' +} + +function swap_azure_linux_agent_disable() { + printf "=> Disabling swap in Azure Linux Agent configuration file /etc/waagent.conf\n" + sed -i 's/ResourceDisk.EnableSwap=y/ResourceDisk.EnableSwap=n/g' /etc/waagent.conf +} + + +function check_docker_k8s_version() { + local version= + version="$(get_docker_version)" + + if [ -z "$version" ]; then + return + fi + + case "$KUBERNETES_TARGET_VERSION_MINOR" in + 14|15) + compareDockerVersions "$version" 1.13.1 + if [ "$COMPARE_DOCKER_VERSIONS_RESULT" -eq "-1" ]; then + bail "Minimum Docker version for Kubernetes $KUBERNETES_VERSION is 1.13.1." + fi + ;; + esac +} + +function prompt_if_docker_unsupported_os() { + if is_docker_version_supported ; then + return + fi + + logWarn "Docker ${DOCKER_VERSION} is not supported on ${LSB_DIST} ${DIST_VERSION}." + logWarn "The containerd addon is recommended. https://kurl.sh/docs/add-ons/containerd" + + if commandExists "docker" ; then + return + fi + + printf "${YELLOW}Continue? ${NC}" 1>&2 + if ! confirmN ; then + exit 1 + fi +} + +checkFirewalld() { + if [ -n "$PRESERVE_DOCKER_CONFIG" ]; then + return + fi + + apply_firewalld_config + + if [ "$BYPASS_FIREWALLD_WARNING" = "1" ]; then + return + fi + + if ! systemctl -q is-enabled firewalld && ! systemctl -q is-active firewalld; then + logSuccess "Firewalld is either not enabled or not active." 
+ return + fi + + if [ "$HARD_FAIL_ON_FIREWALLD" = "1" ]; then + printf "${RED}Firewalld is currently either enabled or active. Stop (systemctl stop firewalld) and disable Firewalld (systemctl disable firewalld) before proceeding.${NC}\n" 1>&2 + exit 1 + fi + + if [ -n "$DISABLE_FIREWALLD" ]; then + systemctl stop firewalld + systemctl disable firewalld + return + fi + + printf "${YELLOW}Firewalld is currently either enabled or active. To ensure smooth installation and avoid potential issues, it is highly recommended to stop and disable Firewalld. Please press 'Y' to proceed with stopping and disabling Firewalld.${NC}" + if confirmY ; then + systemctl stop firewalld + systemctl disable firewalld + return + fi + + printf "${YELLOW}Please note that if you choose to continue with Firewalld enabled and active, the installer may encounter unexpected behaviors and may not function properly. Therefore, it is strongly advised to stop and completely disable Firewalld before proceeding. Continue with firewalld enabled and/or active?${NC}" + if confirmN ; then + BYPASS_FIREWALLD_WARNING=1 + return + fi + exit 1 +} + +checkUFW() { + if [ -n "$PRESERVE_DOCKER_CONFIG" ]; then + return + fi + + if [ "$BYPASS_UFW_WARNING" = "1" ]; then + return + fi + + # check if UFW is enabled and installed in systemctl + if ! systemctl -q is-active ufw ; then + return + fi + + # check if UFW is active/inactive + UFW_STATUS=$(ufw status | grep 'Status: ' | awk '{ print $2 }') + if [ "$UFW_STATUS" = "inactive" ]; then + return + fi + + if [ "$HARD_FAIL_ON_UFW" = "1" ]; then + printf "${RED}UFW is active${NC}\n" 1>&2 + exit 1 + fi + + if [ -n "$DISABLE_UFW" ]; then + ufw disable + return + fi + + printf "${YELLOW}UFW is active, please press Y to disable ${NC}" + if confirmY ; then + ufw disable + return + fi + + printf "${YELLOW}Continue with ufw active? 
${NC}" + if confirmN ; then + BYPASS_UFW_WARNING=1 + return + fi + exit 1 +} + +must_disable_selinux() { + # From kubernets kubeadm docs for RHEL: + # + # Disabling SELinux by running setenforce 0 is required to allow containers to + # access the host filesystem, which is required by pod networks for example. + # You have to do this until SELinux support is improved in the kubelet. + + # Check and apply YAML overrides + if [ -n "$PRESERVE_SELINUX_CONFIG" ]; then + return + fi + + apply_selinux_config + if [ -n "$BYPASS_SELINUX_PREFLIGHT" ]; then + return + fi + + if selinux_enabled && selinux_enforced ; then + if [ -n "$DISABLE_SELINUX" ]; then + setenforce 0 + sed -i s/^SELINUX=.*$/SELINUX=permissive/ /etc/selinux/config + return + fi + + printf "\n${YELLOW}Kubernetes is incompatible with SELinux. Disable SELinux to continue?${NC} " + if confirmY ; then + setenforce 0 + sed -i s/^SELINUX=.*$/SELINUX=permissive/ /etc/selinux/config + else + bail "\nDisable SELinux with 'setenforce 0' before re-running install script" + fi + fi +} + +function force_docker() { + DOCKER_VERSION="20.10.17" + printf "${YELLOW}NO CRI version was listed in yaml or found on host OS, defaulting to online docker install${NC}\n" + printf "${YELLOW}THIS FEATURE IS NOT SUPPORTED AND WILL BE DEPRECATED IN FUTURE KURL VERSIONS${NC}\n" + printf "${YELLOW}The installer did not specify a version of Docker or Containerd to include, but having one is required by all kURL installation scripts. 
The latest supported version ($DOCKER_VERSION) of Docker will be installed.${NC}\n" +} + +function cri_preflights() { + require_cri +} + +function require_cri() { + if is_rhel_9_variant && [ -z "$CONTAINERD_VERSION" ]; then + bail "Containerd is required" + fi + + if commandExists docker ; then + SKIP_DOCKER_INSTALL=1 + return 0 + fi + + if commandExists ctr ; then + return 0 + fi + + if [ "$LSB_DIST" = "rhel" ]; then + if [ -n "$NO_CE_ON_EE" ] && [ -z "$CONTAINERD_VERSION" ]; then + printf "${RED}Enterprise Linux distributions require Docker Enterprise Edition. Please install Docker before running this installation script.${NC}\n" 1>&2 + return 0 + fi + fi + + if [ "$SKIP_DOCKER_INSTALL" = "1" ]; then + bail "Docker is required" + fi + + if [ -z "$DOCKER_VERSION" ] && [ -z "$CONTAINERD_VERSION" ]; then + force_docker + fi + + return 0 +} + +selinux_enabled() { + if commandExists "selinuxenabled"; then + selinuxenabled + return + elif commandExists "sestatus"; then + ENABLED=$(sestatus | grep 'SELinux status' | awk '{ print $3 }') + echo "$ENABLED" | grep --quiet --ignore-case enabled + return + fi + + return 1 +} + +selinux_enforced() { + if commandExists "getenforce"; then + ENFORCED=$(getenforce) + echo $(getenforce) | grep --quiet --ignore-case enforcing + return + elif commandExists "sestatus"; then + ENFORCED=$(sestatus | grep 'SELinux mode' | awk '{ print $3 }') + echo "$ENFORCED" | grep --quiet --ignore-case enforcing + return + fi + + return 1 +} + +function kotsadm_prerelease() { + if [ "$KOTSADM_VERSION" = "alpha" ] || [ "$KOTSADM_VERSION" = "nightly" ]; then + if [ -n "$TESTGRID_ID" ]; then + printf "\n${YELLOW}This is a prerelease version of kotsadm and should not be run in production. Continuing because this is testgrid.${NC}\n" + return 0 + else + printf "\n${YELLOW}This is a prerelease version of kotsadm and should not be run in production. Press Y to continue.${NC} " + if ! confirmN; then + bail "\nWill not install prerelease version of kotsadm." 
+ fi + fi + fi +} + +function host_nameservers_reachable() { + if [ -n "$NAMESERVER" ] || [ "$AIRGAP" = "1" ]; then + return 0 + fi + if ! discover_non_loopback_nameservers; then + bail "\nAt least one nameserver must be accessible on a non-loopback address. Use the \"nameserver\" flag in the installer spec to override the loopback nameservers discovered on the host: https://kurl.sh/docs/add-ons/kurl" + fi +} + +function preflights_require_no_kubernetes_or_current_node() { + if kubernetes_is_join_node ; then + if kubernetes_is_current_cluster "${API_SERVICE_ADDRESS}" ; then + return 0 + fi + + logWarn "Kubernetes is already installed on this Node but the api server endpoint is different." + printf "${YELLOW}Are you sure you want to proceed? ${NC}" 1>&2 + if ! confirmN; then + exit 1 + fi + return 0 + fi + + if kubernetes_is_installed ; then + bail "Kubernetes is already installed on this Node." + fi + + return 0 +} + +function preflights_system_packages() { + local addonName=$1 + local addonVersion=$2 + + local manifestPath="${DIR}/addons/${addonName}/${addonVersion}/Manifest" + local preflightPath="${DIR}/addons/${addonName}/${addonVersion}/system-packages-preflight.yaml" + + if [ ! 
-f "$manifestPath" ]; then + return + fi + + local pkgs_all=() + local pkgs_ubuntu=() + local pkgs_centos=() + local pkgs_centos8=() + local pkgs_ol=() + + while read -r line; do + if [ -z "$line" ]; then + continue + fi + # support for comments in manifest files + if [ "$(echo "$line" | cut -c1-1)" = "#" ]; then + continue + fi + kind=$(echo "$line" | awk '{ print $1 }') + + case "$kind" in + apt) + package=$(echo "${line}" | awk '{ print $2 }') + pkgs_ubuntu+=("${package}") + pkgs_all+=("${package}") + ;; + + yum) + package=$(echo "${line}" | awk '{ print $2 }') + pkgs_centos+=("${package}") + pkgs_all+=("${package}") + ;; + + yum8) + package=$(echo "${line}" | awk '{ print $2 }') + pkgs_centos8+=("${package}") + pkgs_all+=("${package}") + ;; + + yumol) + package=$(echo "${line}" | awk '{ print $2 }') + pkgs_ol+=("${package}") + pkgs_all+=("${package}") + ;; + esac + done < "${manifestPath}" + + if [ "${#pkgs_all[@]}" -eq "0" ]; then + return + fi + + local system_packages_collector=" +systemPackages: + collectorName: $addonName +" + + local system_packages_analyzer=" +systemPackages: + collectorName: $addonName + outcomes: + - fail: + when: '{{ not .IsInstalled }}' + message: Package {{ .Name }} is not installed. + - pass: + message: Package {{ .Name }} is installed. 
+" + + for pkg in "${pkgs_ubuntu[@]}" + do + system_packages_collector=$("${DIR}"/bin/yamlutil -a -yc "$system_packages_collector" -yp systemPackages_ubuntu[] -v "$pkg") + done + + for pkg in "${pkgs_centos[@]}" + do + system_packages_collector=$("${DIR}"/bin/yamlutil -a -yc "$system_packages_collector" -yp systemPackages_centos[] -v "$pkg") + system_packages_collector=$("${DIR}"/bin/yamlutil -a -yc "$system_packages_collector" -yp systemPackages_rhel[] -v "$pkg") + system_packages_collector=$("${DIR}"/bin/yamlutil -a -yc "$system_packages_collector" -yp systemPackages_ol[] -v "$pkg") + system_packages_collector=$("${DIR}"/bin/yamlutil -a -yc "$system_packages_collector" -yp systemPackages_amzn[] -v "$pkg") + done + + for pkg in "${pkgs_centos8[@]}" + do + system_packages_collector=$("${DIR}"/bin/yamlutil -a -yc "$system_packages_collector" -yp systemPackages_centos8[] -v "$pkg") + system_packages_collector=$("${DIR}"/bin/yamlutil -a -yc "$system_packages_collector" -yp systemPackages_rhel8[] -v "$pkg") + system_packages_collector=$("${DIR}"/bin/yamlutil -a -yc "$system_packages_collector" -yp systemPackages_ol8[] -v "$pkg") + system_packages_collector=$("${DIR}"/bin/yamlutil -a -yc "$system_packages_collector" -yp systemPackages_centos9[] -v "$pkg") + system_packages_collector=$("${DIR}"/bin/yamlutil -a -yc "$system_packages_collector" -yp systemPackages_rhel9[] -v "$pkg") + system_packages_collector=$("${DIR}"/bin/yamlutil -a -yc "$system_packages_collector" -yp systemPackages_ol9[] -v "$pkg") + done + + for pkg in "${pkgs_ol[@]}" + do + system_packages_collector=$("${DIR}"/bin/yamlutil -a -yc "$system_packages_collector" -yp systemPackages_ol[] -v "$pkg") + done + + # host preflight file not found, create one + rm -rf "$preflightPath" + mkdir -p "$(dirname "$preflightPath")" + cat <> "$preflightPath" +apiVersion: troubleshoot.sh/v1beta2 +kind: HostPreflight +metadata: + name: "$addonName" +spec: + collectors: [] + analyzers: [] +EOF + + "${DIR}"/bin/yamlutil -a 
-fp "$preflightPath" -yp spec_collectors[] -v "$system_packages_collector" + "${DIR}"/bin/yamlutil -a -fp "$preflightPath" -yp spec_analyzers[] -v "$system_packages_analyzer" + + echo "$preflightPath" +} + +HOST_PREFLIGHTS_RESULTS_OUTPUT_DIR="host-preflights" +function host_preflights() { + local is_primary="$1" + local is_join="$2" + local is_upgrade="$3" + + local opts= + + local out_file= + out_file="${DIR}/${HOST_PREFLIGHTS_RESULTS_OUTPUT_DIR}/results-$(date +%s).txt" + + mkdir -p "${DIR}/${HOST_PREFLIGHTS_RESULTS_OUTPUT_DIR}" + + if [ ! "${HOST_PREFLIGHT_ENFORCE_WARNINGS}" = "1" ] ; then + opts="${opts} --ignore-warnings" + fi + if [ "${is_primary}" != "1" ]; then + opts="${opts} --is-primary=false" + fi + if [ "${is_join}" = "1" ]; then + opts="${opts} --is-join" + fi + if [ "${is_upgrade}" = "1" ]; then + opts="${opts} --is-upgrade" + fi + + # Remove previous file if it exists + if [ -f "${VENDOR_PREFLIGHT_SPEC}" ]; then + rm "$VENDOR_PREFLIGHT_SPEC" + fi + + $DIR/bin/vendorflights -i "${MERGED_YAML_SPEC}" -o "${VENDOR_PREFLIGHT_SPEC}" + if [ -f "${VENDOR_PREFLIGHT_SPEC}" ]; then + opts="${opts} --spec=${VENDOR_PREFLIGHT_SPEC}" + fi + + if [ "$EXCLUDE_BUILTIN_HOST_PREFLIGHTS" == "1" ]; then + opts="${opts} --exclude-builtin" + else + # Adding kurl addon preflight checks + for spec in $("${K8S_DISTRO}_addon_for_each" addon_preflight); do + opts="${opts} --spec=${spec}" + done + # Add containerd preflight checks separately since it's a special addon and is not part of the addons array + for spec in $(addon_preflight containerd "$CONTAINERD_VERSION"); do + opts="${opts} --spec=${spec}" + done + fi + + if [ -n "$PRIMARY_HOST" ]; then + opts="${opts} --primary-host=${PRIMARY_HOST}" + fi + if [ -n "$SECONDARY_HOST" ]; then + opts="${opts} --secondary-host=${SECONDARY_HOST}" + fi + + logStep "Running host preflights" + if [ "${HOST_PREFLIGHT_IGNORE}" = "1" ]; then + "${DIR}"/bin/kurl host preflight "${MERGED_YAML_SPEC}" ${opts} | tee "${out_file}" + 
host_preflights_mkresults "${out_file}" "${opts}" + else + set +e + "${DIR}"/bin/kurl host preflight "${MERGED_YAML_SPEC}" ${opts} | tee "${out_file}" + local kurl_exit_code="${PIPESTATUS[0]}" + set -e + + host_preflights_mkresults "${out_file}" "${opts}" + + case $kurl_exit_code in + 3) + bail "Host preflights have warnings that block the installation." + ;; + 2) + logWarn "Host preflights have warnings" + logWarn "It is highly recommended to sort out the warning conditions before proceeding." + logWarn "Be aware that continuing with preflight warnings can result in failures." + log "" + logWarn "Would you like to continue?" + if ! confirmY ; then + bail "The installation will not continue" + fi + return 0 + ;; + 1) + bail "Host preflights have failures that block the installation." + ;; + esac + fi + if [ "${HOST_PREFLIGHT_IGNORE}" = "1" ]; then + logWarn "Using host-preflight-ignore flag to disregard any failures during the pre-flight checks" + + case $kurl_exit_code in + 3) + logFail "Host preflights have warnings that should block the installation." + return + ;; + 2) + logWarn "Host preflights have warnings which is highly recommended to sort out the conditions before proceeding." + return + ;; + 1) + logFail "Host preflights have failures that should block the installation." + return + ;; + esac + fi + logSuccess "Host preflights success" +} + +IN_CLUSTER_PREFLIGHTS_RESULTS_OUTPUT_DIR="in-cluster-preflights" +function cluster_preflights() { + local is_primary="$1" + local is_join="$2" + local is_upgrade="$3" + local opts= + local out_file= + out_file="${DIR}/${IN_CLUSTER_PREFLIGHTS_RESULTS_OUTPUT_DIR}/results-$(date +%s).txt" + + # Do not run those tests when/if kubernetes is not installed + if ! commandExists kubectl; then + return + fi + + if [ ! 
-f /etc/kubernetes/admin.conf ]; then + log "In cluster Preflights will not be executed because /etc/kubernetes/admin.conf is not found" + return + fi + + logStep "Running in cluster Preflights" + mkdir -p "${DIR}/${IN_CLUSTER_PREFLIGHTS_RESULTS_OUTPUT_DIR}" + + if [ ! "${HOST_PREFLIGHT_ENFORCE_WARNINGS}" = "1" ] ; then + opts="${opts} --ignore-warnings" + fi + if [ "${is_primary}" != "1" ]; then + opts="${opts} --is-primary=false" + fi + if [ "${is_join}" = "1" ]; then + opts="${opts} --is-join" + fi + if [ "${is_upgrade}" = "1" ]; then + opts="${opts} --is-upgrade" + fi + + if [ "$EXCLUDE_BUILTIN_HOST_PREFLIGHTS" == "1" ]; then + opts="${opts} --exclude-builtin" + fi + + if [ -n "$PRIMARY_HOST" ]; then + opts="${opts} --primary-host=${PRIMARY_HOST}" + fi + if [ -n "$SECONDARY_HOST" ]; then + opts="${opts} --secondary-host=${SECONDARY_HOST}" + fi + + + if [ "${HOST_PREFLIGHT_IGNORE}" = "1" ]; then + "${DIR}"/bin/kurl cluster preflight "${MERGED_YAML_SPEC}" ${opts} | tee "${out_file}" + host_preflights_mkresults "${out_file}" "${opts}" + else + set +e + "${DIR}"/bin/kurl cluster preflight "${MERGED_YAML_SPEC}" ${opts} | tee "${out_file}" + local kurl_exit_code="${PIPESTATUS[0]}" + set -e + + on_cluster_preflights_mkresults "${out_file}" "${opts}" + + case $kurl_exit_code in + 3) + bail "In cluster Preflights have warnings that block the installation." + ;; + 2) + logWarn "Preflights checks executed on cluster have warnings" + logWarn "It is highly recommended to sort out the warning conditions before proceeding." + logWarn "Be aware that continuing with preflight warnings can result in failures." + log "" + logWarn "Would you like to continue?" + if ! confirmY ; then + bail "The installation will not continue" + fi + return 0 + ;; + 1) + bail "In cluster Preflights checks have failures that block the installation." 
+ ;; + esac + fi + if [ "${HOST_PREFLIGHT_IGNORE}" = "1" ]; then + logWarn "Using host-preflight-ignore flag to disregard any failures during the pre-flight checks" + case $kurl_exit_code in + 3) + logFail "On cluster preflights have warnings that should block the installation." + return + ;; + 2) + logWarn "On cluster preflights have warnings which is highly recommended to sort out the conditions before proceeding." + return + ;; + 1) + logFail "On cluster preflights have failures that should block the installation." + return + ;; + esac + fi + logSuccess "On cluster Preflights success" +} + +# host_preflights_mkresults will append cli data to preflight results file +function host_preflights_mkresults() { + local out_file="$1" + local opts="$2" + local kurl_version= + kurl_version="$(./bin/kurl version | grep version= | awk 'BEGIN { FS="=" }; { print $2 }')" + local tmp_file= + tmp_file="$(mktemp)" + echo -e "[version]\n${kurl_version}\n\n[options]\n${opts}\n\n[results]" | cat - "${out_file}" > "${tmp_file}" && mv "${tmp_file}" "${out_file}" + chmod -R +r "${DIR}/${HOST_PREFLIGHTS_RESULTS_OUTPUT_DIR}/" # make sure the file is readable by kots support bundle + rm -f "${tmp_file}" +} + +function on_cluster_preflights_mkresults() { + local out_file="$1" + local opts="$2" + local kurl_version= + kurl_version="$(./bin/kurl version | grep version= | awk 'BEGIN { FS="=" }; { print $2 }')" + local tmp_file= + tmp_file="$(mktemp)" + echo -e "[version]\n${kurl_version}\n\n[options]\n${opts}\n\n[results]" | cat - "${out_file}" > "${tmp_file}" && mv "${tmp_file}" "${out_file}" + chmod -R +r "${DIR}/${IN_CLUSTER_PREFLIGHTS_RESULTS_OUTPUT_DIR}/" # make sure the file is readable by kots support bundle + rm -f "${tmp_file}" +} + +# Uninstall Docker when containerd is selected to be installed and it is a new install +# So that, is possible to avoid conflicts +allow_remove_docker_new_install() { + # If docker is not installed OR if containerd is not in the spec + # then, the docker 
should not be uninstalled
    if ! commandExists docker || [ -z "$CONTAINERD_VERSION" ]; then
        return
    fi

    # if k8s is installed already then, the docker should not be uninstalled
    # so that it can be properly migrated to containerd
    if commandExists kubectl ; then
        return
    fi

    printf "\n${YELLOW}Docker already exists on this machine and Kubernetes is not yet installed.${NC} "
    printf "\n${YELLOW}In order to avoid conflicts when installing containerd, it is recommended that Docker be removed."
    printf "\n${YELLOW}Remove Docker?${NC} "
    if confirmY ; then
        uninstall_docker_new_installs_with_containerd
    else
        logWarn "\nThe installation will continue, however, if this script fails due to package"
        logWarn "conflicts, please uninstall Docker and re-run the install script."
    fi
}

# bail_if_unsupported_migration_from_rook_to_openebs will bail if the rook is being removed in favor of
# openebs and the openebs version does not support migrations from rook.
function bail_if_unsupported_migration_from_rook_to_openebs() {
    if [ -z "$ROOK_VERSION" ] && [ -n "$OPENEBS_VERSION" ]; then
        if commandExists kubectl; then
            if kubectl get ns 2>/dev/null | grep -q rook-ceph; then
                # semverParse sets $major/$minor/$patch from $OPENEBS_VERSION
                semverParse "$OPENEBS_VERSION"
                # if $OPENEBS_VERSION is less than 3.3.0
                if [ "$major" -lt "3" ] || { [ "$major" = "3" ] && [ "$minor" -lt "3" ] ; }; then
                    logFail "The OpenEBS version $OPENEBS_VERSION cannot be installed."
                    bail "OpenEBS versions less than 3.3.0 do not support migrations from Rook"
                fi

                # registry + openebs without rook requires minio
                if [ -n "$REGISTRY_VERSION" ] && [ -z "$MINIO_VERSION" ]; then
                    logFail "Migration from Rook with Registry requires an object store."
                    bail "Please ensure that your installer also provides an object store with MinIO add-on."
                fi
            fi
        fi
    fi
}

# bail_if_unsupported_migration_from_longhorn_to_openebs will bail if the longhorn is being removed in favor of
# openebs and the openebs version does not support migrations
function bail_if_unsupported_migration_from_longhorn_to_openebs() {
    if [ -z "$LONGHORN_VERSION" ] && [ -n "$OPENEBS_VERSION" ]; then
        if commandExists kubectl; then
            if kubectl get ns 2>/dev/null | grep -q longhorn-system; then
                semverParse "$OPENEBS_VERSION"
                # if $OPENEBS_VERSION is less than 3.3.0
                if [ "$major" -lt "3" ] || { [ "$major" = "3" ] && [ "$minor" -lt "3" ] ; }; then
                    logFail "The OpenEBS version $OPENEBS_VERSION cannot be installed."
                    bail "OpenEBS versions less than 3.3.0 do not support migrations from Longhorn"
                fi
                # registry + openebs without longhorn requires minio
                # NOTE(review): unlike the Rook variant above, this only bails when a minio
                # namespace already exists — confirm that asymmetry is intended.
                if [ -n "$REGISTRY_VERSION" ] && [ -z "$MINIO_VERSION" ]; then
                    if kubectl get ns | grep -q minio; then
                        logFail "Migration from Longhorn with Registry requires an object store."
                        bail "Please ensure that your installer also provides an object store with MinIO add-on."
                    fi
                fi
            fi
        fi
    fi
}

# bail_when_no_object_store_and_s3_enabled will bail if Minio and Rook are not present and kotsadm.s3Disabled is false.
function bail_when_no_object_store_and_s3_enabled() {
    if [ -z "$MINIO_VERSION" ] && [ -z "$ROOK_VERSION" ]; then
        if [ -n "$KOTSADM_VERSION" ] && [ "$KOTSADM_DISABLE_S3" != "1" ]; then
            logFail "KOTS with s3 enabled requires an object store."
            bail "Please ensure that your installer also provides an object store with either the MinIO or Rook add-on."
        fi
        if [ -n "$VELERO_VERSION" ] && [ "$KOTSADM_DISABLE_S3" != "1" ]; then
            logFail "Velero with KOTS s3 enabled requires an object store."
            bail "Please, ensure that your installer also provides an object store with either the MinIO or Rook add-on."
        fi
    fi
}

# do not allow running the installer/upgrade when the kurl version is lower than the one applied before
function bail_if_kurl_version_is_lower_than_previous_config() {
    local previous_kurl_version=
    # do not fail the script if k8s is not installed or the cluster is down
    previous_kurl_version="$(kurl_get_current_version 2>/dev/null || true)"
    if [ -z "$previous_kurl_version" ]; then
        previous_kurl_version="$(kurl_get_last_version 2>/dev/null || true)"
    fi
    if [ -z "$previous_kurl_version" ]; then
        return
    fi

    if [ -n "$KURL_VERSION" ]; then
        semverCompare "$(echo "$KURL_VERSION" | sed 's/v//g')" "$(echo "$previous_kurl_version" | sed 's/v//g')"
        if [ "$SEMVER_COMPARE_RESULT" = "-1" ]; then # -1 => KURL_VERSION is lower than previous_kurl_version
            logFail "The current kURL release version $KURL_VERSION is less than the previously installed version $previous_kurl_version."
            bail "Please use a kURL release version which is equal to or greater than the version used previously."
        fi
    fi
    log "Previous kURL version used to install or update the cluster is $previous_kurl_version"
    if [ -n "$KURL_VERSION" ]; then
        log "and the current kURL version used is $KURL_VERSION"
    fi
}

# shellcheck disable=SC2148
# Gather any additional information required from the user that could not be discovered and was not
# passed with a flag

function prompts_can_prompt() {
    # Need the TTY to accept input and stdout to display
    # Prompts when running the script through the terminal but not as a subshell
    if [ -c /dev/tty ]; then
        return 0
    fi
    return 1
}

# prompt reads one line from /dev/tty into PROMPT_RESULT.
# TEST_PROMPT_RESULT, when set (even to empty), overrides the read for tests.
function prompt() {
    if ! prompts_can_prompt ; then
        bail "Cannot prompt, shell is not interactive"
    fi

    set +e
    if [ -z ${TEST_PROMPT_RESULT+x} ]; then
        read PROMPT_RESULT < /dev/tty
    else
        PROMPT_RESULT="$TEST_PROMPT_RESULT"
    fi
    set -e
}

# confirmY asks a yes/no question defaulting to yes; returns 1 only on an explicit n/N.
function confirmY() {
    printf "(Y/n) "
    if [ "$ASSUME_YES" = "1" ]; then
        echo "Y"
        return 0
    fi
    if ! prompts_can_prompt ; then
        echo "Y"
        logWarn "Automatically accepting prompt, shell is not interactive"
        return 0
    fi
    prompt
    if [ "$PROMPT_RESULT" = "n" ] || [ "$PROMPT_RESULT" = "N" ]; then
        return 1
    fi
    return 0
}

# confirmN asks a yes/no question defaulting to no; returns 0 only on an explicit y/Y
# (or when ASSUME_YES=1).
function confirmN() {
    printf "(y/N) "
    if [ "$ASSUME_YES" = "1" ]; then
        echo "Y"
        return 0
    fi
    if ! prompts_can_prompt ; then
        echo "N"
        logWarn "Automatically declining prompt, shell is not interactive"
        return 1
    fi
    prompt
    if [ "$PROMPT_RESULT" = "y" ] || [ "$PROMPT_RESULT" = "Y" ]; then
        return 0
    fi
    return 1
}

# join_prompts resolves the master address/port (from API_SERVICE_ADDRESS or by prompting)
# and then collects the kubeadm join token and CA hash.
function join_prompts() {
    if [ -n "$API_SERVICE_ADDRESS" ]; then
        # splitHostPort sets $HOST and $PORT
        splitHostPort "$API_SERVICE_ADDRESS"
        if [ -z "$PORT" ]; then
            PORT="6443"
        fi
        KUBERNETES_MASTER_ADDR="$HOST"
        KUBERNETES_MASTER_PORT="$PORT"
        LOAD_BALANCER_ADDRESS="$HOST"
        LOAD_BALANCER_PORT="$PORT"
    else
        prompt_for_master_address
        splitHostPort "$KUBERNETES_MASTER_ADDR"
        if [ -n "$PORT" ]; then
            KUBERNETES_MASTER_ADDR="$HOST"
            KUBERNETES_MASTER_PORT="$PORT"
        else
            KUBERNETES_MASTER_PORT="6443"
        fi
        LOAD_BALANCER_ADDRESS="$KUBERNETES_MASTER_ADDR"
        LOAD_BALANCER_PORT="$KUBERNETES_MASTER_PORT"
        API_SERVICE_ADDRESS="${KUBERNETES_MASTER_ADDR}:${KUBERNETES_MASTER_PORT}"
    fi
    prompt_for_token
    prompt_for_token_ca_hash
}

# prompt_for_token fills KUBEADM_TOKEN, looping until a non-empty value is entered.
function prompt_for_token() {
    if [ -n "$KUBEADM_TOKEN" ]; then
        return
    fi
    if ! prompts_can_prompt ; then
        bail "kubernetes.kubeadmToken required"
    fi

    printf "Please enter the kubernetes discovery token.\n"
    while true; do
        printf "Kubernetes join token: "
        prompt
        if [ -n "$PROMPT_RESULT" ]; then
            KUBEADM_TOKEN="$PROMPT_RESULT"
            return
        fi
    done
}

# prompt_for_token_ca_hash fills KUBEADM_TOKEN_CA_HASH, looping until non-empty.
function prompt_for_token_ca_hash() {
    if [ -n "$KUBEADM_TOKEN_CA_HASH" ]; then
        return
    fi
    if ! prompts_can_prompt ; then
        bail "kubernetes.kubeadmTokenCAHash required"
    fi

    printf "Please enter the discovery token CA's hash.\n"
    while true; do
        printf "Kubernetes discovery token CA hash: "
        prompt
        if [ -n "$PROMPT_RESULT" ]; then
            KUBEADM_TOKEN_CA_HASH="$PROMPT_RESULT"
            return
        fi
    done
}

# prompt_for_master_address fills KUBERNETES_MASTER_ADDR, looping until non-empty.
function prompt_for_master_address() {
    if [ -n "$KUBERNETES_MASTER_ADDR" ]; then
        return
    fi
    if ! prompts_can_prompt ; then
        bail "kubernetes.masterAddress required"
    fi

    printf "Please enter the Kubernetes master address.\n"
    printf "e.g. 10.128.0.4\n"
    while true; do
        printf "Kubernetes master address: "
        prompt
        if [ -n "$PROMPT_RESULT" ]; then
            KUBERNETES_MASTER_ADDR="$PROMPT_RESULT"
            return
        fi
    done
}

# common_prompts runs the prompts shared by install paths: private IP, airgap image
# preload check, and (for HA without the EKCO internal LB) the load balancer address.
function common_prompts() {
    if [ -z "$PRIVATE_ADDRESS" ]; then
        prompt_for_private_ip
    fi
    # TODO public address? only required for adding SAN to K8s API server cert

    prompt_airgap_preload_images

    if [ "$HA_CLUSTER" = "1" ] && [ "$EKCO_ENABLE_INTERNAL_LOAD_BALANCER" != "1" ]; then
        prompt_for_load_balancer_address
    fi
}

# prompt_license fetches and displays the license at LICENSE_URL (if any) and requires
# explicit acceptance; airgap installs with a license URL are rejected.
function prompt_license() {
    if [ -n "$LICENSE_URL" ]; then
        if [ "$AIRGAP" = "1" ]; then
            bail "License Agreements with Airgap installs are not supported yet.\n"
            return
        fi
        curl --fail $LICENSE_URL || bail "Failed to fetch license at url: $LICENSE_URL"
        printf "\n\nThe license text is reproduced above. To view the license in your browser visit $LICENSE_URL.\n\n"
        printf "Do you accept the license agreement?"
        if confirmN; then
            printf "License Agreement Accepted. Continuing Installation.\n"
        else
            bail "License Agreement Not Accepted. 'y' or 'Y' needed to accept. Exiting installation."
        fi
    fi
}

# prompt_for_load_balancer_address resolves LOAD_BALANCER_ADDRESS/LOAD_BALANCER_PORT:
# it prefers the cluster's previous controlPlaneEndpoint, then any explicitly provided
# address, then the first-primary fallback, and finally prompts the user (offering the
# EKCO internal load balancer when the EKCO version supports it).
function prompt_for_load_balancer_address() {
    local lastLoadBalancerAddress=

    if kubeadm_cluster_configuration >/dev/null 2>&1; then
        lastLoadBalancerAddress="$(kubeadm_cluster_configuration | grep 'controlPlaneEndpoint:' | sed 's/controlPlaneEndpoint: \|"//g')"
        if [ -n "$lastLoadBalancerAddress" ]; then
            splitHostPort "$lastLoadBalancerAddress"
            # no port in the previous endpoint -> default to 6443
            if [ "$HOST" = "$lastLoadBalancerAddress" ]; then
                lastLoadBalancerAddress="$lastLoadBalancerAddress:6443"
            fi
        fi
    fi

    if [ -n "$LOAD_BALANCER_ADDRESS" ] && [ -n "$lastLoadBalancerAddress" ]; then
        splitHostPort "$LOAD_BALANCER_ADDRESS"
        if [ "$HOST" = "$LOAD_BALANCER_ADDRESS" ]; then
            LOAD_BALANCER_ADDRESS="$LOAD_BALANCER_ADDRESS:6443"
        fi
        # flag a change of endpoint so later steps can react to it
        if [ "$LOAD_BALANCER_ADDRESS" != "$lastLoadBalancerAddress" ]; then
            LOAD_BALANCER_ADDRESS_CHANGED=1
        fi
    fi

    if [ -z "$LOAD_BALANCER_ADDRESS" ] && [ -n "$lastLoadBalancerAddress" ]; then
        LOAD_BALANCER_ADDRESS="$lastLoadBalancerAddress"
    fi

    if [ -z "$LOAD_BALANCER_ADDRESS" ] && [ "$KUBERNETES_LOAD_BALANCER_USE_FIRST_PRIMARY" = "1" ]; then
        # EKCO_ENABLE_INTERNAL_LOAD_BALANCER takes precedence
        if [ -z "$EKCO_VERSION" ] || [ "$EKCO_ENABLE_INTERNAL_LOAD_BALANCER" != "1" ]; then
            LOAD_BALANCER_ADDRESS="$PRIVATE_ADDRESS"
            LOAD_BALANCER_PORT=6443
        fi
    fi

    if [ -z "$LOAD_BALANCER_ADDRESS" ]; then
        if ! prompts_can_prompt ; then
            bail "kubernetes.loadBalancerAddress required"
        fi

        # EKCO >= 0.11.0 can provide an internal HAProxy load balancer as the default
        if [ -n "$EKCO_VERSION" ] && semverCompare "$EKCO_VERSION" "0.11.0" && [ "$SEMVER_COMPARE_RESULT" -ge "0" ]; then
            printf "\nIf you would like to bring your own load balancer to route external and internal traffic to the API servers, please enter a load balancer address.\n"
            printf "HAProxy will be used to perform this load balancing internally if you do not provide a load balancer address.\n"
            printf "Load balancer address: "
            prompt
            LOAD_BALANCER_ADDRESS="$PROMPT_RESULT"
            if [ -z "$LOAD_BALANCER_ADDRESS" ]; then
                EKCO_ENABLE_INTERNAL_LOAD_BALANCER=1
            fi
        else
            printf "Please enter a load balancer address to route external and internal traffic to the API servers.\n"
            printf "In the absence of a load balancer address, all traffic will be routed to the first master.\n"
            printf "Load balancer address: "
            prompt
            LOAD_BALANCER_ADDRESS="$PROMPT_RESULT"
            if [ -z "$LOAD_BALANCER_ADDRESS" ]; then
                LOAD_BALANCER_ADDRESS="$PRIVATE_ADDRESS"
                LOAD_BALANCER_PORT=6443
            fi
        fi
    fi

    if [ -z "$LOAD_BALANCER_PORT" ]; then
        splitHostPort "$LOAD_BALANCER_ADDRESS"
        LOAD_BALANCER_ADDRESS="$HOST"
        LOAD_BALANCER_PORT="$PORT"
    fi
    if [ -z "$LOAD_BALANCER_PORT" ]; then
        LOAD_BALANCER_PORT=6443
    fi

    # localhost:6444 is the address of the internal load balancer
    if [ "$LOAD_BALANCER_ADDRESS" = "localhost" ] && [ "$LOAD_BALANCER_PORT" = "6444" ]; then
        EKCO_ENABLE_INTERNAL_LOAD_BALANCER=1
    fi

    if [ -n "$LOAD_BALANCER_ADDRESS" ]; then
        $BIN_BASHTOYAML -c "$MERGED_YAML_SPEC" -f "load-balancer-address=${LOAD_BALANCER_ADDRESS}:${LOAD_BALANCER_PORT}"
    fi
}

# if remote nodes are in the cluster and this is an airgap install, prompt the user to run the
# load-images task on all remotes before proceeding because remaining steps may cause pods to
# be scheduled on those nodes with new images.
function prompt_airgap_preload_images() {
    if [ "$AIRGAP" != "1" ]; then
        return 0
    fi

    if ! kubernetes_has_remotes; then
        return 0
    fi

    local unattended_nodes_missing_images=0

    while read -r node; do
        local nodeName=$(echo "$node" | awk '{ print $1 }')
        # the local node's images are handled by this script itself
        if [ "$nodeName" = "$(get_local_node_name)" ]; then
            continue
        fi
        if kubernetes_node_has_all_images "$nodeName"; then
            continue
        fi
        local kurl_install_directory_flag="$(get_kurl_install_directory_flag "${KURL_INSTALL_DIRECTORY_FLAG}")"
        printf "\nRun this script on node ${GREEN}${nodeName}${NC} to load required images before proceeding:\n"
        printf "\n"
        printf "${GREEN}\tcat ./tasks.sh | sudo bash -s load-images${kurl_install_directory_flag}${NC}"
        printf "\n"

        if [ "${KURL_IGNORE_REMOTE_LOAD_IMAGES_PROMPT}" != "1" ]; then
            if ! prompts_can_prompt ; then
                # cannot ask; remember and bail after the loop
                unattended_nodes_missing_images=1
                continue
            fi

            while true; do
                echo ""
                printf "Have images been loaded on node ${nodeName}? "
                if confirmN ; then
                    break
                fi
            done
        else
            logWarn "Remote load-images task prompt explicitly ignored"
        fi
    done < <(kubectl get nodes --no-headers)

    if [ "$unattended_nodes_missing_images" = "1" ] ; then
        bail "Preloading images required"
    fi
}

# prompt_for_private_ip discovers candidate network interfaces (IPv4, or IPv6 when
# IPV6_ONLY=1) and sets PRIVATE_ADDRESS, prompting when more than one is found.
function prompt_for_private_ip() {
    _count=0

    if [ "$IPV6_ONLY" = "1" ]; then
        _regex_ipv6="^[[:digit:]]+: ([^[:space:]]+)[[:space:]]+inet6 ([[:alnum:]:]+)"
        while read -r _line; do
            [[ $_line =~ $_regex_ipv6 ]]
            # skip loopback and known CNI/runtime interfaces
            if [ "${BASH_REMATCH[1]}" != "lo" ] && [ "${BASH_REMATCH[1]}" != "kube-ipvs0" ] && [ "${BASH_REMATCH[1]}" != "docker0" ] && [ "${BASH_REMATCH[1]}" != "weave" ] && [ "${BASH_REMATCH[1]}" != "antrea-gw0" ] && [ "${BASH_REMATCH[1]}" != "flannel.1" ] && [ "${BASH_REMATCH[1]}" != "cni0" ]; then
                _iface_names[$((_count))]=${BASH_REMATCH[1]}
                _iface_addrs[$((_count))]=${BASH_REMATCH[2]}
                let "_count += 1"
            fi
        done <<< "$(ip -6 -o addr)"
    else
        _regex_ipv4="^[[:digit:]]+: ([^[:space:]]+)[[:space:]]+[[:alnum:]]+ ([[:digit:].]+)"
        while read -r _line; do
            [[ $_line =~ $_regex_ipv4 ]]
            # skip loopback and known CNI/runtime interfaces
            if [ "${BASH_REMATCH[1]}" != "lo" ] && [ "${BASH_REMATCH[1]}" != "kube-ipvs0" ] && [ "${BASH_REMATCH[1]}" != "docker0" ] && [ "${BASH_REMATCH[1]}" != "weave" ] && [ "${BASH_REMATCH[1]}" != "antrea-gw0" ] && [ "${BASH_REMATCH[1]}" != "flannel.1" ] && [ "${BASH_REMATCH[1]}" != "cni0" ]; then
                _iface_names[$((_count))]=${BASH_REMATCH[1]}
                _iface_addrs[$((_count))]=${BASH_REMATCH[2]}
                let "_count += 1"
            fi
        done <<< "$(ip -4 -o addr)"
    fi


    if [ "$_count" -eq "0" ]; then
        echo >&2 "Error: The installer couldn't discover any valid network interfaces on this machine."
        echo >&2 "Check your network configuration and re-run this script again."
        echo >&2 "If you want to skip this discovery process, pass the 'private-address' arg to this script, e.g. 'sudo ./install.sh private-address=1.2.3.4'"
        exit 1
    elif [ "$_count" -eq "1" ]; then
        PRIVATE_ADDRESS=${_iface_addrs[0]}
        printf "The installer will use network interface '%s' (with IP address '%s')\n" "${_iface_names[0]}" "${_iface_addrs[0]}"
        return
    fi

    if ! prompts_can_prompt ; then
        bail "Multiple network interfaces present, please select an IP address. Try passing the selected address to this script e.g. 'sudo ./install.sh private-address=1.2.3.4' or assign an IP address to the privateAddress field in the kurl add-on."
    fi

    printf "The installer was unable to automatically detect the private IP address of this machine.\n"
    printf "Please choose one of the following network interfaces:\n"
    for i in $(seq 0 $((_count-1))); do
        printf "[%d] %-5s\t%s\n" "$i" "${_iface_names[$i]}" "${_iface_addrs[$i]}"
    done
    while true; do
        printf "Enter desired number (0-%d): " "$((_count-1))"
        prompt
        if [ -z "$PROMPT_RESULT" ]; then
            continue
        fi
        if [ "$PROMPT_RESULT" -ge "0" ] && [ "$PROMPT_RESULT" -lt "$_count" ]; then
            PRIVATE_ADDRESS=${_iface_addrs[$PROMPT_RESULT]}
            printf "The installer will use network interface '%s' (with IP address '%s').\n" "${_iface_names[$PROMPT_RESULT]}" "$PRIVATE_ADDRESS"
            return
        fi
    done
}


# proxy_bootstrap picks up proxy settings from the environment (HTTP_PROXY et al.) and,
# below, lets the installer spec/yaml override them before any downloads happen.
function proxy_bootstrap() {
    if [ -n "$HTTP_PROXY" ]; then
        ENV_PROXY_ADDRESS="$HTTP_PROXY"
        export https_proxy="$HTTP_PROXY"
        printf "The installer will use the proxy at '%s' (imported from env var 'HTTP_PROXY')\n" "$ENV_PROXY_ADDRESS"
    elif [ -n "$http_proxy" ]; then
        ENV_PROXY_ADDRESS="$http_proxy"
        export https_proxy="$http_proxy"
        printf "The installer will use the proxy at '%s' (imported from env var 'http_proxy')\n" "$ENV_PROXY_ADDRESS"
    elif [ -n "$HTTPS_PROXY" ]; then
        ENV_PROXY_ADDRESS="$HTTPS_PROXY"
        printf "The installer will use the proxy at '%s' (imported from env var 'HTTPS_PROXY')\n" "$ENV_PROXY_ADDRESS"
    elif [ -n "$https_proxy" ]; then
        ENV_PROXY_ADDRESS="$https_proxy"
        printf "The installer will use the proxy at '%s' (imported from env var 'https_proxy')\n" "$ENV_PROXY_ADDRESS"
    fi

    if [ -n "$NO_PROXY" ]; then
        ENV_NO_PROXY="$NO_PROXY"
    elif [ -n "$no_proxy" ]; then
        ENV_NO_PROXY="$no_proxy"
    fi

    # Need to peek at the yaml spec to find if a proxy is needed to download the util binaries
    if [ -n "$INSTALLER_SPEC_FILE" ]; then
        local overrideProxy=$(grep "proxyAddress:" "$INSTALLER_SPEC_FILE" | grep -o "http[^'\" ]*")
        if [ -n "$overrideProxy" ]; then
            export https_proxy="$overrideProxy"
            kubectl_no_proxy
            echo "Bootstrapped proxy address from installer spec file: $https_proxy"
            return
        fi
    fi
    local proxy=$(echo "$INSTALLER_YAML" | grep "proxyAddress:" | grep -o "http[^'\" ]*")
    if [ -n "$proxy" ]; then
        export https_proxy="$proxy"
        kubectl_no_proxy
        echo "Bootstrapped proxy address from installer yaml: $https_proxy"
        return
    fi

    if [ -n "$ENV_PROXY_ADDRESS" ]; then
        export https_proxy="$ENV_PROXY_ADDRESS"
        kubectl_no_proxy
        log "Bootstrapped proxy address from ENV_PROXY_ADDRESS: $https_proxy"
        return
    fi
}

# check_proxy_config checks whether it is possible to connect to a registry through the
# configured proxy. It pulls crictl test/invalid/image:latest — the image itself does not
# matter, we are only looking for proxy issues. With a misconfigured proxy the pull fails
# with errors like:
# E0525 09:01:01.952576 1399831 remote_image.go:167] "PullImage from image service failed" err="rpc error: code = Unknown desc = failed to pull and unpack image \"docker.io/test/invalid/image:latest\": failed to resolve reference \"docker.io/test/invalid/image:latest\": failed to do request: Head \"https://registry-1.docker.io/v2/test/invalid/image/manifests/latest\": proxyconnect tcp: dial tcp: lookup invalidproxy: Temporary failure in name resolution" image="test/invalid/image:latest"
# FATA[0000] pulling image: rpc error: code = Unknown desc = failed to pull and unpack image "docker.io/test/invalid/image:latest": failed to resolve reference "docker.io/test/invalid/image:latest": failed to do request: Head "https://registry-1.docker.io/v2/test/invalid/image/manifests/latest": proxyconnect tcp: dial tcp: lookup invalidproxy: Temporary failure in name resolution
function check_proxy_config() {
    if [ -z "$CONTAINERD_VERSION" ]; then
        return
    fi

    logStep "Checking proxy configuration with Containerd"

    # Echo containerd Proxy config:
    local proxy_config_file="/etc/systemd/system/containerd.service.d/http-proxy.conf"
    if [ ! -f "$proxy_config_file" ]; then
        log "Skipping test. No HTTP proxy configuration found."
        return
    fi

    echo ""
    log "Proxy config:"
    grep -v -e '^\[Service\]' -e '^# Generated by kURL' "$proxy_config_file"
    echo ""

    # pull fails AND the error mentions "proxy" -> likely a proxy misconfiguration
    if ! response=$(crictl pull test/invalid/image:latest 2>&1) && [[ $response =~ .*"proxy".* ]]; then
        logWarn "Proxy connection issues were identified:"
        error_message=$(echo "$response" | grep -oP '(?<=failed to do request: ).*' | sed -r 's/.*: //' | awk -F "\"" '{print $(NF-1)}' | sed -r 's/test\/invalid\/image:latest//')
        logWarn "$error_message"
        echo ""
        logWarn "Please review the proxy configuration and ensure that it is valid."
        logWarn "More info: https://kurl.sh/docs/install-with-kurl/proxy-installs"
        return
    fi
    # success path: no proxy-related failure was detected
    logSuccess "Unable to identify proxy problems"
}


# kubectl_no_proxy adds the API server host from admin.conf to no_proxy so kubectl
# traffic bypasses the proxy.
function kubectl_no_proxy() {
    if [ ! -f /etc/kubernetes/admin.conf ]; then
        return
    fi
    kubectlEndpoint=$(cat /etc/kubernetes/admin.conf | grep 'server:' | awk '{ print $NF }' | sed -E 's/https?:\/\///g')
    splitHostPort "$kubectlEndpoint"
    if [ -n "$no_proxy" ]; then
        export no_proxy="$no_proxy,$HOST"
    else
        export no_proxy="$HOST"
    fi
}

# configure_proxy exports https_proxy from PROXY_ADDRESS/ENV_PROXY_ADDRESS, or clears
# all proxy variables when NO_PROXY=1.
function configure_proxy() {
    if [ "$NO_PROXY" = "1" ]; then
        echo "Not using http proxy"
        unset PROXY_ADDRESS
        unset http_proxy
        unset HTTP_PROXY
        unset https_proxy
        unset HTTPS_PROXY
        return
    fi
    if [ -z "$PROXY_ADDRESS" ] && [ -z "$ENV_PROXY_ADDRESS" ]; then
        log "Not using proxy address"
        return
    fi
    if [ -z "$PROXY_ADDRESS" ]; then
        PROXY_ADDRESS="$ENV_PROXY_ADDRESS"
    fi
    export https_proxy="$PROXY_ADDRESS"
    echo "Using proxy address $PROXY_ADDRESS"
}

# configure_no_proxy_preinstall exports a minimal no_proxy list known before any
# add-on configuration is available.
function configure_no_proxy_preinstall() {
    if [ -z "$PROXY_ADDRESS" ]; then
        return
    fi

    local addresses="localhost,127.0.0.1,.svc,.local,.default,kubernetes"

    if [ -n "$ENV_NO_PROXY" ]; then
        addresses="${addresses},${ENV_NO_PROXY}"
    fi
    if [ -n "$PRIVATE_ADDRESS" ]; then
        addresses="${addresses},${PRIVATE_ADDRESS}"
    fi
    if [ -n "$LOAD_BALANCER_ADDRESS" ]; then
        addresses="${addresses},${LOAD_BALANCER_ADDRESS}"
    fi
    if [ -n "$ADDITIONAL_NO_PROXY_ADDRESSES" ]; then
        addresses="${addresses},${ADDITIONAL_NO_PROXY_ADDRESSES}"
    fi

    # filter duplicates
    addresses=$(unique_no_proxy_addresses "$addresses")

    # kubeadm requires this in the environment to reach K8s masters
    export no_proxy="$addresses"
    NO_PROXY_ADDRESSES="$addresses"
    echo "Exported no_proxy: $no_proxy"
}

# configure_no_proxy builds the full no_proxy list, including the namespaces/service
# names of every enabled add-on plus cluster addresses and CIDRs.
function configure_no_proxy() {
    if [ -z "$PROXY_ADDRESS" ]; then
        return
    fi

    local addresses="localhost,127.0.0.1,.svc,.local,.default,kubernetes"

    if [ -n "$ENV_NO_PROXY" ]; then
        addresses="${addresses},${ENV_NO_PROXY}"
    fi
    if [ -n "$KOTSADM_VERSION" ]; then
        addresses="${addresses},kotsadm-rqlite,kotsadm-api-node"
    fi
    if [ -n "$ROOK_VERSION" ]; then
        addresses="${addresses},.rook-ceph"
    fi
    if [ -n "$FLUENTD_VERSION" ]; then
        addresses="${addresses},.logging"
    fi
    if [ -n "$REGISTRY_VERSION" ]; then
        addresses="${addresses},.kurl"
    fi
    if [ -n "$PROMETHEUS_VERSION" ]; then
        addresses="${addresses},.monitoring"
    fi
    if [ -n "$VELERO_VERSION" ] && [ -n "$VELERO_NAMESPACE" ]; then
        addresses="${addresses},.${VELERO_NAMESPACE}"
    fi
    if [ -n "$MINIO_VERSION" ] && [ -n "$MINIO_NAMESPACE" ]; then
        addresses="${addresses},.${MINIO_NAMESPACE}"
    fi

    if [ -n "$PRIVATE_ADDRESS" ]; then
        addresses="${addresses},${PRIVATE_ADDRESS}"
    fi
    if [ -n "$LOAD_BALANCER_ADDRESS" ]; then
        addresses="${addresses},${LOAD_BALANCER_ADDRESS}"
    fi
    if [ -n "$KUBERNETES_MASTER_ADDR" ]; then
        addresses="${addresses},${KUBERNETES_MASTER_ADDR}"
    fi
    if [ -n "$POD_CIDR" ]; then
        addresses="${addresses},${POD_CIDR}"
    fi
    if [ -n "$SERVICE_CIDR" ]; then
        addresses="${addresses},${SERVICE_CIDR}"
    fi
    if [ -n "$ADDITIONAL_NO_PROXY_ADDRESSES" ]; then
        addresses="${addresses},${ADDITIONAL_NO_PROXY_ADDRESSES}"
    fi

    # filter duplicates
    addresses=$(unique_no_proxy_addresses "$addresses")

    # kubeadm requires this in the environment to reach K8s masters
    export no_proxy="$addresses"
    NO_PROXY_ADDRESSES="$addresses"
    echo "Exported no_proxy: $no_proxy"
}

# unique_no_proxy_addresses de-duplicates a comma-separated list, dropping empty entries.
function unique_no_proxy_addresses() {
    echo "$1" | sed 's/,/\n/g' | sed '/^\s*$/d' | sort | uniq | paste -s --delimiters=","
}


REPORTING_CONTEXT_INFO=""

INSTALLATION_ID=
TESTGRID_ID=
KURL_CLUSTER_UUID=
function report_install_start() {
    # report that the install started
    # this includes the install ID, time, kurl URL, and linux distribution name + version.

    if [ -f "/tmp/testgrid-id" ]; then
        TESTGRID_ID=$(cat /tmp/testgrid-id)
    fi

    # if airgapped, don't create an installation ID and return early
    if [ "$AIRGAP" == "1" ]; then
        return 0
    fi

    # if DISABLE_REPORTING is set, don't create an installation ID (which thus disables all the other reporting calls) and return early
    if [ "${DISABLE_REPORTING}" = "1" ]; then
        return 0
    fi

    INSTALLATION_ID=$(< /dev/urandom tr -dc a-z0-9 | head -c16)
    local started=$(date -u +"%Y-%m-%dT%H:%M:%SZ") # rfc3339

    # Determine if it is the first kurl install
    local is_upgrade="false"
    if kubernetes_resource_exists kube-system configmap kurl-config; then
        local is_upgrade="true"
    fi

    # get the kurl_cluster_id
    attempt_get_cluster_id

    curl -s --output /dev/null -H 'Content-Type: application/json' --max-time 5 \
        -d "{\
        \"started\": \"$started\", \
        \"os\": \"$LSB_DIST $DIST_VERSION\", \
        \"kernel_version\": \"$KERNEL_MAJOR.$KERNEL_MINOR\", \
        \"kurl_url\": \"$KURL_URL\", \
        \"installer_id\": \"$INSTALLER_ID\", \
        \"testgrid_id\": \"$TESTGRID_ID\", \
        \"machine_id\": \"$MACHINE_ID\", \
        \"kurl_instance_uuid\": \"$KURL_INSTANCE_UUID\", \
        \"is_upgrade\": $is_upgrade, \
        \"is_ha_cluster\": \"$HA_CLUSTER\", \
        \"num_processors\": \"$(nproc)\", \
        \"memory_size_kb\": \"$(cat /proc/meminfo | grep MemTotal | awk '{print $2}')\", \
        \"kurl_cluster_uuid\": \"$KURL_CLUSTER_UUID\" \
        }" \
        $REPLICATED_APP_URL/kurl_metrics/start_install/$INSTALLATION_ID || true
}

function report_install_success() {
    # report that the install finished successfully

    # if INSTALLATION_ID is empty reporting is disabled
    if [ -z "$INSTALLATION_ID" ]; then
        return 0
    fi

    local completed=$(date -u +"%Y-%m-%dT%H:%M:%SZ") # rfc3339

    curl -s --output /dev/null -H 'Content-Type: application/json' --max-time 5 \
        -d "{\"finished\": \"$completed\"}" \
        $REPLICATED_APP_URL/kurl_metrics/finish_install/$INSTALLATION_ID || true
}

function report_install_fail() {
    # report that the install failed
    local cause=$1

    # if INSTALLATION_ID is empty reporting is disabled
    if [ -z "$INSTALLATION_ID" ]; then
        return 0
    fi

    local completed=$(date -u +"%Y-%m-%dT%H:%M:%SZ") # rfc3339

    curl -s --output /dev/null -H 'Content-Type: application/json' --max-time 5 \
        -d "{\"finished\": \"$completed\", \"cause\": \"$cause\"}" \
        $REPLICATED_APP_URL/kurl_metrics/fail_install/$INSTALLATION_ID || true
}

function report_addon_start() {
    # report that an addon started installation
    local name=$1
    local version=$2

    # if INSTALLATION_ID is empty reporting is disabled
    if [ -z "$INSTALLATION_ID" ]; then
        return 0
    fi

    local started=$(date -u +"%Y-%m-%dT%H:%M:%SZ") # rfc3339

    curl -s --output /dev/null -H 'Content-Type: application/json' --max-time 5 \
        -d "{\"started\": \"$started\", \"addon_version\": \"$version\", \"testgrid_id\": \"$TESTGRID_ID\"}" \
        $REPLICATED_APP_URL/kurl_metrics/start_addon/$INSTALLATION_ID/$name || true
}

function report_addon_success() {
    # report that an addon installed successfully
    local name=$1
    local version=$2

    # if INSTALLATION_ID is empty reporting is disabled
    if [ -z "$INSTALLATION_ID" ]; then
        return 0
    fi

    local completed=$(date -u +"%Y-%m-%dT%H:%M:%SZ") # rfc3339

    curl -s --output /dev/null -H 'Content-Type: application/json' --max-time 5 \
        -d "{\"finished\": \"$completed\"}" \
        $REPLICATED_APP_URL/kurl_metrics/finish_addon/$INSTALLATION_ID/$name || true
}

function report_addon_fail() {
    # report that an addon failed to install
    local name=$1
    local version=$2

    # if INSTALLATION_ID is empty reporting is disabled
    if [ -z "$INSTALLATION_ID" ]; then
        return 0
    fi

    local completed=$(date -u +"%Y-%m-%dT%H:%M:%SZ") # rfc3339

    curl -s --output /dev/null -H 'Content-Type: application/json' --max-time 5 \
        -d "{\"finished\": \"$completed\"}" \
        $REPLICATED_APP_URL/kurl_metrics/fail_addon/$INSTALLATION_ID/$name || true
}

# ctrl_c is the SIGINT trap: reports the interrupt, optionally collects a support
# bundle, and exits non-zero.
function ctrl_c() {
    trap - SIGINT # reset SIGINT handler to default - someone should be able to ctrl+c the support bundle collector
    read line file <<<$(caller)

    printf "${YELLOW}Trapped ctrl+c on line $line${NC}\n"

    local totalStack
    totalStack=$(stacktrace)

    local infoString="with stack $totalStack - bin utils $KURL_BIN_UTILS_FILE - context $REPORTING_CONTEXT_INFO"

    if [ -z "$SUPPORT_BUNDLE_READY" ]; then
        report_install_fail "trapped ctrl+c before completing k8s install $infoString"
        exit 1
    fi

    report_install_fail "trapped ctrl+c $infoString"

    collect_support_bundle

    exit 1 # exit with error
}

# unused
function addon_install_fail() {
    # report that an addon failed to install successfully
    local name=$1
    local version=$2

    # if INSTALLATION_ID is empty reporting is disabled
    if [ -z "$INSTALLATION_ID" ]; then
        return 1 # return error because the addon in question did too
    fi

    local completed=$(date -u +"%Y-%m-%dT%H:%M:%SZ") # rfc3339

    curl -s --output /dev/null -H 'Content-Type: application/json' --max-time 5 \
        -d "{\"finished\": \"$completed\"}" \
        $REPLICATED_APP_URL/kurl_metrics/fail_addon/$INSTALLATION_ID/$name || true

    # provide an option for a user to provide a support bundle
    printf "${YELLOW}Addon ${name} ${version} failed to install${NC}\n"
    collect_support_bundle

    return 1 # return error because the addon in question did too
}

# unused
function addon_install_fail_nobundle() {
    # report that an addon failed to install (no support bundle prompt)
    local name=$1
    local version=$2

    # if INSTALLATION_ID is empty reporting is disabled
    if [ -z "$INSTALLATION_ID" ]; then
        return 1 # return error because the addon in question did too
    fi

    local completed=$(date -u +"%Y-%m-%dT%H:%M:%SZ") # rfc3339

    curl -s --output /dev/null -H 'Content-Type: application/json' --max-time 5 \
        -d "{\"finished\": \"$completed\"}" \
        $REPLICATED_APP_URL/kurl_metrics/fail_addon/$INSTALLATION_ID/$name || true

    return 1 # return error because the addon in question did too
}

# collect_support_bundle interactively gathers and uploads a support bundle.
# NOTE: the early `return 0` below disables everything after it.
function collect_support_bundle() {
    trap - SIGINT # reset SIGINT handler to default - someone should be able to ctrl+c the support bundle collector
    return 0 # disabled for now

    # if someone has set ASSUME_YES, we shouldn't automatically upload a support bundle
    if [ "$ASSUME_YES" = "1" ]; then
        return 0
    fi
    if ! prompts_can_prompt ; then
        return 0
    fi

    printf "${YELLOW}Would you like to provide a support bundle to aid us in avoiding similar errors in the future?${NC}\n"
    if ! confirmN; then
        return 0
    fi

    printf "${YELLOW}Please provide your work email address for our records (this is not a support ticket):${NC}\n"
    prompt
    local email_address=""
    if [ -n "$PROMPT_RESULT" ]; then
        email_address="$PROMPT_RESULT"
    fi

    printf "${YELLOW}Could you provide a quick description of the issue you encountered?${NC}\n"
    prompt
    local issue_description=""
    if [ -n "$PROMPT_RESULT" ]; then
        issue_description="$PROMPT_RESULT"
    fi

    # collect support bundle
    printf "Collecting support bundle now:"
    kubectl support-bundle https://kots.io

    # find the support bundle filename (most recent by name sort)
    local support_bundle_filename=$(find . -type f -name "support-bundle-*.tar.gz" | sort -r | head -n 1)

    curl 'https://support-bundle-secure-upload.replicated.com/v1/upload' \
        -H 'accept: application/json, text/plain, */*' \
        -X POST \
        -H "Content-Type: multipart/form-data" \
        -F "data={\"first_name\":\"kurl.sh\",\"last_name\":\"installer\",\"email_address\":\"${email_address}\",\"company\":\"\",\"description\":\"${issue_description}\"}" \
        -F "file=@${support_bundle_filename}" \
        --compressed

    printf "\nSupport bundle uploaded!\n"
}

# trap_report_error is the ERR trap: reports the failure location and stack, then exits.
function trap_report_error {
    if [[ ! $- =~ e ]]; then # if errexit is not set (set -e), don't report an error here
        return 0
    fi

    trap - ERR # reset the error handler to default in case there are errors within this function
    read line file <<<$(caller)
    printf "${YELLOW}An error occurred on line $line${NC}\n"

    local totalStack
    totalStack=$(stacktrace)

    report_install_fail "An error occurred with stack $totalStack - bin utils $KURL_BIN_UTILS_FILE - context $REPORTING_CONTEXT_INFO"

    if [ -n "$SUPPORT_BUNDLE_READY" ]; then
        collect_support_bundle
    fi

    exit 1
}

# stacktrace prints the caller chain as "(file: F func: X line: N)" entries.
function stacktrace {
    local i=1
    local totalStack
    while caller $i > /dev/null; do
        read line func file <<<$(caller $i)
        totalStack="$totalStack (file: $file func: $func line: $line)"
        ((i++))
    done
    echo "$totalStack"
}

# attempt_get_cluster_id will get the cluster uuid from the kurl_cluster_uuid configmap and set the
# KURL_CLUSTER_UUID env var. If it does not exist or the cluster is down, check the disk to see if
# it is persisted there, otherwise make a new UUID for KURL_CLUSTER_UUID and save to disk.
function attempt_get_cluster_id() {
    if ! kubernetes_resource_exists kurl configmap kurl-cluster-uuid ; then
        # If the cluster is down, check to see if this is an etcd member and the cluster uuid is
        # persisted to disk.
        if [ -d /var/lib/etcd/member ] && [ -f "${KURL_INSTALL_DIRECTORY}/clusteruuid" ]; then
            KURL_CLUSTER_UUID=$(cat "${KURL_INSTALL_DIRECTORY}/clusteruuid")
        else
            # no configmap and nothing on disk: mint a fresh 32-char uuid
            KURL_CLUSTER_UUID=$(< /dev/urandom tr -dc a-z0-9 | head -c32)
        fi
    else
        KURL_CLUSTER_UUID=$(kubectl get configmap -n kurl kurl-cluster-uuid -o jsonpath='{.data.kurl_cluster_uuid}')
    fi

    # Persist the cluster uuid to disk in case the cluster is down.
    # The tasks.sh reset command will remove the KURL_INSTALL_DIRECTORY directory and the cluster uuid will
    # be regenerated if reset.
    echo "$KURL_CLUSTER_UUID" > "${KURL_INSTALL_DIRECTORY}/clusteruuid"
}

# maybe_set_kurl_cluster_uuid will create the kurl_cluster_uuid configmap using the
# KURL_CLUSTER_UUID env var if it does not already exist.
function maybe_set_kurl_cluster_uuid() {
    if [ -z "$KURL_CLUSTER_UUID" ]; then
        return 0
    fi

    if kubernetes_resource_exists kurl configmap kurl-cluster-uuid; then
        return 0
    fi

    kubectl create configmap -n kurl kurl-cluster-uuid --from-literal=kurl_cluster_uuid="$KURL_CLUSTER_UUID"
}

# shellcheck disable=SC2148

# disable_rook_ceph_operator scales the Rook operator to 0 replicas (Rook 1.x only).
function disable_rook_ceph_operator() {
    if ! is_rook_1; then
        return 0
    fi

    kubectl -n rook-ceph scale deployment rook-ceph-operator --replicas=0
}

# enable_rook_ceph_operator scales the Rook operator back up to 1 replica (Rook 1.x only).
function enable_rook_ceph_operator() {
    if ! is_rook_1; then
        return 0
    fi

    kubectl -n rook-ceph scale deployment rook-ceph-operator --replicas=1
}

# is_rook_1 succeeds when the "replicapool" CephBlockPool exists in rook-ceph
# (used throughout as the marker for a Rook 1.x install — NOTE(review): inferred
# from usage here; confirm against the rest of the script).
function is_rook_1() {
    kubectl -n rook-ceph get cephblockpools replicapool &>/dev/null
}

# rook_ceph_osd_pods_gone returns 0 once no rook-ceph-osd pods remain.
function rook_ceph_osd_pods_gone() {
    if kubectl -n rook-ceph get pods -l app=rook-ceph-osd 2>/dev/null | grep 'rook-ceph-osd' &>/dev/null ; then
        return 1
    fi
    return 0
}

# prometheus_pods_gone returns 0 once no prometheus pods remain (checks both label schemes).
function prometheus_pods_gone() {
    if kubectl -n monitoring get pods -l app=prometheus 2>/dev/null | grep 'prometheus' &>/dev/null ; then
        return 1
    fi
    if kubectl -n monitoring get pods -l app.kubernetes.io/name=prometheus 2>/dev/null | grep 'prometheus' &>/dev/null ; then # the labels changed with prometheus 0.53+
        return 1
    fi

    return 0
}

function ekco_pods_gone() {
    pods_gone_by_selector kurl app=ekc-operator
}

# rook_disable_ekco_operator disables the ekco operator if it exists.
function rook_disable_ekco_operator() {
    if kubernetes_resource_exists kurl deployment ekc-operator ; then
        log "Scaling down EKCO deployment to 0 replicas"
        kubernetes_scale_down kurl deployment ekc-operator
        log "Waiting for ekco pods to be removed"
        if ! spinner_until 120 ekco_pods_gone; then
            logFail "Unable to scale down ekco operator"
            return 1
        fi
    fi
}

# rook_enable_ekco_operator enables the ekco operator if it exists.
function rook_enable_ekco_operator() {
    if kubernetes_resource_exists kurl deployment ekc-operator ; then
        echo "Scaling up EKCO deployment to 1 replica"
        kubernetes_scale kurl deployment ekc-operator 1
    fi
}

function remove_rook_ceph() {
    # For further information see: https://github.com/rook/rook/blob/v1.11.2/Documentation/Storage-Configuration/ceph-teardown.md
    # make sure there aren't any PVs using rook before deleting it
    all_pv_drivers="$(kubectl get pv -o=jsonpath='{.items[*].spec.csi.driver}')"
    if echo "$all_pv_drivers" | grep "rook" &>/dev/null ; then
        logFail "There are still PVs using rook-ceph."
+ logFail "Remove these PV(s) before continuing." + return 1 + fi + + # scale ekco to 0 replicas if it exists + if kubernetes_resource_exists kurl deployment ekc-operator; then + kubectl -n kurl scale deploy ekc-operator --replicas=0 + log "Waiting for ekco pods to be removed" + if ! spinner_until 120 ekco_pods_gone; then + logFail "Unable to scale down ekco operator" + return 1 + fi + fi + + log "Waiting up to 1 minute to remove rook-ceph pool" + if ! kubectl delete -n rook-ceph cephblockpool replicapool --timeout=60s; then + logWarn "Unable to delete rook-ceph pool" + fi + + log "Waiting up to 1 minute to remove rook-ceph Storage Classes" + if ! kubectl get storageclass | grep rook | awk '{ print $1 }' | xargs -I'{}' kubectl delete storageclass '{}' --timeout=60s; then + logFail "Unable to delete rook-ceph StorageClasses" + return 1 + fi + + # More info: https://github.com/rook/rook/blob/v1.10.12/Documentation/CRDs/Cluster/ceph-cluster-crd.md#cleanup-policy + log "Patch Ceph cluster to allow deletion" + kubectl -n rook-ceph patch cephcluster rook-ceph --type merge -p '{"spec":{"cleanupPolicy":{"confirmation":"yes-really-destroy-data"}}}' + + # remove all rook-ceph CR objects + log "Removing rook-ceph custom resource objects - this may take some time:" + log "Waiting up to 3 minutes to remove rook-ceph CephCluster resource" + if ! 
kubectl delete cephcluster -n rook-ceph rook-ceph --timeout=180s; then + # More info: https://github.com/rook/rook/blob/v1.10.12/Documentation/Storage-Configuration/ceph-teardown.md#removing-the-cluster-crd-finalizer + logWarn "Timeout of 3 minutes faced deleting the rook-ceph CephCluster resource" + logWarn "Removing critical finalizers" + kubectl -n rook-ceph patch configmap rook-ceph-mon-endpoints --type merge -p '{"metadata":{"finalizers": []}}' + kubectl -n rook-ceph patch secrets rook-ceph-mon --type merge -p '{"metadata":{"finalizers": []}}' + log "Waiting up to 2 minutes to remove rook-ceph CephCluster resource after remove critical finalizers" + if ! kubectl delete cephcluster -n rook-ceph rook-ceph --timeout=120s; then + logWarn "Timeout of 2 minutes faced deleting the rook-ceph CephCluster resource after finalizers have be removed." + logWarn "Forcing by removing all finalizers" + local crd + for crd in $(kubectl get crd -n rook-ceph | awk '/ceph.rook.io/ {print $1}') ; do + kubectl get -n rook-ceph "$crd" -o name | \ + xargs -I {} kubectl patch -n rook-ceph {} --type merge -p '{"metadata":{"finalizers": []}}' + done + # After remove the finalizers the resources might get deleted without the need to try again + sleep 20s + if kubectl get cephcluster -n rook-ceph rook-ceph >/dev/null 2>&1; then + log "Waiting up to 1 minute to remove rook-ceph CephCluster resource" + if ! kubectl delete cephcluster -n rook-ceph rook-ceph --timeout=60s; then + logFail "Unable to delete the rook-ceph CephCluster resource" + return 1 + fi + else + log "The rook-ceph CephCluster resource was deleted" + fi + fi + fi + + log "Removing rook-ceph custom resources" + if ! kubectl get crd | grep 'ceph.rook.io' | awk '{ print $1 }' | xargs -I'{}' kubectl -n rook-ceph delete '{}' --all --timeout=60s; then + logWarn "Unable to delete the rook-ceph custom resources" + fi + + log "Removing rook-ceph Volume resources" + if ! 
kubectl delete volumes.rook.io --all --timeout=60s; then + logWarn "Unable to delete rook-ceph Volume resources" + fi + + log "Waiting for rook-ceph OSD pods to be removed" + if ! spinner_until 120 rook_ceph_osd_pods_gone; then + logWarn "rook-ceph OSD pods were not deleted" + fi + + log "Removing rook-ceph CRDs" + if ! kubectl get crd | grep 'ceph.rook.io' | awk '{ print $1 }' | xargs -I'{}' kubectl delete crd '{}' --timeout=60s; then + logWarn "Unable to delete rook-ceph CRDs" + fi + + log "Removing rook-ceph volumes custom resource" + if ! kubectl delete --ignore-not-found crd volumes.rook.io --timeout=60s; then + logWarn "Unable delete rook-ceph volumes custom resource" + fi + + log "Removing the rook-ceph Namespace" + if ! kubectl delete ns rook-ceph --timeout=60s; then + logFail "Unable to delete the rook-ceph Namespace" + logFail "These resources are preventing the namespace's deletion:" + kubectl api-resources --verbs=list --namespaced -o name \ + | xargs -n 1 kubectl get --show-kind --ignore-not-found -n rook-ceph + return 1 + fi + + # scale ekco back to 1 replicas if it exists + if kubernetes_resource_exists kurl deployment ekc-operator; then + kubectl -n kurl get configmap ekco-config -o yaml | \ + sed --expression='s/maintain_rook_storage_nodes:[ ]*true/maintain_rook_storage_nodes: false/g' | \ + kubectl -n kurl apply -f - + kubectl -n kurl scale deploy ekc-operator --replicas=1 + fi + + rm -rf /var/lib/rook || true + rm -rf /opt/replicated/rook || true + + if [ -d "/var/lib/rook" ] || [ -d "/opt/replicated/rook" ]; then + logWarn "Data within /var/lib/rook, /opt/replicated/rook and any bound disks has not been freed." + fi + + # print success message + logSuccess "Removed rook-ceph successfully!" 
+} + +# scale down prometheus, move all 'rook-ceph' PVCs to provided storage class, scale up prometheus +# Supported storage class migrations from ceph are: 'longhorn' and 'openebs' +function rook_ceph_to_sc_migration() { + local destStorageClass=$1 + local didRunValidationChecks=$2 + local scProvisioner + scProvisioner=$(kubectl get sc "$destStorageClass" -ojsonpath='{.provisioner}') + + # we only support migrating to 'longhorn' and 'openebs' storage classes + if [[ "$scProvisioner" != *"longhorn"* ]] && [[ "$scProvisioner" != *"openebs"* ]]; then + bail "Ceph to $scProvisioner migration is not supported" + fi + + report_addon_start "rook-ceph-to-${scProvisioner}-migration" "v2" + + # patch ceph so that it does not consume new devices that longhorn creates + echo "Patching CephCluster storage.useAllDevices=false" + kubectl -n rook-ceph patch cephcluster rook-ceph --type json --patch '[{"op": "replace", "path": "/spec/storage/useAllDevices", value: false}]' + sleep 1 + echo "Waiting for CephCluster to update" + spinner_until 300 rook_osd_phase_ready || true # don't fail + + # set prometheus scale if it exists + local ekcoScaledDown=0 + if kubectl get namespace monitoring &>/dev/null; then + if kubectl -n monitoring get prometheus k8s &>/dev/null; then + # before scaling down prometheus, scale down ekco as it will otherwise restore the prometheus scale + if kubernetes_resource_exists kurl deployment ekc-operator; then + ekcoScaledDown=1 + kubectl -n kurl scale deploy ekc-operator --replicas=0 + log "Waiting for ekco pods to be removed" + if ! spinner_until 120 ekco_pods_gone; then + logFail "Unable to scale down ekco operator" + return 1 + fi + fi + + kubectl -n monitoring patch prometheus k8s --type='json' --patch '[{"op": "replace", "path": "/spec/replicas", value: 0}]' + echo "Waiting for prometheus pods to be removed" + spinner_until 300 prometheus_pods_gone + fi + fi + + # scale down ekco if kotsadm is using rqlite. 
+ if kubernetes_resource_exists default statefulset kotsadm-rqlite ; then + if [ "$ekcoScaledDown" = "0" ]; then + if kubernetes_resource_exists kurl deployment ekc-operator; then + ekcoScaledDown=1 + kubectl -n kurl scale deploy ekc-operator --replicas=0 + log "Waiting for ekco pods to be removed" + if ! spinner_until 120 ekco_pods_gone; then + logFail "Unable to scale down ekco operator" + return 1 + fi + fi + fi + fi + + # get the list of StorageClasses that use rook-ceph + rook_scs=$(kubectl get storageclass | grep rook | grep -v '(default)' | awk '{ print $1}') # any non-default rook StorageClasses + rook_default_sc=$(kubectl get storageclass | grep rook | grep '(default)' | awk '{ print $1}') # any default rook StorageClasses + + for rook_sc in $rook_scs + do + if [ "$didRunValidationChecks" == "1" ]; then + # run the migration w/o validation checks + $BIN_PVMIGRATE --source-sc "$rook_sc" --dest-sc "$destStorageClass" --rsync-image "$KURL_UTIL_IMAGE" --skip-free-space-check --skip-preflight-validation + else + # run the migration (without setting defaults) + $BIN_PVMIGRATE --source-sc "$rook_sc" --dest-sc "$destStorageClass" --rsync-image "$KURL_UTIL_IMAGE" + fi + done + + for rook_sc in $rook_default_sc + do + if [ "$didRunValidationChecks" == "1" ]; then + # run the migration w/o validation checks + $BIN_PVMIGRATE --source-sc "$rook_sc" --dest-sc "$destStorageClass" --rsync-image "$KURL_UTIL_IMAGE" --skip-free-space-check --skip-preflight-validation --set-defaults + else + # run the migration (setting defaults) + $BIN_PVMIGRATE --source-sc "$rook_sc" --dest-sc "$destStorageClass" --rsync-image "$KURL_UTIL_IMAGE" --set-defaults + fi + done + + # reset ekco scale + if [ "$ekcoScaledDown" = "1" ] ; then + kubectl -n kurl scale deploy ekc-operator --replicas=1 + fi + + # reset prometheus scale + if kubectl get namespace monitoring &>/dev/null; then + if kubectl get prometheus -n monitoring k8s &>/dev/null; then + kubectl patch prometheus -n monitoring k8s 
--type='json' --patch '[{"op": "replace", "path": "/spec/replicas", value: 2}]' + fi + fi + + # print success message + printf "${GREEN}Migration from rook-ceph to %s completed successfully!\n${NC}" "$scProvisioner" + report_addon_success "rook-ceph-to-$scProvisioner-migration" "v2" +} + +# if PVCs and object store data have both been migrated from rook-ceph and rook-ceph is no longer specified in the kURL spec, remove rook-ceph +function maybe_cleanup_rook() { + if [ -z "$ROOK_VERSION" ]; then + + # Just continue if Rook is installed. + if ! kubectl get ns | grep -q rook-ceph; then + return + fi + logStep "Removing Rook" + + export DID_MIGRATE_ROOK_PVCS=0 + export DID_MIGRATE_ROOK_OBJECT_STORE=0 + DID_MIGRATE_ROOK_PVCS=$(kubectl -n kurl get --ignore-not-found configmap kurl-migration-from-rook -o jsonpath='{ .data.DID_MIGRATE_ROOK_PVCS }') + DID_MIGRATE_ROOK_OBJECT_STORE=$(kubectl -n kurl get --ignore-not-found configmap kurl-migration-from-rook -o jsonpath='{ .data.DID_MIGRATE_ROOK_OBJECT_STORE }') + + if [ "$DID_MIGRATE_ROOK_PVCS" == "1" ] && [ "$DID_MIGRATE_ROOK_OBJECT_STORE" == "1" ]; then + report_addon_start "rook-ceph-removal" "v1.1" + if ! remove_rook_ceph; then + logFail "Unable to remove Rook." + report_addon_fail "rook-ceph-removal" "v1.1" + return + fi + kubectl delete configmap kurl-migration-from-rook -n kurl + report_addon_success "rook-ceph-removal" "v1.1" + return + fi + + # If upgrade from Rook to OpenEBS without Minio we cannot remove Rook because + # we do not know if the solution uses or not ObjectStore and if someone data will not be lost + if [ "$DID_MIGRATE_ROOK_PVCS" == "1" ] && [ -z "$MINIO_VERSION" ]; then + if [ -z "$DID_MIGRATE_ROOK_OBJECT_STORE" ] || [ "$DID_MIGRATE_ROOK_OBJECT_STORE" != "1" ]; then + logWarn "PVC(s) were migrated from Rook but Object Store data was not, as no MinIO version was specified." + logWarn "Rook will not be automatically removed without migrating Object Store data." 
+ logWarn "" + logWarn "If you are sure that Object Store data is not used, you can manually perform this operation" + logWarn "by running the remove_rook_ceph task:" + logWarn "$ curl /task.sh | sudo bash -s remove_rook_ceph, i.e.:" + logWarn "" + logWarn "curl https://kurl.sh/latest/tasks.sh | sudo bash -s remove_rook_ceph" + fi + fi + logFail "Unable to remove Rook." + if [ "$DID_MIGRATE_ROOK_PVCS" != "1" ]; then + logFail "Storage class migration did not succeed" + fi + + if [ -n "$MINIO_VERSION" ] && [ "$DID_MIGRATE_ROOK_OBJECT_STORE" != "1" ]; then + logFail "Object Store migration did not succeed" + fi + fi +} + +function rook_osd_phase_ready() { + if [ "$(current_rook_version)" = "1.0.4" ]; then + [ "$(kubectl -n rook-ceph get cephcluster rook-ceph --template '{{.status.state}}')" = 'Created' ] + else + [ "$(kubectl -n rook-ceph get cephcluster rook-ceph --template '{{.status.phase}}')" = 'Ready' ] + fi +} + +function current_rook_version() { + kubectl -n rook-ceph get deploy rook-ceph-operator -oyaml 2>/dev/null \ + | grep ' image: ' \ + | awk -F':' 'NR==1 { print $3 }' \ + | sed 's/v\([^-]*\).*/\1/' +} + +function current_ceph_version() { + kubectl -n rook-ceph get deployment rook-ceph-mgr-a -o jsonpath='{.metadata.labels.ceph-version}' 2>/dev/null \ + | awk -F'-' '{ print $1 }' +} + +function rook_operator_ready() { + local rook_status_phase= + local rook_status_msg= + rook_status_phase=$(kubectl -n rook-ceph get cephcluster rook-ceph --template '{{.status.phase}}') + rook_status_msg=$(kubectl -n rook-ceph get cephcluster rook-ceph --template '{{.status.message}}') + if [ "$rook_status_phase" != "Ready" ]; then + log "Rook operator is not ready: $rook_status_msg" + return 1 + fi + return 0 +} + +function rook_is_healthy_to_upgrade() { + log "Awaiting 2 minutes to check Rook Ceph Pod(s) are Running" + if ! 
spinner_until 120 check_for_running_pods "rook-ceph"; then + logFail "Rook Ceph has unhealthy Pod(s)" + return 1 + fi + + log "Awaiting Rook Ceph health ..." + if ! $DIR/bin/kurl rook wait-for-health 600 ; then + kubectl -n rook-ceph exec deploy/rook-ceph-tools -- ceph status + logFail "Rook Ceph is unhealthy" + return 1 + fi + + log "Checking Rook Ceph versions and replicas" + kubectl -n rook-ceph get deployment -l rook_cluster=rook-ceph -o jsonpath='{range .items[*]}{.metadata.name}{" \treq/upd/avl: "}{.spec.replicas}{"/"}{.status.updatedReplicas}{"/"}{.status.readyReplicas}{" \trook-version="}{.metadata.labels.rook-version}{"\n"}{end}' + local rook_versions= + rook_versions="$(kubectl -n rook-ceph get deployment -l rook_cluster=rook-ceph -o jsonpath='{range .items[*]}{"rook-version="}{.metadata.labels.rook-version}{"\n"}{end}' | sort | uniq)" + if [ -n "${rook_versions}" ] && [ "$(echo "${rook_versions}" | wc -l)" -gt "1" ]; then + logFail "Multiple Rook versions detected" + logFail "${rook_versions}" + return 1 + fi + + log "Checking Ceph versions and replicas" + kubectl -n rook-ceph get deployment -l rook_cluster=rook-ceph -o jsonpath='{range .items[*]}{.metadata.name}{" \treq/upd/avl: "}{.spec.replicas}{"/"}{.status.updatedReplicas}{"/"}{.status.readyReplicas}{" \tceph-version="}{.metadata.labels.ceph-version}{"\n"}{end}' + local ceph_versions_found= + ceph_versions_found="$(kubectl -n rook-ceph get deployment -l rook_cluster=rook-ceph -o jsonpath='{range .items[*]}{"ceph-version="}{.metadata.labels.ceph-version}{"\n"}{end}' | sort | uniq)" + if [ -n "${ceph_versions_found}" ] && [ "$(echo "${ceph_versions_found}" | wc -l)" -gt "1" ]; then + # It is required because an Rook Ceph bug which was sorted out with the release 1.4.8 + # More info: https://github.com/rook/rook/pull/6610 + if [ "$(echo "${ceph_versions_found}" | wc -l)" == "2" ] && [ "$(echo "${ceph_versions_found}" | grep "0.0.0-0")" ]; then + log "Found two ceph versions but one of them is 0.0.0-0 
which will be ignored" + echo "${ceph_versions_found}" + else + logFail "Multiple Ceph versions detected" + logFail "${ceph_versions_found}" + return 1 + fi + fi + return 0 +} + +# Check if the kurl-migration-from-rook exists then, if not creates it +# To add DID_MIGRATE_ROOK_PVCS = "1" in order to track that the PVCs were migrated +function add_rook_pvc_migration_status() { + if ! kubectl -n kurl get configmap kurl-migration-from-rook 2>/dev/null; then + log "Creating ConfigMap to track status of migration from Rook" + kubectl create configmap kurl-migration-from-rook -n kurl + fi + kubectl patch configmap kurl-migration-from-rook -n kurl --type merge -p '{"data":{"DID_MIGRATE_ROOK_PVCS":"1"}}' + export DID_MIGRATE_ROOK_PVCS=1 +} + +# Check if the kurl-migration-from-rook exists then, if not creates it +# To add DID_MIGRATE_ROOK_PVCS = "1" in order to track that the PVCs were migrated +function add_rook_store_object_migration_status() { + if ! kubectl -n kurl get configmap kurl-migration-from-rook 2>/dev/null; then + log "Creating ConfigMap to track status of migration from Rook" + kubectl create configmap kurl-migration-from-rook -n kurl + fi + kubectl patch configmap kurl-migration-from-rook -n kurl --type merge -p '{"data":{"DID_MIGRATE_ROOK_OBJECT_STORE":"1"}}' + export DID_MIGRATE_ROOK_OBJECT_STORE=1 +} + +# shellcheck disable=SC2148 + +export PV_BASE_PATH=/opt/replicated/rook + +# rook_upgrade_maybe_report_upgrade_rook checks if rook should be upgraded before upgrading k8s, +# prompts the user to confirm the upgrade, and starts the upgrade process. +function rook_upgrade_maybe_report_upgrade_rook() { + local current_version= + current_version="$(current_rook_version)" + local desired_version="$ROOK_VERSION" + + if ! rook_upgrade_should_upgrade_rook "$current_version" "$desired_version" ; then + return + fi + + if ! rook_upgrade_prompt "$current_version" "$desired_version" ; then + bail "Not upgrading Rook" + fi + + if ! 
rook_upgrade_storage_check "$current_version" "$desired_version" ; then + bail "Not upgrading Rook" + fi + + rook_upgrade_report_upgrade_rook "$current_version" "$desired_version" + + # shellcheck disable=SC1090 + addon_source "rook" "$ROOK_VERSION" # This will undo the override from above prior to running addon_install +} + +# rook_upgrade_should_upgrade_rook checks the currently installed rook version and the desired rook +# version. If the current version is two minor versions or more less than the desired version, then +# the function will return true. +function rook_upgrade_should_upgrade_rook() { + local current_version="$1" + local desired_version="$2" + + # rook is not installed, so no upgrade + if [ -z "$current_version" ]; then + return 1 + fi + # rook is not requested to be installed, so no upgrade + if [ -z "$desired_version" ]; then + return 1 + fi + + semverParse "$current_version" + # shellcheck disable=SC2154 + local current_rook_version_major="$major" + # shellcheck disable=SC2154 + local current_rook_version_minor="$minor" + + semverParse "$desired_version" + local next_rook_version_major="$major" + local next_rook_version_minor="$minor" + # shellcheck disable=SC2154 + local next_rook_version_patch="$patch" + + # upgrades not supported for major versions not equal to 1 + if [ "$current_rook_version_major" != "1" ] || [ "$next_rook_version_major" != "1" ]; then + return 1 + fi + + # upgrade not needed for minor versions equal + if [ "$current_rook_version_minor" = "$next_rook_version_minor" ]; then + return 1 + fi + + # upgrades not supported to minor versions less than 4 + if [ "$next_rook_version_minor" -lt "4" ]; then + return 1 + # special case 1.0 to 1.4 upgrade + elif [ "$next_rook_version_minor" = "4" ]; then + # upgrades not supported from to 1.4 patch versions less than 1.4.9 + if [ "$next_rook_version_patch" -lt "9" ]; then + return 1 + fi + return 0 + fi + + # current version must be greater than or equal to desired version - 1 since the 
add-on itself + # can do single version upgrades although this is not true for minor versions less than 4 + if [ "$current_rook_version_minor" -ge "$((next_rook_version_minor - 1))" ]; then + return 1 + fi + + return 0 +} + +# rook_upgrade_prompt prompts the user to confirm the rook upgrade. +function rook_upgrade_prompt() { + local current_version="$1" + local desired_version="$2" + logWarn "$(printf "This script will upgrade Rook from %s to %s." "$current_version" "$desired_version")" + logWarn "Upgrading Rook will take some time and will place additional load on your server." + if ! "$DIR"/bin/kurl rook has-sufficient-blockdevices ; then + logWarn "In order to complete this migration, you may need to attach a blank disk to each node in the cluster for Rook to use." + fi + printf "Would you like to continue? " + + confirmN +} + +# rook_upgrade_storage_check verifies that enough disk space exists for the rook upgrade to complete +# successfully. +function rook_upgrade_storage_check() { + local current_version="$1" + local desired_version="$2" + + local archive_size= + archive_size="$(rook_upgrade_required_archive_size "$current_version" "$desired_version")" + + # 2.5x archive size for extracted files + # 1x archive size for container images + common_upgrade_storage_check "$archive_size" $((5/2)) 1 "Rook" +} + +# rook_upgrade_report_upgrade_rook reports the upgrade and starts the upgrade process. 
# rook_upgrade_report_upgrade_rook wraps rook_upgrade with add-on telemetry
# (report_addon_start/success) and sets REPORTING_CONTEXT_INFO for the duration.
function rook_upgrade_report_upgrade_rook() {
    local current_version="$1"
    local desired_version="$2"

    local from_version=
    from_version="$(common_upgrade_version_to_major_minor "$current_version")"

    local rook_upgrade_version="v2.0.0" # if you change this code, change the version
    report_addon_start "rook_${from_version}_to_${desired_version}" "$rook_upgrade_version"
    export REPORTING_CONTEXT_INFO="rook_${from_version}_to_${desired_version} $rook_upgrade_version"
    rook_upgrade "$current_version" "$desired_version"
    export REPORTING_CONTEXT_INFO=""
    report_addon_success "rook_${from_version}_to_${desired_version}" "$rook_upgrade_version"
}

# rook_upgrade upgrades will fetch the add-on and load the images for the upgrade and finally run
# the upgrade script.
function rook_upgrade() {
    local current_version="$1"
    local desired_version="$2"

    rook_disable_ekco_operator

    # when invoked in a subprocess the failure of this function will not cause the script to exit
    # sanity check that the rook version is valid
    rook_upgrade_step_versions "${ROOK_STEP_VERSIONS[*]}" "$current_version" "$desired_version" 1>/dev/null

    logStep "Upgrading Rook from $current_version to $desired_version"
    common_upgrade_print_list_of_minor_upgrades "$current_version" "$desired_version"
    echo "This may take some time."
    rook_upgrade_addon_fetch_and_load "$current_version" "$desired_version"

    rook_upgrade_prompt_missing_images "$current_version" "$desired_version"

    # delete the mutatingwebhookconfiguration and remove the rook-priority.kurl.sh label
    # as the EKCO rook-priority.kurl.sh mutating webhook is no longer necessary passed Rook
    # 1.0.4.
    kubectl label namespace rook-ceph rook-priority.kurl.sh-
    kubectl delete mutatingwebhookconfigurations rook-priority.kurl.sh --ignore-not-found

    # the 1.0 -> 1.4 hop has its own dedicated upgrade script
    if common_upgrade_is_version_included "$current_version" "$desired_version" "1.4" ; then
        addon_source "rookupgrade" "10to14"
        rookupgrade_10to14_upgrade "$current_version"

        # delete both the compressed and decompressed addon files to free up space
        rm -f "$DIR/assets/rookupgrade-10to14.tar.gz"
        rm -rf "$DIR/addons/rookupgrade/10to14"
    fi

    # if desired_version is greater than 1.4, then continue with the upgrade
    if [ "$(common_upgrade_compare_versions "$desired_version" "1.4")" = "1" ]; then
        rook_upgrade_do_rook_upgrade "$(common_upgrade_max_version "1.4" "$current_version")" "$desired_version"
    fi

    rook_enable_ekco_operator

    logSuccess "Successfully upgraded Rook from $current_version to $desired_version"
}

# rook_upgrade_do_rook_upgrade will step through each minor version upgrade from $current_version to
# $desired_version
function rook_upgrade_do_rook_upgrade() {
    local current_version="$1"
    local desired_version="$2"

    local step=
    while read -r step; do
        if [ -z "$step" ] || [ "$step" = "0.0.0" ]; then
            continue
        fi
        if ! addon_exists "rook" "$step" ; then
            bail "Rook version $step not found"
        fi
        logStep "Upgrading to Rook $step"
        # temporarily set the ROOK_VERSION since the add-on script relies on it
        local old_rook_version="$ROOK_VERSION"
        export ROOK_VERSION="$step"
        # shellcheck disable=SC1090
        addon_source "rook" "$step" # this will override the rook $ROOK_VERSION add-on functions
        if commandExists "rook_should_fail_install" ; then
            # NOTE: there is no way to know this is the correct rook version function
            if rook_should_fail_install ; then
                bail "Rook $desired_version will not be installed due to failed preflight checks"
            fi
        fi
        # NOTE: there is no way to know this is the correct rook version function
        rook # upgrade to the step version
        ROOK_VERSION="$old_rook_version"

        # if this is not the last version in the loop, then delete the addon files to free up space
        if [ "$step" != "$desired_version" ]; then
            rm -f "$DIR/assets/rook-$step.tar.gz"
            rm -rf "$DIR/addons/rook/$step"
        fi

        logSuccess "Upgraded to Rook $step successfully"
    done <<< "$(rook_upgrade_step_versions "${ROOK_STEP_VERSIONS[*]}" "$current_version" "$desired_version")"

    if [ -n "$AIRGAP_MULTI_ADDON_PACKAGE_PATH" ]; then
        # delete the airgap package files to free up space
        rm -f "$AIRGAP_MULTI_ADDON_PACKAGE_PATH"
    fi
}

# rook_upgrade_addon_fetch_and_load will fetch all add-on versions from $current_version to $desired_version.
# Dispatches to the airgap or online variant based on $AIRGAP.
function rook_upgrade_addon_fetch_and_load() {
    if [ "$AIRGAP" = "1" ]; then
        rook_upgrade_addon_fetch_and_load_airgap "$@"
    else
        rook_upgrade_addon_fetch_and_load_online "$@"
    fi
}

# rook_upgrade_addon_fetch_and_load_online will fetch all add-on versions, one at a time, from $current_version
# to $desired_version.
# rook_upgrade_addon_fetch_and_load_online downloads and loads every add-on
# package needed for the upgrade: the 10to14 bundle when the 1.0->1.4 hop is
# included, then each intermediate rook step version.
function rook_upgrade_addon_fetch_and_load_online() {
    local current_version="$1"
    local desired_version="$2"

    logStep "Downloading images required for Rook $current_version to $desired_version upgrade"

    if common_upgrade_is_version_included "$current_version" "$desired_version" "1.4" ; then
        rook_upgrade_addon_fetch_and_load_online_step "rookupgrade" "10to14"
    fi

    if [ "$(common_upgrade_compare_versions "$desired_version" "1.4")" = "1" ]; then
        local step=
        while read -r step; do
            if [ -z "$step" ] || [ "$step" = "0.0.0" ]; then
                continue
            fi
            # the target version may have an S3 override configured
            if [ "$step" = "$ROOK_VERSION" ]; then
                rook_upgrade_addon_fetch_and_load_online_step "rook" "$ROOK_VERSION" "$ROOK_S3_OVERRIDE"
            else
                rook_upgrade_addon_fetch_and_load_online_step "rook" "$step"
            fi
        done <<< "$(rook_upgrade_step_versions "${ROOK_STEP_VERSIONS[*]}" "$(common_upgrade_max_version "1.4" "$current_version")" "$desired_version")"
    fi

    logSuccess "Images loaded for Rook $current_version to $desired_version upgrade"
}

# rook_upgrade_addon_fetch_and_load_online_step will fetch an individual add-on version.
# $1 addon name, $2 version, $3 optional S3 override URL.
function rook_upgrade_addon_fetch_and_load_online_step() {
    local addon="$1"
    local version="$2"
    local s3_override="$3"

    addon_fetch "$addon" "$version" "$s3_override"
    addon_load "$addon" "$version"
}

# rook_upgrade_addon_fetch_and_load_airgap will prompt the user to fetch all add-on versions from
# $current_version to $desired_version. Skips the prompt entirely when all packages
# and images are already present on this node.
function rook_upgrade_addon_fetch_and_load_airgap() {
    local current_version="$1"
    local desired_version="$2"

    if rook_upgrade_has_all_addon_version_packages "$current_version" "$desired_version" ; then
        local node_missing_images=
        # shellcheck disable=SC2086
        node_missing_images=$(rook_upgrade_nodes_missing_images "$current_version" "$desired_version" "$(get_local_node_name)" "")

        if [ -z "$node_missing_images" ]; then
            log "All images required for Rook $current_version to $desired_version upgrade are present on this node"
            return
        fi
    fi

    logStep "Downloading images required for Rook $current_version to $desired_version upgrade"

    # collect the list of packages to prompt for, then load them all
    local addon_versions=()

    if common_upgrade_is_version_included "$current_version" "$desired_version" "1.4" ; then
        addon_versions+=( "rookupgrade-10to14" )
    fi

    if [ "$(common_upgrade_compare_versions "$desired_version" "1.4")" = "1" ]; then
        local step=
        while read -r step; do
            if [ -z "$step" ] || [ "$step" = "0.0.0" ]; then
                continue
            fi
            # the last version already included in the airgap bundle
            if [ "$step" = "$desired_version" ]; then
                continue
            fi
            addon_versions+=( "rook-$step" )
        done <<< "$(rook_upgrade_step_versions "${ROOK_STEP_VERSIONS[*]}" "$(common_upgrade_max_version "1.4" "$current_version")" "$desired_version")"
    fi

    addon_fetch_multiple_airgap "${addon_versions[@]}"

    if common_upgrade_is_version_included "$current_version" "$desired_version" "1.4" ; then
        addon_load "rookupgrade" "10to14"
    fi

    if [ "$(common_upgrade_compare_versions "$desired_version" "1.4")" = "1" ]; then
        local step=
        while read -r step; do
            if [ -z "$step" ] || [ "$step" = "0.0.0" ]; then
                continue
            fi
            addon_load "rook" "$step"
        done <<< "$(rook_upgrade_step_versions "${ROOK_STEP_VERSIONS[*]}" "$(common_upgrade_max_version "1.4" "$current_version")" "$desired_version")"
    fi

    logSuccess "Images loaded for Rook $current_version to $desired_version upgrade"
}

# rook_upgrade_has_all_addon_version_packages will return 1 if any add-on versions are missing that
# are necessary to perform the upgrade.
function rook_upgrade_has_all_addon_version_packages() {
    local current_version="$1"
    local desired_version="$2"

    if common_upgrade_is_version_included "$current_version" "$desired_version" "1.4" ; then
        if [ ! -f "addons/rookupgrade/10to14/Manifest" ]; then
            return 1
        fi
    fi

    if [ "$(common_upgrade_compare_versions "$desired_version" "1.4")" = "1" ]; then
        local step=
        while read -r step; do
            if [ -z "$step" ] || [ "$step" = "0.0.0" ]; then
                continue
            fi
            if [ ! -f "addons/rook/$step/Manifest" ]; then
                return 1
            fi
        done <<< "$(rook_upgrade_step_versions "${ROOK_STEP_VERSIONS[*]}" "$(common_upgrade_max_version "1.4" "$current_version")" "$desired_version")"
    fi

    return 0
}

# rook_upgrade_prompt_missing_images prompts the user to run the command to load the images on all
# remote nodes before proceeding.
function rook_upgrade_prompt_missing_images() {
    local current_version="$1"
    local desired_version="$2"

    local node_missing_images=
    # shellcheck disable=SC2086
    node_missing_images=$(rook_upgrade_nodes_missing_images "$current_version" "$desired_version" "" "$(get_local_node_name)")

    common_prompt_task_missing_assets "$node_missing_images" "$current_version" "$desired_version" "Rook" "rook-upgrade-load-images"
}

# rook_upgrade_nodes_missing_images will print a list of nodes that are missing images for the
# given rook versions.
# $3 target_host limits the check to one node; $4 exclude_hosts omits nodes.
function rook_upgrade_nodes_missing_images() {
    local current_version="$1"
    local desired_version="$2"
    local target_host="$3"
    local exclude_hosts="$4"

    local images_list=
    images_list="$(rook_upgrade_images_list "$current_version" "$desired_version")"

    if [ -z "$images_list" ]; then
        return
    fi

    kubernetes_nodes_missing_images "$images_list" "$target_host" "$exclude_hosts"
}

# rook_upgrade_images_list will print a list of images for the given rook versions.
function rook_upgrade_images_list() {
    local current_version="$1"
    local desired_version="$2"

    local images_list=

    if common_upgrade_is_version_included "$current_version" "$desired_version" "1.4" ; then
        images_list="$(rook_upgrade_list_rook_ceph_images_in_manifest_file "addons/rookupgrade/10to14/Manifest")"
    fi

    if [ "$(common_upgrade_compare_versions "$desired_version" "1.4")" = "1" ]; then
        local step=
        while read -r step; do
            if [ -z "$step" ] || [ "$step" = "0.0.0" ]; then
                continue
            fi
            # the last version already included in the airgap bundle
            if [ "$step" = "$desired_version" ]; then
                continue
            fi
            images_list="$(common_upgrade_merge_images_list \
                "$images_list" \
                "$(rook_upgrade_list_rook_ceph_images_in_manifest_file "addons/rook/$step/Manifest")" \
            )"
        done <<< "$(rook_upgrade_step_versions "${ROOK_STEP_VERSIONS[*]}" "$(common_upgrade_max_version "1.4" "$current_version")" "$desired_version")"
    fi

    echo "$images_list"
}

# rook_upgrade_list_rook_ceph_images_in_manifest_file will list the rook/ceph images in the given
# manifest file.
function rook_upgrade_list_rook_ceph_images_in_manifest_file() {
    local manifest_file="$1"

    local image_list=
    for image in $(grep "^image " "$manifest_file" | grep -F "rook/ceph" | awk '{print $3}' | tr '\n' ' ') ; do
        image_list=$image_list" $(canonical_image_name "$image")"
    done
    echo "$image_list" | xargs # trim whitespace
}

# rook_upgrade_step_versions returns a list of upgrade steps that need to be performed, based on
# the supplied space-delimited set of step versions, for use by other functions. This list is
# inclusive of the from_version. e.g. "1.5.12\n1.6.11\n1.7.11"
# $1 space-delimited step versions indexed by minor number, $2 from version, $3 desired version.
function rook_upgrade_step_versions() {
    local step_versions=
    read -ra step_versions <<< "$1"
    local from_version=$2
    local desired_version=$3

    local to_version=
    to_version=$(common_upgrade_version_to_major_minor "$desired_version")

    # check that major versions are the same
    local first_major=
    first_major=$(common_upgrade_major_minor_to_major "$from_version")
    local last_major=
    last_major=$(common_upgrade_major_minor_to_major "$to_version")
    if [ "$first_major" != "$last_major" ]; then
        # fix: was "accross"
        bail "Upgrade across major version from $from_version to $to_version is not supported."
    fi

    local first_minor=
    local last_minor=
    first_minor=$(common_upgrade_major_minor_to_minor "$from_version")
    last_minor=$(common_upgrade_major_minor_to_minor "$to_version")

    # the step table must contain an entry for the target minor version
    if [ "${#step_versions[@]}" -le "$last_minor" ]; then
        bail "Upgrade from $from_version to $to_version is not supported."
    fi

    # if there are no steps to perform, return
    if [ "$first_minor" -gt "$last_minor" ]; then
        return
    fi

    if [ "$desired_version" != "$to_version" ]; then
        last_minor=$((last_minor - 1)) # last version is the desired version
    fi

    local step=
    for (( step=first_minor ; step<=last_minor ; step++ )); do
        echo "${step_versions[$step]}"
    done
    if [ "$desired_version" != "$to_version" ]; then
        echo "$desired_version"
    fi
}

# rook_upgrade_tasks_load_images is called from tasks.sh to load images on remote notes for the
# rook upgrade. Accepts --from-version/--to-version/--airgap via common_upgrade_tasks_params.
function rook_upgrade_tasks_load_images() {
    local from_version=
    local to_version=
    local airgap=
    # populates the three locals above from "$@"
    common_upgrade_tasks_params "$@"

    common_task_require_param "from-version" "$from_version"
    common_task_require_param "to-version" "$to_version"

    if [ "$airgap" = "1" ]; then
        export AIRGAP=1
    fi

    export KUBECONFIG=/etc/kubernetes/admin.conf
    download_util_binaries

    if ! rook_upgrade_storage_check "$from_version" "$to_version" ; then
        bail "Failed storage check"
    fi

    if ! rook_upgrade_addon_fetch_and_load "$from_version" "$to_version" ; then
        bail "Failed to load images"
    fi
}

# rook_upgrade_required_archive_size will determine the size of the archive that will be downloaded
# to upgrade between the supplied rook versions. The amount of space required within
# $KURL_INSTALL_DIRECTORY and /var/lib/containerd or /var/lib/docker can then be derived from this
# (2x archive size in kurl, 3.5x in containerd/docker).
+function rook_upgrade_required_archive_size() {
+    local current_version="$1"
+    local desired_version="$2"
+
+    semverParse "$current_version"
+    # shellcheck disable=SC2154
+    local current_rook_version_major="$major"
+    # shellcheck disable=SC2154
+    local current_rook_version_minor="$minor"
+
+    semverParse "$desired_version"
+    local next_rook_version_major="$major"
+    local next_rook_version_minor="$minor"
+
+    # if the major versions are not '1', exit with an error
+    if [ "$current_rook_version_major" != "1" ] || [ "$next_rook_version_major" != "1" ]; then
+        bail "Rook major versions must be 1"
+    fi
+
+    # sum the size (in MB) of every step archive that the upgrade will cross
+    local total_archive_size=0
+    if [ "$current_rook_version_minor" -lt 4 ] && [ "$next_rook_version_minor" -ge 4 ]; then
+        total_archive_size=$((total_archive_size + 3400)) # 3.4 GB for the 1.0 to 1.4 archive
+        total_archive_size=$((total_archive_size + 1300)) # 1.3 GB for the 1.4 archive
+    fi
+    if [ "$current_rook_version_minor" -lt 5 ] && [ "$next_rook_version_minor" -ge 5 ]; then
+        total_archive_size=$((total_archive_size + 1400)) # 1.4 GB for the 1.5 archive
+    fi
+    if [ "$current_rook_version_minor" -lt 6 ] && [ "$next_rook_version_minor" -ge 6 ]; then
+        total_archive_size=$((total_archive_size + 1400)) # 1.4 GB for the 1.6 archive
+    fi
+    if [ "$current_rook_version_minor" -lt 7 ] && [ "$next_rook_version_minor" -ge 7 ]; then
+        total_archive_size=$((total_archive_size + 1500)) # 1.5 GB for the 1.7 archive
+    fi
+    if [ "$current_rook_version_minor" -lt 8 ] && [ "$next_rook_version_minor" -ge 8 ]; then
+        total_archive_size=$((total_archive_size + 1700)) # 1.7 GB for the 1.8 archive
+    fi
+    if [ "$current_rook_version_minor" -lt 9 ] && [ "$next_rook_version_minor" -ge 9 ]; then
+        total_archive_size=$((total_archive_size + 1800)) # 1.8 GB for the 1.9 archive
+    fi
+    if [ "$current_rook_version_minor" -lt 10 ] && [ "$next_rook_version_minor" -ge 10 ]; then
+        total_archive_size=$((total_archive_size + 1800)) # 1.8 GB for the 1.10 archive
+    fi
+
+    # add 2gb for each version past 1.10
+    # TODO handle starting from a version past 1.10
+    if [ "$next_rook_version_minor" -gt 10 ]; then
+        total_archive_size=$((total_archive_size + 2000 * (next_rook_version_minor - 10)))
+    fi
+
+    echo "$total_archive_size"
+}
+
+# longhorn_host_init_common prepares a host for longhorn: installs iscsi and nfs client packages
+# (from the archive dir passed as $1) and creates the longhorn data directory.
+function longhorn_host_init_common() {
+    # quote "$1" so an archive path containing spaces (or an empty arg) is passed through intact
+    longhorn_install_iscsi_if_missing_common "$1"
+    longhorn_install_nfs_utils_if_missing_common "$1"
+    mkdir -p /var/lib/longhorn
+    chmod 700 /var/lib/longhorn
+}
+
+function longhorn_install_iscsi_if_missing_common() {
+    local src=$1
+
+    if ! systemctl list-units | grep -q iscsid ; then
+        case "$LSB_DIST" in
+        ubuntu)
+            dpkg_install_host_archives "$src" open-iscsi
+            ;;
+
+        centos|rhel|ol|rocky|amzn)
+            if is_rhel_9_variant ; then
+                yum_ensure_host_package iscsi-initiator-utils
+            else
+                yum_install_host_archives "$src" iscsi-initiator-utils
+            fi
+            ;;
+        esac
+    fi
+
+    if ! systemctl -q is-active iscsid; then
+        systemctl start iscsid
+    fi
+
+    if ! systemctl -q is-enabled iscsid; then
+        systemctl enable iscsid
+    fi
+}
+
+function longhorn_install_nfs_utils_if_missing_common() {
+    local src=$1
+
+    if ! systemctl list-units | grep -q nfs-utils ; then
+        case "$LSB_DIST" in
+        ubuntu)
+            dpkg_install_host_archives "$src" nfs-common
+            ;;
+
+        centos|rhel|ol|rocky|amzn)
+            if is_rhel_9_variant ; then
+                yum_ensure_host_package nfs-utils
+            else
+                yum_install_host_archives "$src" nfs-utils
+            fi
+            ;;
+        esac
+    fi
+
+    if ! systemctl -q is-active nfs-utils; then
+        systemctl start nfs-utils
+    fi
+
+    if ! systemctl -q is-enabled nfs-utils; then
+        systemctl enable nfs-utils
+    fi
+}
+
+# longhorn_run_pvmigrate calls pvmigrate to migrate longhorn data to a different storage class. if
+# a failure happens it rolls back the original number of volumes and replicas.
+function longhorn_run_pvmigrate() {
+    local longhornStorageClass=$1
+    local destStorageClass=$2
+    local didRunValidationChecks=$3
+    local setDefaults=$4
+
+    # collect optional flags in an array: an unset flag contributes zero arguments, whereas the
+    # previous quoted-scalar approach passed literal empty-string args ("") to pvmigrate
+    local extraFlags=()
+    if [ "$didRunValidationChecks" == "1" ]; then
+        extraFlags+=("--skip-free-space-check")
+        extraFlags+=("--skip-preflight-validation")
+    fi
+
+    if [ "$setDefaults" == "1" ]; then
+        extraFlags+=("--set-defaults")
+    fi
+
+    if ! $BIN_PVMIGRATE --source-sc "$longhornStorageClass" --dest-sc "$destStorageClass" --rsync-image "$KURL_UTIL_IMAGE" "${extraFlags[@]}"; then
+        longhorn_restore_migration_replicas
+        return 1
+    fi
+    return 0
+}
+
+# scale down prometheus, move all 'longhorn' PVCs to provided storage class, scale up prometheus
+# Supported storage class migrations from longhorn are: 'rook' and 'openebs'
+function longhorn_to_sc_migration() {
+    local destStorageClass=$1
+    local didRunValidationChecks=$2
+    local scProvisioner
+    scProvisioner=$(kubectl get sc "$destStorageClass" -ojsonpath='{.provisioner}')
+
+    # we only support migrating to 'rook' and 'openebs' storage classes
+    if [[ "$scProvisioner" != *"rook"* ]] && [[ "$scProvisioner" != *"openebs"* ]]; then
+        bail "Longhorn to $scProvisioner migration is not supported"
+    fi
+
+    report_addon_start "longhorn-to-${scProvisioner}-migration" "v1"
+
+    # set prometheus scale if it exists
+    local ekcoScaledDown=0
+    if kubectl get namespace monitoring &>/dev/null; then
+        if kubectl -n monitoring get prometheus k8s &>/dev/null; then
+            # before scaling down prometheus, scale down ekco as it will otherwise restore the prometheus scale
+            if kubernetes_resource_exists kurl deployment ekc-operator; then
+                ekcoScaledDown=1
+                kubectl -n kurl scale deploy ekc-operator --replicas=0
+                log "Waiting for ekco pods to be removed"
+                if ! spinner_until 120 ekco_pods_gone; then
+                    logFail "Unable to scale down ekco operator"
+                    return 1
+                fi
+            fi
+
+            kubectl -n monitoring patch prometheus k8s --type='json' --patch '[{"op": "replace", "path": "/spec/replicas", value: 0}]'
+            log "Waiting for prometheus pods to be removed"
+            spinner_until 300 prometheus_pods_gone
+        fi
+    fi
+
+    # scale down ekco if kotsadm is using rqlite.
+    if kubernetes_resource_exists default statefulset kotsadm-rqlite ; then
+        if [ "$ekcoScaledDown" = "0" ]; then
+            if kubernetes_resource_exists kurl deployment ekc-operator; then
+                kubectl -n kurl scale deploy ekc-operator --replicas=0
+                log "Waiting for ekco pods to be removed"
+                if ! spinner_until 120 ekco_pods_gone; then
+                    logFail "Unable to scale down ekco operator"
+                    return 1
+                fi
+            fi
+        fi
+    fi
+
+    longhornStorageClasses=$(kubectl get storageclass | grep longhorn | grep -v '(default)' | awk '{ print $1}') # any non-default longhorn StorageClasses
+    for longhornStorageClass in $longhornStorageClasses
+    do
+        if ! longhorn_run_pvmigrate "$longhornStorageClass" "$destStorageClass" "$didRunValidationChecks" "0"; then
+            bail "Failed to migrate PVCs from $longhornStorageClass to $destStorageClass"
+        fi
+    done
+
+    longhornDefaultStorageClass=$(kubectl get storageclass | grep longhorn | grep '(default)' | awk '{ print $1}') # any default longhorn StorageClasses
+    for longhornStorageClass in $longhornDefaultStorageClass
+    do
+        if ! longhorn_run_pvmigrate "$longhornStorageClass" "$destStorageClass" "$didRunValidationChecks" "1"; then
+            bail "Failed to migrate PVCs from $longhornStorageClass to $destStorageClass"
+        fi
+        kubectl annotate storageclass "$longhornStorageClass" storageclass.kubernetes.io/is-default-class-
+    done
+
+    longhorn_restore_migration_replicas
+
+    # print success message
+    logSuccess "Migration from longhorn to $scProvisioner completed successfully!"
+    report_addon_success "longhorn-to-$scProvisioner-migration" "v1"
+}
+
+# if PVCs and object store data have both been migrated from longhorn and longhorn is no longer specified in the kURL spec, remove longhorn
+function maybe_cleanup_longhorn() {
+    if [ -z "$LONGHORN_VERSION" ]; then
+        # nothing to do if longhorn is not installed
+        if ! kubectl get ns | grep -q longhorn-system; then
+            return
+        fi
+        logStep "Removing Longhorn"
+        if [ "$DID_MIGRATE_LONGHORN_PVCS" == "1" ]; then
+            report_addon_start "longhorn-removal" "v1"
+            remove_longhorn
+            report_addon_success "longhorn-removal" "v1"
+            return
+        fi
+
+        logFail "Unable to remove Longhorn."
+        if [ "$DID_MIGRATE_LONGHORN_PVCS" != "1" ]; then
+            logFail "Storage class migration did not succeed"
+        fi
+    fi
+}
+
+# longhorn_pvs_removed returns true when we can't find any pv using the longhorn csi driver.
+function longhorn_pvs_removed() {
+    local pvs
+    pvs=$(kubectl get pv -o=jsonpath='{.items[*].spec.csi.driver}' | grep "longhorn" | wc -l)
+    [ "$pvs" = "0" ]
+}
+
+# remove_longhorn deletes everything longhorn related: deployments, CR objects, and CRDs.
+function remove_longhorn() {
+    # make sure there aren't any PVs using longhorn before deleting it
+    log "Waiting for Longhorn PVs to be removed"
+    if ! spinner_until 60 longhorn_pvs_removed; then
+        # sometimes longhorn hangs and we need to restart kubelet to make it work again, we
+        # are going to give this approach a try here before bailing out.
+        logWarn "Some Longhorn PVs are still online, trying to restart kubelet."
+        systemctl restart kubelet
+        log "Waiting for Longhorn PVs to be removed"
+        if ! spinner_until 60 longhorn_pvs_removed; then
+            logFail "There are still PVs using Longhorn."
+            logFail "Remove these PVs before continuing."
+            kubectl get pv -o=jsonpath='{.items[*].spec.csi.driver}' | grep "longhorn"
+            exit 1
+        fi
+    fi
+
+    # scale ekco to 0 replicas if it exists
+    if kubernetes_resource_exists kurl deployment ekc-operator; then
+        kubectl -n kurl scale deploy ekc-operator --replicas=0
+        log "Waiting for ekco pods to be removed"
+        if ! spinner_until 120 ekco_pods_gone; then
+            logWarn "Unable to scale down ekco operator"
+        fi
+    fi
+
+    # remove longhorn volumes first so the operator can correctly delete them.
+    log "Removing Longhorn volumes:"
+    kubectl delete volumes.longhorn.io -n longhorn-system --all
+
+    # once volumes have been gone we can remove all other longhorn CR objects.
+    log "Removing Longhorn custom resource objects - this may take some time:"
+    kubectl get crd | grep 'longhorn' | grep -v 'volumes' | awk '{ print $1 }' | xargs -I'{}' kubectl -n longhorn-system delete '{}' --all
+
+    # delete longhorn CRDs
+    log "Removing Longhorn custom resources:"
+    kubectl get crd | grep 'longhorn' | awk '{ print $1 }' | xargs -I'{}' kubectl delete crd '{}'
+
+    # delete longhorn ns
+    kubectl delete ns longhorn-system
+
+    # delete longhorn storageclass(es)
+    log "Removing Longhorn StorageClasses"
+    kubectl get storageclass | grep longhorn | awk '{ print $1 }' | xargs -I'{}' kubectl delete storageclass '{}'
+
+    # scale ekco back to 1 replicas if it exists
+    if kubernetes_resource_exists kurl deployment ekc-operator; then
+        kubectl -n kurl scale deploy ekc-operator --replicas=1
+    fi
+
+    logSuccess "Removed Longhorn successfully"
+}
+
+# longhorn_prepare_for_migration checks if longhorn is healthy and if it is, it will scale down all pods mounting
+# longhorn volumes. if a failure happens during the preparation phase the migration won't be executed and the user will
+# receive a message to restore the cluster to its previous state.
+function longhorn_prepare_for_migration() {
+    if "$DIR"/bin/kurl longhorn prepare-for-migration; then
+        return 0
+    fi
+    logFail "Preparation for longhorn migration failed. Please review the preceding messages for further details."
+    logFail "During the preparation for Longhorn, some replicas may have been scaled down to 0. Would you like to"
+    logFail "restore the system to its original state?"
+    if confirmY; then
+        log "Restoring Longhorn replicas to their original state"
+        longhorn_restore_migration_replicas
+    fi
+    return 1
+}
+
+# longhorn_restore_migration_replicas scales up all longhorn volumes, deployment and statefulset replicas to their
+# original values.
+function longhorn_restore_migration_replicas() {
+    "$DIR"/bin/kurl longhorn rollback-migration-replicas
+}
+
+# NOTE(review): shebang appears mid-stream — presumably a script-concatenation boundary produced
+# by the bundler; confirm against the build tooling.
+#!/bin/bash
+
+# kubernetes_upgrade_preflight checks if kubernetes should be upgraded, and if so prompts the user
+# to confirm the upgrade.
+function kubernetes_upgrade_preflight() {
+    local desired_version="$KUBERNETES_VERSION"
+
+    if ! kubernetes_upgrade_should_upgrade_kubernetes ; then
+        return
+    fi
+
+    local current_version=
+    current_version="$(kubernetes_upgrade_discover_min_kubernetes_version)"
+
+    if ! kubernetes_upgrade_prompt "$current_version" "$desired_version" ; then
+        bail "Not upgrading Kubernetes"
+    fi
+
+    # use CURRENT_KUBERNETES_VERSION as that is the lowest version on this node
+    if ! kubernetes_upgrade_storage_check "$CURRENT_KUBERNETES_VERSION" "$desired_version" ; then
+        bail "Not upgrading Kubernetes"
+    fi
+}
+
+# report_upgrade_kubernetes starts the kubernetes upgrade process.
+function report_upgrade_kubernetes() {
+    local desired_version="$KUBERNETES_VERSION"
+
+    # no upgrade needed: re-enable the rook operator (disabled for the upgrade) and return
+    if ! kubernetes_upgrade_should_upgrade_kubernetes ; then
+        enable_rook_ceph_operator
+        return
+    fi
+
+    local current_version=
+    current_version="$(kubernetes_upgrade_discover_min_kubernetes_version)"
+
+    kubernetes_upgrade_report_upgrade_kubernetes "$current_version" "$desired_version"
+}
+
+# kubernetes_upgrade_discover_min_kubernetes_version will return the lowest kubernetes version on
+# the cluster.
+function kubernetes_upgrade_discover_min_kubernetes_version() {
+    # prints nothing when the local version is unknown; callers should handle an empty result
+    if [ -z "$CURRENT_KUBERNETES_VERSION" ]; then
+        return
+    fi
+
+    # These versions are for the local primary
+    semverParse "$CURRENT_KUBERNETES_VERSION"
+    # shellcheck disable=SC2154
+    local min_minor="$minor"
+    # shellcheck disable=SC2154
+    local min_patch="$patch"
+
+    # Check for upgrades required on remote primaries
+    for i in "${!KUBERNETES_REMOTE_PRIMARIES[@]}" ; do
+        semverParse "${KUBERNETES_REMOTE_PRIMARY_VERSIONS[$i]}"
+        if [ "$minor" -lt "$min_minor" ] || { [ "$minor" -eq "$min_minor" ] && [ "$patch" -lt "$min_patch" ]; }; then
+            min_minor="$minor"
+            min_patch="$patch"
+        fi
+    done
+
+    # Check for upgrades required on secondaries
+    for i in "${!KUBERNETES_SECONDARIES[@]}" ; do
+        semverParse "${KUBERNETES_SECONDARY_VERSIONS[$i]}"
+        if [ "$minor" -lt "$min_minor" ] || { [ "$minor" -eq "$min_minor" ] && [ "$patch" -lt "$min_patch" ]; }; then
+            min_minor="$minor"
+            min_patch="$patch"
+        fi
+    done
+
+    # NOTE(review): the major version is hard-coded to "1" here — only the minor/patch of each
+    # node is compared; confirm this assumption holds if Kubernetes 2.x ever ships.
+    echo "1.$min_minor.$min_patch"
+}
+
+# kubernetes_upgrade_report_upgrade_kubernetes reports the upgrade and starts the upgrade process.
+function kubernetes_upgrade_report_upgrade_kubernetes() {
+    local current_version="$1"
+    local desired_version="$2"
+
+    local from_version=
+    from_version="$(common_upgrade_version_to_major_minor "$current_version")"
+
+    local kubernetes_upgrade_version="v1.0.0" # if you change this code, change the version
+    report_addon_start "kubernetes_upgrade_${from_version}_to_${desired_version}" "$kubernetes_upgrade_version"
+    export REPORTING_CONTEXT_INFO="kubernetes_upgrade_${from_version}_to_${desired_version} $kubernetes_upgrade_version"
+    kubernetes_upgrade "$current_version" "$desired_version"
+    export REPORTING_CONTEXT_INFO=""
+    report_addon_success "kubernetes_upgrade_${from_version}_to_${desired_version}" "$kubernetes_upgrade_version"
+}
+
+# kubernetes_upgrade will fetch the add-on and load the images for the upgrade and finally
+# run the upgrade script.
+function kubernetes_upgrade() {
+    local current_version="$1"
+    local desired_version="$2"
+
+    disable_rook_ceph_operator
+
+    # when invoked in a subprocess the failure of this function will not cause the script to exit
+    # sanity check that the version is valid
+    common_upgrade_step_versions "${STEP_VERSIONS[*]}" "$current_version" "$desired_version" 1>/dev/null
+
+    logStep "Upgrading Kubernetes from $current_version to $desired_version"
+    common_upgrade_print_list_of_minor_upgrades "$current_version" "$desired_version"
+    echo "This may take some time."
+    kubernetes_upgrade_addon_fetch "$current_version" "$desired_version"
+
+    kubernetes_upgrade_prompt_missing_assets "$current_version" "$desired_version"
+
+    kubernetes_upgrade_do_kubernetes_upgrade "$current_version" "$desired_version"
+
+    enable_rook_ceph_operator
+
+    logSuccess "Successfully upgraded Kubernetes from $current_version to $desired_version"
+}
+
+# kubernetes_upgrade_do_kubernetes_upgrade will step through each minor version upgrade from
+# $current_version to $desired_version
+function kubernetes_upgrade_do_kubernetes_upgrade() {
+    local current_version="$1"
+    local desired_version="$2"
+
+    local step=
+    while read -r step ; do
+        # skip blank lines and the 0.0.0 placeholder emitted for unsupported steps
+        if [ -z "$step" ] || [ "$step" = "0.0.0" ]; then
+            continue
+        fi
+        if [ ! -d "$DIR/packages/kubernetes/$step/assets" ] ; then
+            bail "Kubernetes version $step not found"
+        fi
+        logStep "Upgrading cluster to Kubernetes version $step"
+
+        # upgrade order: local primary first, then remote primaries, then workers
+        upgrade_kubernetes_local_master "$step"
+        upgrade_kubernetes_remote_masters "$step"
+        upgrade_kubernetes_workers "$step"
+
+        # if this is not the last version in the loop, then delete the addon files to free up space
+        if [ "$step" != "$desired_version" ]; then
+            rm -f "$DIR/assets/kubernetes-$step.tar.gz"
+            rm -rf "$DIR/packages/kubernetes/$step"
+        fi
+
+        # workaround as some code relies on this legacy label
+        kubectl label --overwrite node --selector="node-role.kubernetes.io/control-plane" node-role.kubernetes.io/master=
+
+        logSuccess "Cluster upgraded to Kubernetes version $step successfully"
+    done <<< "$(common_upgrade_step_versions "${STEP_VERSIONS[*]}" "$current_version" "$desired_version")"
+
+    if [ -n "$AIRGAP_MULTI_ADDON_PACKAGE_PATH" ]; then
+        # delete the airgap package files to free up space
+        rm -f "$AIRGAP_MULTI_ADDON_PACKAGE_PATH"
+    fi
+}
+
+# kubernetes_upgrade_should_upgrade_kubernetes uses the KUBERNETES_UPGRADE environment variable set
+# by discoverCurrentKubernetesVersion()
+function kubernetes_upgrade_should_upgrade_kubernetes() {
+    [ "$KUBERNETES_UPGRADE" = "1" ]
+}
+
+# kubernetes_upgrade_prompt prompts the user to confirm the kubernetes upgrade.
+function kubernetes_upgrade_prompt() {
+    local current_version="$1"
+    local desired_version="$2"
+    logWarn "$(printf "This script will upgrade Kubernetes from %s to %s." "$current_version" "$desired_version")"
+    logWarn "Upgrading Kubernetes will take some time."
+    printf "Would you like to continue? "
+
+    confirmY
+}
+
+# kubernetes_upgrade_storage_check verifies that enough disk space exists for the kubernetes
+# upgrade to complete successfully.
+function kubernetes_upgrade_storage_check() {
+    local current_version="$1"
+    local desired_version="$2"
+
+    local archive_size=
+    archive_size="$(kubernetes_upgrade_required_archive_size "$current_version" "$desired_version")"
+
+    # 2x archive size for extracted files
+    # 3.5x archive size for container images
+    # NOTE(review): $((7/2)) is bash integer division and evaluates to 3, not 3.5 — the effective
+    # container-image multiplier is 3x, understating the intent above; confirm whether
+    # common_upgrade_storage_check can accept a non-integer ratio instead.
+    common_upgrade_storage_check "$archive_size" 2 $((7/2)) "Kubernetes"
+}
+
+# kubernetes_upgrade_required_archive_size will determine the approximate size of the archive that
+# will be downloaded to upgrade between the supplied kubernetes versions. The amount of space
+# required within $KURL_INSTALL_DIRECTORY and /var/lib/containerd or /var/lib/docker can then be
+# derived from this (2x archive size in kurl, 3.5x in containerd/docker).
+function kubernetes_upgrade_required_archive_size() {
+    local current_version="$1"
+    local desired_version="$2"
+
+    # 934.8 MB is the size of the kubernetes-1.26.3.tar.gz archive which is the largest archive
+    local bundle_size_upper_bounds=935
+
+    # count one upper-bound bundle per upgrade step between the two versions
+    local total_archive_size=0
+    local step_version=
+    for step_version in $(common_upgrade_step_versions "${STEP_VERSIONS[*]}" "$current_version" "$desired_version"); do
+        if [ "$step_version" = "0.0.0" ]; then
+            continue
+        fi
+        total_archive_size=$((total_archive_size + bundle_size_upper_bounds))
+    done
+
+    echo "$total_archive_size"
+}
+
+# kubernetes_upgrade_addon_fetch will fetch all add-on versions from $current_version to
+# $desired_version, dispatching to the airgap or online implementation.
+function kubernetes_upgrade_addon_fetch() {
+    case "$AIRGAP" in
+    1)
+        kubernetes_upgrade_addon_fetch_airgap "$@"
+        ;;
+    *)
+        kubernetes_upgrade_addon_fetch_online "$@"
+        ;;
+    esac
+}
+
+# kubernetes_upgrade_addon_fetch_online will fetch all add-on versions, one at a time, from
+# $current_version to $desired_version.
+function kubernetes_upgrade_addon_fetch_online() {
+    local current_version="$1"
+    local desired_version="$2"
+
+    logStep "Downloading assets required for Kubernetes $current_version to $desired_version upgrade"
+
+    local step_version=
+    for step_version in $(common_upgrade_step_versions "${STEP_VERSIONS[*]}" "$current_version" "$desired_version"); do
+        if [ "$step_version" = "0.0.0" ]; then
+            continue
+        fi
+        kubernetes_upgrade_addon_fetch_online_step "kubernetes" "$step_version"
+    done
+
+    logSuccess "Assets loaded for Kubernetes $current_version to $desired_version upgrade"
+}
+
+# kubernetes_upgrade_addon_fetch_online_step will fetch an individual add-on version. The first
+# argument (add-on name) is accepted for interface parity but unused.
+function kubernetes_upgrade_addon_fetch_online_step() {
+    local version="$2"
+
+    kubernetes_get_host_packages_online "$version"
+}
+
+# kubernetes_upgrade_addon_fetch_airgap will prompt the user to fetch all add-on versions from
+# $current_version to $desired_version.
+function kubernetes_upgrade_addon_fetch_airgap() {
+    local current_version="$1"
+    local desired_version="$2"
+
+    # the last version already included in the airgap bundle
+    local version_less_one=
+    version_less_one="$(common_upgrade_major_minor_less_one "$desired_version")"
+
+    # skip the download entirely when every step package and image is already on this node
+    if kubernetes_upgrade_has_all_addon_version_packages "$current_version" "$version_less_one" ; then
+        local node_missing_images=
+        # shellcheck disable=SC2086
+        node_missing_images=$(kubernetes_upgrade_nodes_missing_images "$current_version" "$version_less_one" "$(get_local_node_name)" "")
+
+        if [ -z "$node_missing_images" ]; then
+            log "All assets required for Kubernetes $current_version to $desired_version upgrade are present on this node"
+            return
+        fi
+    fi
+
+    logStep "Downloading assets required for Kubernetes $current_version to $desired_version upgrade"
+
+    local addon_versions=()
+
+    local step=
+    while read -r step ; do
+        if [ -z "$step" ] || [ "$step" = "0.0.0" ]; then
+            continue
+        fi
+        addon_versions+=( "kubernetes-$step" )
+    done <<< "$(common_upgrade_step_versions "${STEP_VERSIONS[*]}" "$current_version" "$version_less_one")"
+
+    addon_fetch_multiple_airgap "${addon_versions[@]}"
+
+    logSuccess "Assets loaded for Kubernetes $current_version to $desired_version upgrade"
+}
+
+# kubernetes_upgrade_has_all_addon_version_packages will return 1 if any add-on versions are
+# missing that are necessary to perform the upgrade.
+function kubernetes_upgrade_has_all_addon_version_packages() {
+    local current_version="$1"
+    local desired_version="$2"
+
+    local step=
+    while read -r step ; do
+        if [ -z "$step" ] || [ "$step" = "0.0.0" ]; then
+            continue
+        fi
+        # a step package is considered present when its manifest exists on disk
+        if [ ! -f "packages/kubernetes/$step/Manifest" ]; then
+            return 1
+        fi
+    done <<< "$(common_upgrade_step_versions "${STEP_VERSIONS[*]}" "$current_version" "$desired_version")"
+
+    return 0
+}
+
+# kubernetes_upgrade_prompt_missing_assets prompts the user to run the command to load assets on
+# all remote nodes before proceeding.
+function kubernetes_upgrade_prompt_missing_assets() {
+    local current_version="$1"
+    local desired_version="$2"
+
+    # online installs will load assets as part of the upgrade.sh script
+    if [ "$AIRGAP" != "1" ]; then
+        return
+    fi
+
+    # if we are only upgrading one minor version, then we don't need to prompt for assets as they
+    # are part of the airgap bundle
+    local version_less_one=
+    version_less_one="$(common_upgrade_major_minor_less_one "$desired_version")"
+    if [ "$(common_upgrade_compare_versions "$current_version" "$version_less_one")" -ge "0" ]; then
+        return
+    fi
+
+    # always prompt on all nodes because assets are not only images
+    common_prompt_task_missing_assets \
+        "$(kubernetes_remote_nodes | awk '{ print $1 }')" \
+        "$current_version" "$desired_version" "Kubernetes" "kubernetes-upgrade-load-assets"
+}
+
+# kubernetes_upgrade_nodes_missing_images will print a list of nodes that are missing images for
+# the given kubernetes versions.
+function kubernetes_upgrade_nodes_missing_images() {
+    local current_version="$1"
+    local desired_version="$2"
+    local target_host="$3"
+    local exclude_hosts="$4"
+
+    local images_list=
+    images_list="$(kubernetes_upgrade_images_list "$current_version" "$desired_version")"
+
+    # no images required for this version span; nothing to check on any node
+    if [ -z "$images_list" ]; then
+        return
+    fi
+
+    kubernetes_nodes_missing_images "$images_list" "$target_host" "$exclude_hosts"
+}
+
+# kubernetes_upgrade_images_list will print a list of images for the given kubernetes versions.
+function kubernetes_upgrade_images_list() { + local current_version="$1" + local desired_version="$2" + + local images_list= + + local step= + while read -r step ; do + if [ -z "$step" ] || [ "$step" = "0.0.0" ]; then + continue + fi + images_list="$(common_upgrade_merge_images_list \ + "$images_list" \ + "$(common_list_images_in_manifest_file "packages/kubernetes/$step/Manifest")" \ + )" + done <<< "$(common_upgrade_step_versions "${STEP_VERSIONS[*]}" "$current_version" "$desired_version")" + + echo "$images_list" +} + +# kubernetes_upgrade_tasks_load_assets is called from tasks.sh to load assets on remote notes for the +# kubernetes upgrade. +function kubernetes_upgrade_tasks_load_assets() { + local from_version= + local to_version= + local airgap= + common_upgrade_tasks_params "$@" + + common_task_require_param "from-version" "$from_version" + common_task_require_param "to-version" "$to_version" + + if [ "$airgap" = "1" ]; then + export AIRGAP=1 + fi + + export KUBECONFIG=/etc/kubernetes/admin.conf + download_util_binaries + + if ! kubernetes_upgrade_storage_check "$from_version" "$to_version" ; then + bail "Failed storage check" + fi + + if ! 
kubernetes_upgrade_addon_fetch "$from_version" "$to_version" ; then + bail "Failed to load assets" + fi +} + +function upgrade_kubeadm() { + local k8sVersion=$1 + + upgrade_maybe_remove_kubeadm_network_plugin_flag "$k8sVersion" + + cp -f "$DIR/packages/kubernetes/$k8sVersion/assets/kubeadm" /usr/bin/ + chmod a+rx /usr/bin/kubeadm +} + +KUBERNETES_UPGRADE_IGNORE_PREFLIGHT_ERRORS="${KUBERNETES_UPGRADE_IGNORE_PREFLIGHT_ERRORS:-}" + +function upgrade_kubernetes_local_master() { + local targetK8sVersion="$1" + local nodeName= + nodeName="$(get_local_node_name)" + # shellcheck disable=SC2034 + local upgrading_kubernetes=true + + local nodeVersion= + nodeVersion="$(kubectl get node --no-headers "$nodeName" 2>/dev/null | awk '{ print $5 }' | sed 's/v//')" + if [ -z "$nodeVersion" ]; then + nodeVersion="$(discover_local_kubernetes_version)" + fi + + # check if the node is already at the target version + semverCompare "$nodeVersion" "$targetK8sVersion" + if [ "$SEMVER_COMPARE_RESULT" -ge "0" ]; then + log "Node $nodeName is already at Kubernetes version $targetK8sVersion" + return 0 + fi + + logStep "Upgrading local node to Kubernetes version $targetK8sVersion" + + kubernetes_load_images "$targetK8sVersion" + + upgrade_kubeadm "$targetK8sVersion" + + ( set -x; kubeadm upgrade plan "v${targetK8sVersion}" --ignore-preflight-errors="$KUBERNETES_UPGRADE_IGNORE_PREFLIGHT_ERRORS" ) + printf "%bDrain local node and apply upgrade? 
# NOTE(review): this region is a whitespace-mangled extraction; formatting below is
# reconstructed. The first line is the tail of a printf from a function whose start
# lies outside this view (it upgrades the local node in place).
%b" "$YELLOW" "$NC"
    confirmY
    kubernetes_drain "$nodeName"

    maybe_patch_node_cri_socket_annotation "$nodeName"

    spinner_kubernetes_api_stable
    # ignore-preflight-errors, do not fail on fail to pull images for airgap
    ( set -x; kubeadm upgrade apply "v$targetK8sVersion" --yes --force --ignore-preflight-errors=all )
    upgrade_etcd_image_18 "$targetK8sVersion"

    # kubelet command line argument, '--container-runtime', was removed in Kubernetes 1.27
    upgrade_should_remove_container_runtime_flag "$targetK8sVersion"

    # install the matching host packages, then restart kubelet to pick them up
    kubernetes_install_host_packages "$targetK8sVersion"
    systemctl daemon-reload
    systemctl restart kubelet

    spinner_kubernetes_api_stable
    kubectl uncordon "$nodeName"
    upgrade_delete_node_flannel "$nodeName"

    # force deleting the cache because the api server will use the stale API versions after kubeadm upgrade
    rm -rf "$HOME/.kube"

    spinner_until 120 kubernetes_node_has_version "$nodeName" "$targetK8sVersion"
    spinner_until 120 kubernetes_all_nodes_ready

    logSuccess "Local node upgraded to Kubernetes version $targetK8sVersion"
}

# Upgrade every remote primary (control-plane) node one at a time, then wait for
# the whole cluster to report Ready. $1 is the target Kubernetes version.
function upgrade_kubernetes_remote_masters() {
    local k8sVersion="$1"
    while read -r node ; do
        local nodeName=
        nodeName=$(echo "$node" | awk '{ print $1 }')
        logStep "Upgrading remote primary node $nodeName to Kubernetes version $k8sVersion"
        upgrade_kubernetes_remote_node "$node" "$k8sVersion"
        logSuccess "Remote primary node $nodeName upgraded to Kubernetes version $k8sVersion"
    done < <(try_1m kubernetes_remote_masters)
    spinner_until 120 kubernetes_all_nodes_ready
}

# Upgrade every remote worker node one at a time. $1 is the target Kubernetes version.
function upgrade_kubernetes_workers() {
    local k8sVersion="$1"
    while read -r node ; do
        local nodeName=
        nodeName=$(echo "$node" | awk '{ print $1 }')
        logStep "Upgrading remote worker node $nodeName to Kubernetes version $k8sVersion"
        upgrade_kubernetes_remote_node "$node" "$k8sVersion"
        logSuccess "Remote worker node $nodeName upgraded to Kubernetes version $k8sVersion"
    done < <(try_1m kubernetes_workers)
}

# Drive the upgrade of one remote node: skip it if already at the target version,
# drain it, print the exact upgrade command the operator must run on that node,
# then block until the node reports the target version before uncordoning it.
#   $1 - one line of output from `kubectl get nodes`
#   $2 - target Kubernetes version
function upgrade_kubernetes_remote_node() {
    # one line of output from `kubectl get nodes`
    local node="$1"
    local targetK8sVersion="$2"

    local nodeName=
    nodeName=$(echo "$node" | awk '{ print $1 }')
    local nodeVersion=
    nodeVersion="$(echo "$node" | awk '{ print $5 }' | sed 's/v//' )"

    # check if the node is already at the target version
    semverCompare "$nodeVersion" "$targetK8sVersion"
    if [ "$SEMVER_COMPARE_RESULT" -ge "0" ]; then
        log "Node $nodeName is already at Kubernetes version $targetK8sVersion"
        return 0
    fi

    # empty string when the in-cluster registry service does not exist
    DOCKER_REGISTRY_IP=$(kubectl -n kurl get service registry -o=jsonpath='{@.spec.clusterIP}' 2>/dev/null || echo "")

    printf "\n%bDrain node $nodeName to prepare for upgrade? %b" "$YELLOW" "$NC"
    confirmY
    kubernetes_drain "$nodeName"

    # accumulate the flag string appended to the remote upgrade command below
    local common_flags
    common_flags="${common_flags}$(get_docker_registry_ip_flag "${DOCKER_REGISTRY_IP}")"

    local no_proxy_addresses=""
    [ -n "$ADDITIONAL_NO_PROXY_ADDRESSES" ] && no_proxy_addresses="$ADDITIONAL_NO_PROXY_ADDRESSES"
    [ -n "$service_cidr" ] && no_proxy_addresses="${no_proxy_addresses:+$no_proxy_addresses,}$service_cidr"
    [ -n "$pod_cidr" ] && no_proxy_addresses="${no_proxy_addresses:+$no_proxy_addresses,}$pod_cidr"
    [ -n "$no_proxy_addresses" ] && common_flags="${common_flags}$(get_additional_no_proxy_addresses_flag 1 "$no_proxy_addresses")"

    common_flags="${common_flags}$(get_kurl_install_directory_flag "${KURL_INSTALL_DIRECTORY_FLAG}")"
    common_flags="${common_flags}$(get_remotes_flags)"

    printf "\n\n\tRun the upgrade script on remote node to proceed: %b%s%b\n\n" "$GREEN" "$nodeName" "$NC"

    if [ "$AIRGAP" = "1" ]; then
        printf "\t%bcat ./upgrade.sh | sudo bash -s airgap kubernetes-version=%s%s%b\n\n" "$GREEN" "$targetK8sVersion" "$common_flags" "$NC"
    else
        local prefix=
        prefix="$(build_installer_prefix "${INSTALLER_ID}" "${KURL_VERSION}" "${KURL_URL}" "${PROXY_ADDRESS}")"

        printf "\t%b %supgrade.sh | sudo bash -s kubernetes-version=%s%s%b\n\n" "$GREEN" "$prefix" "$targetK8sVersion" "$common_flags" "$NC"
    fi

    # drop the local kubectl cache (same reason as in the local-node path above:
    # stale API versions after kubeadm upgrade)
    rm -rf "$HOME/.kube"

    # -1 = no timeout: wait indefinitely for the operator to run the printed command
    spinner_until -1 kubernetes_node_has_version "$nodeName" "$targetK8sVersion"
    logSuccess "Kubernetes $targetK8sVersion detected on $nodeName"

    kubectl uncordon "$nodeName"
    upgrade_delete_node_flannel "$nodeName"
    spinner_until 120 kubernetes_all_nodes_ready
}

# In k8s 1.18 the etcd image tag changed from 3.4.3 to 3.4.3-0 but kubeadm does not rewrite the
# etcd manifest to use the new tag. When kubeadm init is run after the upgrade it switches to the
# tag and etcd takes a few minutes to restart, which often results in kubeadm init failing. This
# forces use of the updated tag so that the restart of etcd happens during upgrade when the node is
# already drained
function upgrade_etcd_image_18() {
    semverParse "$1"
    if [ "$minor" != "18" ]; then
        return 0
    fi
    local etcd_tag=
    etcd_tag=$(kubeadm config images list 2>/dev/null | grep etcd | awk -F':' '{ print $NF }')
    sed -i "s/image: k8s.gcr.io\/etcd:.*/image: k8s.gcr.io\/etcd:$etcd_tag/" /etc/kubernetes/manifests/etcd.yaml
}

# Workaround to fix "kubeadm upgrade node" error:
# "error execution phase preflight: docker is required for container runtime: exec: "docker": executable file not found in $PATH"
# See https://github.com/kubernetes/kubeadm/issues/2364
function maybe_patch_node_cri_socket_annotation() {
    local node="$1"

    # only relevant when docker is gone and containerd is the runtime
    if [ -n "$DOCKER_VERSION" ] || [ -z "$CONTAINERD_VERSION" ]; then
        return
    fi

    if kubectl get node "$node" -ojsonpath='{.metadata.annotations.kubeadm\.alpha\.kubernetes\.io/cri-socket}' | grep -q "dockershim.sock" ; then
        kubectl annotate node "$node" --overwrite "kubeadm.alpha.kubernetes.io/cri-socket=unix:///run/containerd/containerd.sock"
    fi
}

# When there has been a migration from Docker to Containerd the kubeadm-flags.env file may contain
# the flag "--network-plugin" which has been removed as of Kubernetes 1.24 and
# causes the Kubelet
# to fail with "Error: failed to parse kubelet flag: unknown flag: --network-plugin". This function
# will remove the erroneous flag from the file.
function upgrade_maybe_remove_kubeadm_network_plugin_flag() {
    local k8sVersion=$1
    if [ "$(kubernetes_version_minor "$k8sVersion")" -lt "24" ]; then
        return
    fi
    # strip the flag (and its optional value) from the kubelet flags file
    sed -i 's/ \?--network-plugin \?[^ "]*//' /var/lib/kubelet/kubeadm-flags.env
}

# delete the flannel pod on the node so that CNI plugin binaries are recreated
# workaround for https://github.com/kubernetes/kubernetes/issues/115629
function upgrade_delete_node_flannel() {
    local node="$1"

    if kubectl get ns 2>/dev/null | grep -q kube-flannel; then
        kubectl delete pod -n kube-flannel --field-selector="spec.nodeName=$node"
    fi
}

# Kubernetes 1.24 deprecated the '--container-runtime' kubelet argument in 1.24 and removed it in 1.27
# See: https://kubernetes.io/blog/2023/03/17/upcoming-changes-in-kubernetes-v1-27/#removal-of-container-runtime-command-line-argument
function upgrade_should_remove_container_runtime_flag() {
    local k8sVersion=$1
    if [ "$(kubernetes_version_minor "$k8sVersion")" -ge "27" ]; then
        sed -i 's/--container-runtime=remote //' "$KUBELET_FLAGS_FILE"
    fi
}


# Download (non-airgap only) and unpack the kurl util binaries, export/assign the
# BIN_* paths, and define the temp script/spec paths used later in the install.
function download_util_binaries() {
    if [ -z "$AIRGAP" ] && [ -n "$DIST_URL" ]; then
        package_download "${KURL_BIN_UTILS_FILE}"
        tar xzf "$(package_filepath "${KURL_BIN_UTILS_FILE}")"
    fi

    export BIN_KURL=$DIR/bin/kurl
    BIN_SYSTEM_CONFIG=$DIR/bin/config
    BIN_YAMLUTIL=$DIR/bin/yamlutil
    BIN_DOCKER_CONFIG=$DIR/bin/docker-config
    BIN_SUBNET=$DIR/bin/subnet
    BIN_INSTALLERMERGE=$DIR/bin/installermerge
    BIN_YAMLTOBASH=$DIR/bin/yamltobash
    BIN_BASHTOYAML=$DIR/bin/bashmerge
    BIN_PVMIGRATE=$DIR/bin/pvmigrate
    export BIN_ROOK_PVMIGRATOR=$DIR/bin/rook-pv-migrator

    mkdir -p /tmp/kurl-bin-utils/scripts
    CONFIGURE_SELINUX_SCRIPT=/tmp/kurl-bin-utils/scripts/configure_selinux.sh
    CONFIGURE_FIREWALLD_SCRIPT=/tmp/kurl-bin-utils/scripts/configure_firewalld.sh
    CONFIGURE_IPTABLES_SCRIPT=/tmp/kurl-bin-utils/scripts/configure_iptables.sh

    mkdir -p /tmp/kurl-bin-utils/specs
    MERGED_YAML_SPEC=/tmp/kurl-bin-utils/specs/merged.yaml
    VENDOR_PREFLIGHT_SPEC=/tmp/kurl-bin-utils/specs/vendor-preflight.yaml

    PARSED_YAML_SPEC=/tmp/kurl-bin-utils/scripts/variables.sh
}

# Merge CLI flag overrides ("$*") into the merged YAML spec via the bashmerge binary.
function apply_bash_flag_overrides() {
    if [ -n "$1" ]; then
        $BIN_BASHTOYAML -c $MERGED_YAML_SPEC -f "$*"
    fi
}

# Convert the merged YAML spec into shell variable assignments, source them,
# then remove the generated script.
function parse_yaml_into_bash_variables() {
    $BIN_YAMLTOBASH -i $MERGED_YAML_SPEC -b $PARSED_YAML_SPEC

    source $PARSED_YAML_SPEC
    rm $PARSED_YAML_SPEC
}

# Split $KUBERNETES_VERSION into the KUBERNETES_TARGET_VERSION_* globals
# (semverParse sets $major/$minor/$patch).
parse_kubernetes_target_version() {
    semverParse "$KUBERNETES_VERSION"
    KUBERNETES_TARGET_VERSION_MAJOR="$major"
    KUBERNETES_TARGET_VERSION_MINOR="$minor"
    KUBERNETES_TARGET_VERSION_PATCH="$patch"
}


function yaml_airgap() {
    # this is needed because the parsing for yaml comes after the first occasion where the $AIRGAP flag is used
    # we also account for if $INSTALLER_YAML spec has "$AIRGAP and "INSTALLER_SPEC_FILE spec turns it off"

    if [[ "$INSTALLER_YAML" =~ "airgap: true" ]]; then
        AIRGAP="1"
    fi

    if [ -n "$INSTALLER_SPEC_FILE" ]; then
        if grep -q "airgap: true" $INSTALLER_SPEC_FILE; then
            AIRGAP="1"
        fi
        if grep -q "airgap: false" $INSTALLER_SPEC_FILE; then
            AIRGAP=""
        fi
    fi
}

# Parse "name=value" style CLI arguments. Arms with empty bodies accept the flag
# without acting on it here; unknown flags abort the install.
# NOTE(review): presumably the empty-bodied flags are consumed elsewhere (e.g. via
# the YAML spec merge) — verify against callers.
function get_patch_yaml() {
    while [ "$1" != "" ]; do
        _param="$(echo "$1" | cut -d= -f1)"
        _value="$(echo "$1" | grep '=' | cut -d= -f2-)"
        case $_param in
            installer-spec-file)
                if [ -n "$_value" ]; then
                    INSTALLER_SPEC_FILE="$(readlink -f "$_value")" # resolve relative paths before we pushd
                fi
                ;;
            additional-no-proxy-addresses)
                ;;
            airgap)
                AIRGAP="1"
                ;;
            kurl-registry-ip)
                KURL_REGISTRY_IP="$_value"
                ;;
            cert-key)
                ;;
            control-plane)
                ;;
            docker-registry-ip)
                ;;
            ekco-enable-internal-load-balancer)
                ;;
            ha)
                ;;
            kubernetes-cis-compliance)
                ;;
            kubernetes-cluster-name)
                ;;
            aws-exclude-storage-class)
                ;;
            ignore-remote-load-images-prompt)
                ;;
            ignore-remote-upgrade-prompt)
                ;;
            container-log-max-size)
                ;;
            container-log-max-files)
                ;;
            kubernetes-max-pods-per-node)
                ;;
            kubeadm-token)
                ;;
            kubeadm-token-ca-hash)
                ;;
            kubernetes-load-balancer-use-first-primary)
                ;;
            kubernetes-master-address)
                ;;
            kubernetes-version)
                ;;
            kubernetes-init-ignore-preflight-errors)
                ;;
            kubernetes-upgrade-ignore-preflight-errors)
                ;;
            kurl-install-directory)
                if [ -n "$_value" ]; then
                    KURL_INSTALL_DIRECTORY_FLAG="${_value}"
                    KURL_INSTALL_DIRECTORY="$(realpath ${_value})/kurl"
                fi
                ;;
            labels)
                NODE_LABELS="$_value"
                ;;
            load-balancer-address)
                ;;
            # Legacy Command
            preflight-ignore)
                ;;
            host-preflight-ignore)
                ;;
            # Legacy Command
            preflight-ignore-warnings)
                ;;
            host-preflight-enforce-warnings)
                ;;
            dismiss-host-packages-preflight) # possibly add this to the spec
                # shellcheck disable=SC2034
                KURL_DISMISS_HOST_PACKAGES_PREFLIGHT=1
                ;;
            preserve-docker-config)
                ;;
            preserve-firewalld-config)
                ;;
            preserve-iptables-config)
                ;;
            preserve-selinux-config)
                ;;
            public-address)
                ;;
            private-address)
                ;;
            yes)
                ASSUME_YES=1
                ;;
            auto-upgrades-enabled) # no longer supported
                ;;
            primary-host)
                # repeatable flag: accumulate comma-separated list
                if [ -z "$PRIMARY_HOST" ]; then
                    PRIMARY_HOST="$_value"
                else
                    PRIMARY_HOST="$PRIMARY_HOST,$_value"
                fi
                ;;
            secondary-host)
                # repeatable flag: accumulate comma-separated list
                if [ -z "$SECONDARY_HOST" ]; then
                    SECONDARY_HOST="$_value"
                else
                    SECONDARY_HOST="$SECONDARY_HOST,$_value"
                fi
                ;;
            # deprecated flag
            force-reapply-addons)
                logWarn "WARN: 'force-reapply-addon' option is deprecated"
                ;;
            skip-system-package-install)
                SKIP_SYSTEM_PACKAGE_INSTALL=1
                ;;
            # legacy command alias
            exclude-builtin-preflights)
                EXCLUDE_BUILTIN_HOST_PREFLIGHTS=1
                ;;
            exclude-builtin-host-preflights)
                EXCLUDE_BUILTIN_HOST_PREFLIGHTS=1
                ;;
            app-version-label)
                KOTSADM_APPLICATION_VERSION_LABEL="$_value"
                ;;
            ipv6)
                IPV6_ONLY=1
                ;;
            velero-restic-timeout)
                VELERO_RESTIC_TIMEOUT="$_value"
                ;;
            velero-server-flags)
                VELERO_SERVER_FLAGS="$_value"
                ;;
            *)
                echo >&2 "Error: unknown parameter \"$_param\""
                exit 1
                ;;
        esac
        shift
    done
}

# Produce $MERGED_YAML_SPEC from $INSTALLER_SPEC_FILE and/or $INSTALLER_YAML.
function merge_yaml_specs() {
    if [ -z "$INSTALLER_SPEC_FILE" ] && [ -z "$INSTALLER_YAML" ]; then
        echo "no yaml spec found"
        bail
    fi

    if [ -z "$INSTALLER_YAML" ]; then
        cp -f $INSTALLER_SPEC_FILE $MERGED_YAML_SPEC
        ONLY_APPLY_MERGED=1
        return
    fi

    if [ -z "$INSTALLER_SPEC_FILE" ]; then
        # NOTE(review): extraction artifact — the heredoc body written to
        # $MERGED_YAML_SPEC (and at least one intervening definition, apparently an
        # HA-detection function) was lost; the fused fragment below is reproduced
        # exactly as found and is not valid bash on its own. Recover from the
        # upstream source before shipping.
        cat > $MERGED_YAML_SPEC < /tmp/vendor_kurl_installer_spec_docker.yaml </dev/null 2>&1 ; then
        if [ -n "$(kubeadm_cluster_configuration | grep 'controlPlaneEndpoint:' | sed 's/controlPlaneEndpoint: \|"//g')" ]; then
            # a non-empty controlPlaneEndpoint marks an HA cluster
            HA_CLUSTER=1
        fi
    fi
}

# Extract one addon's config ("spec.<camelCaseName>") from the merged spec as JSON.
function get_addon_config() {
    local addon_name=$1
    addon_name=$(kebab_to_camel "$addon_name")

    $BIN_YAMLUTIL -j -fp $MERGED_YAML_SPEC -jf "spec.$addon_name"
}

#!/bin/bash
# NOTE(review): stray shebang — this file appears to be a concatenation of several
# scripts (see installer/bundle.sh); this marks the start of the next bundled script.

# Render a YAML template from $DIR/yaml/<name>, applying shell expansion.
function render_yaml() {
    eval "echo \"$(cat $DIR/yaml/$1)\""
}

# Render an arbitrary file path, applying shell expansion.
function render_yaml_file() {
    eval "echo \"$(cat $1)\""
}

# Render a template via an unquoted heredoc so shell expansions apply while
# quoting/whitespace in the template survive. Fails if the file is missing.
function render_yaml_file_2() {
    local file="$1"
    # NOTE(review): line truncated by extraction; the condition continues on the
    # next line of the file.
    if [ !
-f "$file" ]; then
        logFail "File $file does not exist"
        return 1
    fi
    local data=
    data=$(< "$file")
    # feed $data back through a heredoc so expansions are applied on output
    local delimiter="__apply_shell_expansion_delimiter__"
    local command="cat <<$delimiter"$'\n'"$data"$'\n'"$delimiter"
    eval "$command"
}

# Render an arbitrary file, applying shell expansion (same mechanism as render_yaml_file).
function render_file() {
    eval "echo \"$(cat $1)\""
}

# Register a strategic-merge patch in a kustomization.yaml, using the field name
# appropriate for the kustomize version shipped with the target Kubernetes.
function insert_patches_strategic_merge() {
    local kustomization_file="$1"
    local patch_file="$2"

    # Kubernetes 1.27 uses kustomize v5 which dropped support for old, legacy style patches
    # See: https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.27.md#changelog-since-v1270
    if [ "$KUBERNETES_TARGET_VERSION_MINOR" -ge "27" ]; then
        if [[ $kustomization_file =~ "prometheus" ]] || [[ $kustomization_file =~ "rook" ]]; then
            # TODO: multi-doc patches is not currently supported in kustomize v5
            # continue using the deprecated 'patchesStrategicMerge' field until this is fixed
            # Ref: https://github.com/kubernetes-sigs/kustomize/issues/5040
            if ! grep -q "patchesStrategicMerge" "$kustomization_file"; then
                echo "patchesStrategicMerge:" >> "$kustomization_file"
            fi
            sed -i "/patchesStrategicMerge.*/a - $patch_file" "$kustomization_file"
        else
            if ! grep -q "^patches:" "$kustomization_file"; then
                echo "patches:" >> "$kustomization_file"
            fi
            sed -i "/patches:/a - path: $patch_file" "$kustomization_file"
        fi
        return
    fi

    if ! grep -q "patchesStrategicMerge" "$kustomization_file"; then
        echo "patchesStrategicMerge:" >> "$kustomization_file"
    fi

    sed -i "/patchesStrategicMerge.*/a - $patch_file" "$kustomization_file"
}

# Append a resource entry to a kustomization.yaml, creating the list if needed.
function insert_resources() {
    local kustomization_file="$1"
    local resource_file="$2"

    if ! grep -q "resources[ \"]*:" "$kustomization_file"; then
        echo "resources:" >> "$kustomization_file"
    fi

    sed -i "/resources:.*/a - $resource_file" "$kustomization_file"
}

# Append a base entry to a kustomization.yaml, falling back to 'resources' on
# kubectl/kustomize versions where 'bases' is deprecated.
function insert_bases() {
    local kustomization_file="$1"
    local base_file="$2"

    local kubectl_client_minor_version=
    if commandExists "kubectl" ; then
        kubectl_client_minor_version="$(kubectl version --short | grep -i client | awk '{ print $3 }' | cut -d '.' -f2)"
    else
        kubectl_client_minor_version="$(echo "$KUBERNETES_VERSION" | cut -d '.' -f2)"
    fi

    # bases was deprecated in kustomize v2.1.0 in favor of resources
    # https://github.com/kubernetes-sigs/kustomize/blob/661743c7e5bd8c3d9d6866b6bc0a6f0e0b0512eb/site/content/en/blog/releases/v2.1.0.md
    # https://github.com/kubernetes-sigs/kustomize#kubectl-integration
    # Kubectl version: v1.14-v1.20, Kustomize version: v2.0.3
    if [ -n "$kubectl_client_minor_version" ] && [ "$kubectl_client_minor_version" -gt "20" ]; then
        insert_resources "$kustomization_file" "$base_file"
        return
    fi

    if ! grep -q "bases[ \"]*:" "$kustomization_file"; then
        echo "bases:" >> "$kustomization_file"
    fi

    sed -i "/bases:.*/a - $base_file" "$kustomization_file"
}

# Register a JSON 6902 patch (target GVK + name/namespace and patch path) in a
# kustomization.yaml.
function insert_patches_json_6902() {
    local kustomization_file="$1"
    local patch_file="$2"
    local group="$3"
    local version="$4"
    local kind="$5"
    local name="$6"
    local namespace="$7"

    if ! grep -q "patchesJson6902" "$kustomization_file"; then
        echo "patchesJson6902:" >> "$kustomization_file"
    fi

# 'fourspace_' and 'twospace_' are used because spaces at the beginning of each line are stripped
    sed -i "/patchesJson6902.*/a- target:\n\
fourspace_ group: $group\n\
fourspace_ version: $version\n\
fourspace_ kind: $kind\n\
fourspace_ name: $name\n\
fourspace_ namespace: $namespace\n\
twospace_ path: $patch_file" "$kustomization_file"

    sed -i "s/fourspace_ / /" "$kustomization_file"
    sed -i "s/twospace_ / /" "$kustomization_file"
}

# Stage the kubeadm kustomize trees (init/ and join/) for the target version,
# selecting either the v1beta2 or v1beta3 kubeadm config resources.
function setup_kubeadm_kustomize() {
    local kubeadm_exclude=
    local kubeadm_conf_api=
    local kubeadm_cluster_config_v1beta2_file="kubeadm-cluster-config-v1beta2.yml"
    local kubeadm_cluster_config_v1beta3_file="kubeadm-cluster-config-v1beta3.yml"
    local kubeadm_init_config_v1beta2_file="kubeadm-init-config-v1beta2.yml"
    local kubeadm_init_config_v1beta3_file="kubeadm-init-config-v1beta3.yml"
    local kubeadm_join_config_v1beta2_file="kubeadm-join-config-v1beta2.yaml"
    local kubeadm_join_config_v1beta3_file="kubeadm-join-config-v1beta3.yaml"
    local kubeadm_init_src="$DIR/kustomize/kubeadm/init-orig"
    local kubeadm_join_src="$DIR/kustomize/kubeadm/join-orig"
    local kubeadm_init_dst="$DIR/kustomize/kubeadm/init"
    local kubeadm_join_dst="$DIR/kustomize/kubeadm/join"
    kubeadm_conf_api=$(kubeadm_conf_api_version)

    # Clean up the source directories for the kubeadm kustomize resources and
    # patches.
    rm -rf "$DIR/kustomize/kubeadm/init"
    rm -rf "$DIR/kustomize/kubeadm/join"
    rm -rf "$DIR/kustomize/kubeadm/init-patches"
    rm -rf "$DIR/kustomize/kubeadm/join-patches"

    # Kubernete 1.26+ will use kubeadm/v1beta3 API
    if [ "$KUBERNETES_TARGET_VERSION_MINOR" -ge "26" ]; then
        # only include kubeadm/v1beta3 resources
        kubeadm_exclude=("$kubeadm_cluster_config_v1beta2_file" "$kubeadm_init_config_v1beta2_file" "$kubeadm_join_config_v1beta2_file")
    else
        # only include kubeadm/v1beta2 resources
        kubeadm_exclude=("$kubeadm_cluster_config_v1beta3_file" "$kubeadm_init_config_v1beta3_file" "$kubeadm_join_config_v1beta3_file")
    fi

    # copy kubeadm kustomize resources
    copy_kustomize_kubeadm_resources "$kubeadm_init_src" "$kubeadm_init_dst" "${kubeadm_exclude[@]}"
    copy_kustomize_kubeadm_resources "$kubeadm_join_src" "$kubeadm_join_dst" "${kubeadm_exclude[@]}"

    # tell kustomize which resources to generate
    # NOTE: 'eval' is used so that variables embedded within variables can be rendered correctly in the shell
    eval insert_resources "$kubeadm_init_dst/kustomization.yaml" "\$kubeadm_cluster_config_${kubeadm_conf_api}_file"
    eval insert_resources "$kubeadm_init_dst/kustomization.yaml" "\$kubeadm_init_config_${kubeadm_conf_api}_file"
    eval insert_resources "$kubeadm_join_dst/kustomization.yaml" "\$kubeadm_join_config_${kubeadm_conf_api}_file"

    # create kubeadm kustomize patches directories
    mkdir -p "$DIR/kustomize/kubeadm/init-patches"
    mkdir -p "$DIR/kustomize/kubeadm/join-patches"

    if [ -n "$USE_STANDARD_PORT_RANGE" ]; then
        sed -i 's/80-60000/30000-32767/g' "$DIR/kustomize/kubeadm/init/kubeadm-cluster-config-$kubeadm_conf_api.yml"
    fi
}

# copy_kustomize_kubeadm_resources copies kubeadm kustomize resources
# from source ($1) to destination ($2) and excludes files specified as
# variable number of arguments.
# E.g. copy_kustomize_kubeadm_resources \
# "/var/lib/kurl/kustomize/kubeadm/init-orig" \
# "/var/lib/kurl/kustomize/kubeadm/init" \
# "kubeadm-cluster-config-v1beta2.yml" \
# "kubeadm-init-config-v1beta2.yml" \
# "kubeadm-join-config-v1beta2.yml"
function copy_kustomize_kubeadm_resources() {
    local kustomize_kubeadm_src_dir=$1
    local kustomize_kubeadm_dst_dir=$2
    local excluded_files=("${@:3}")

    # ensure destination exist
    mkdir -p "$kustomize_kubeadm_dst_dir"

    # copy kustomize resources from source to destination directory
    # but exclude files in $excluded_files.
    for file in "$kustomize_kubeadm_src_dir"/*; do
        filename=$(basename "$file")
        excluded=false
        for excluded_file in "${excluded_files[@]}"; do
            if [ "$filename" = "$excluded_file" ]; then
                excluded=true
                break
            fi
        done
        if ! $excluded; then
            cp "$file" "$kustomize_kubeadm_dst_dir"
        fi
    done
}

# Apply the Installer CRD and, unless the spec was already merged, record the
# original installer spec for later use.
function apply_installer_crd() {
    INSTALLER_CRD_DEFINITION="$DIR/kurlkinds/cluster.kurl.sh_installers.yaml"
    kubectl apply -f "$INSTALLER_CRD_DEFINITION"

    if [ -z "$ONLY_APPLY_MERGED" ] && [ -n "$INSTALLER_YAML" ]; then
        ORIGINAL_INSTALLER_SPEC=/tmp/kurl-bin-utils/specs/original.yaml
        # NOTE(review): extraction artifact — the heredoc body written to
        # $ORIGINAL_INSTALLER_SPEC and the definitions between this function and the
        # Weave Net iptables-cleanup function below (including that function's own
        # header and loop opening) were lost. The fused fragments are reproduced as
        # found and are not valid bash on their own; recover from upstream source.
        cat > $ORIGINAL_INSTALLER_SPEC </dev/null 2>&1 || true
    done

    if [ "$DOCKER_BRIDGE" != "$BRIDGE" ] ; then
        kubeadm_run_iptables -t filter -D FORWARD -i $DOCKER_BRIDGE -o $BRIDGE -j DROP 2>/dev/null || true
    fi

    # NOTE(review): line truncated by extraction; this iptables command continues
    # on the next line of the file.
    kubeadm_run_iptables -t filter -D INPUT -d 127.0.0.1/32 -p tcp --dport 6784 -m addrtype ! --src-type LOCAL -m conntrack !
# NOTE(review): the first line below is the tail of an iptables command truncated
# on the previous line of the file. This whole section appears to tear down
# Weave Net iptables rules, chains and interfaces (the function header was lost
# in extraction — confirm against the upstream source).
--ctstate RELATED,ESTABLISHED -m comment --comment "Block non-local access to Weave Net control port" -j DROP >/dev/null 2>&1 || true
    kubeadm_run_iptables -t filter -D INPUT -i $DOCKER_BRIDGE -p udp --dport 53 -j ACCEPT >/dev/null 2>&1 || true
    kubeadm_run_iptables -t filter -D INPUT -i $DOCKER_BRIDGE -p tcp --dport 53 -j ACCEPT >/dev/null 2>&1 || true

    if [ -n "$DOCKER_VERSION" ]; then
        DOCKER_BRIDGE_IP=$(docker run --rm --pid host --net host --privileged -v /var/run/docker.sock:/var/run/docker.sock --entrypoint=/usr/bin/weaveutil $WEAVEEXEC_IMAGE:$WEAVE_TAG bridge-ip $DOCKER_BRIDGE)

        kubeadm_run_iptables -t filter -D INPUT -i $DOCKER_BRIDGE -p tcp --dst $DOCKER_BRIDGE_IP --dport $PORT -j DROP >/dev/null 2>&1 || true
        kubeadm_run_iptables -t filter -D INPUT -i $DOCKER_BRIDGE -p udp --dst $DOCKER_BRIDGE_IP --dport $PORT -j DROP >/dev/null 2>&1 || true
        kubeadm_run_iptables -t filter -D INPUT -i $DOCKER_BRIDGE -p udp --dst $DOCKER_BRIDGE_IP --dport $(($PORT + 1)) -j DROP >/dev/null 2>&1 || true
    fi

    kubeadm_run_iptables -t filter -D FORWARD -i $BRIDGE ! -o $BRIDGE -j ACCEPT 2>/dev/null || true
    kubeadm_run_iptables -t filter -D FORWARD -o $BRIDGE -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT 2>/dev/null || true
    kubeadm_run_iptables -t filter -D FORWARD -i $BRIDGE -o $BRIDGE -j ACCEPT 2>/dev/null || true
    kubeadm_run_iptables -F WEAVE-NPC >/dev/null 2>&1 || true
    kubeadm_run_iptables -t filter -D FORWARD -o $BRIDGE -j WEAVE-NPC 2>/dev/null || true
    kubeadm_run_iptables -t filter -D FORWARD -o $BRIDGE -m state --state NEW -j NFLOG --nflog-group 86 2>/dev/null || true
    kubeadm_run_iptables -t filter -D FORWARD -o $BRIDGE -j DROP 2>/dev/null || true
    kubeadm_run_iptables -X WEAVE-NPC >/dev/null 2>&1 || true

    kubeadm_run_iptables -F WEAVE-EXPOSE >/dev/null 2>&1 || true
    kubeadm_run_iptables -t filter -D FORWARD -o $BRIDGE -j WEAVE-EXPOSE 2>/dev/null || true
    kubeadm_run_iptables -X WEAVE-EXPOSE >/dev/null 2>&1 || true

    kubeadm_run_iptables -t nat -F WEAVE >/dev/null 2>&1 || true
    kubeadm_run_iptables -t nat -D POSTROUTING -j WEAVE >/dev/null 2>&1 || true
    kubeadm_run_iptables -t nat -D POSTROUTING -o $BRIDGE -j ACCEPT >/dev/null 2>&1 || true
    kubeadm_run_iptables -t nat -X WEAVE >/dev/null 2>&1 || true

    # delete leftover weave veth interfaces (strip the '@peer' suffix from ip link names)
    for LOCAL_IFNAME in $(ip link show | grep v${CONTAINER_IFNAME}pl | cut -d ' ' -f 2 | tr -d ':') ; do
        ip link del ${LOCAL_IFNAME%@*} >/dev/null 2>&1 || true
    done
}

# Wrapper around iptables that adds -w (wait for the xtables lock) when the
# installed iptables supports it; the probe result is cached in globals.
function kubeadm_run_iptables() {
    # -w is recent addition to iptables
    if [ -z "$CHECKED_IPTABLES_W" ] ; then
        iptables -S -w >/dev/null 2>&1 && IPTABLES_W=-w
        CHECKED_IPTABLES_W=1
    fi

    iptables $IPTABLES_W "$@"
}

function kubeadm_containerd_restart() {
    systemctl restart containerd
}

# Configure containerd to trust the in-cluster registry's TLS; on IPv6-only
# clusters the registry is addressed by hostname via an /etc/hosts entry.
function kubeadm_registry_containerd_configure() {
    local registry_ip="$1"

    local server="$registry_ip"
    if [ "$IPV6_ONLY" = "1" ]; then
        server="registry.kurl.svc.cluster.local"
        # refresh the hosts entry for the registry hostname
        sed -i '/registry\.kurl\.svc\.cluster\.local/d' /etc/hosts
        echo "$registry_ip $server" >> /etc/hosts
    fi

    # NOTE(review): line truncated by extraction; the grep pattern continues on
    # the next line of the file.
    if grep -Fq
"plugins.\"io.containerd.grpc.v1.cri\".registry.configs.\"${server}\".tls" /etc/containerd/config.toml; then
        echo "Registry ${server} TLS already configured for containerd"
        return 0
    fi

    # NOTE(review): extraction artifact — the TOML heredoc appended to
    # /etc/containerd/config.toml was lost; the fused fragment below is reproduced
    # as found and is not valid bash on its own. Recover from upstream source.
    cat >> /etc/containerd/config.toml </dev/null
}

# Decide which kubeadm config API version to use: v1beta3 for Kubernetes 1.26+,
# otherwise v1beta2. Falls back to the running kubeadm's version when no target
# version is set (e.g. when invoked from tasks.sh).
function kubeadm_conf_api_version() {

    # Get kubeadm api version from the runtime
    # Enforce the use of kubeadm.k8s.io/v1beta3 api version beginning with Kubernetes 1.26+
    local kubeadm_v1beta3_min_version=
    kubeadm_v1beta3_min_version="26"
    if [ -n "$KUBERNETES_TARGET_VERSION_MINOR" ]; then
        if [ "$KUBERNETES_TARGET_VERSION_MINOR" -ge "$kubeadm_v1beta3_min_version" ]; then
            echo "v1beta3"
        else
            echo "v1beta2"
        fi
    else
        # ################################ NOTE ########################################## #
        # get the version from an existing cluster when the installer is not run # i.e. this is meant to handle cases where kubeadm config is patched from tasks.sh #

        semverParse "$(kubeadm version --output=short | sed 's/v//')"
        # shellcheck disable=SC2034
        local kube_current_version_minor="$minor"
        if [ "$kube_current_version_minor" -ge "$kubeadm_v1beta3_min_version" ]; then
            echo "v1beta3"
        else
            echo "v1beta2"
        fi
    fi
}

# kubeadm_customize_config mutates a kubeadm configuration file for Kubernetes compatibility purposes
function kubeadm_customize_config() {
    local kubeadm_patch_config=$1

    # Templatize the api version for kubeadm patches
    # shellcheck disable=SC2016
    sed -i 's|kubeadm.k8s.io/v1beta.*|kubeadm.k8s.io/$(kubeadm_conf_api_version)|' "$kubeadm_patch_config"

    # Kubernetes 1.24 deprecated the '--container-runtime' kubelet argument in 1.24 and removed it in 1.27
    # See: https://kubernetes.io/blog/2023/03/17/upcoming-changes-in-kubernetes-v1-27/#removal-of-container-runtime-command-line-argument
    if [ "$KUBERNETES_TARGET_VERSION_MINOR" -ge "24" ]; then
        # remove kubeletExtraArgs.container-runtime from the containerd kubeadm addon patch
        sed -i '/container-runtime:/d' "$kubeadm_patch_config"
    fi
}


# containerd_patch_for_minor_version returns the maximum patch version for the given minor version. uses
# $CONTAINERD_STEP_VERSIONS to determine the max patch. if the minor version is not found, returns an
# empty string.
function containerd_patch_for_minor_version() {
    local for_major=$1
    local for_minor=$2
    for i in "${CONTAINERD_STEP_VERSIONS[@]}"; do
        semverParse "$i"
        if [ "$major" == "$for_major" ] && [ "$minor" == "$for_minor" ]; then
            echo "$patch"
            return 0
        fi
    done
    echo ""
}

# containerd_migration_steps returns an array with all steps necessary to migrate from the current containerd
# version to the desired version.
function containerd_migration_steps() {
    local from_version=$1
    local to_version=$2

    local current_minor
    local current_major
    semverParse "$from_version"
    current_major="$major"
    # start stepping from the minor version after the currently installed one
    current_minor=$((minor + 1))

    local install_minor
    semverParse "$to_version"
    install_minor="$minor"
    install_major="$major"

    local steps=()
    while [ "$current_minor" -lt "$install_minor" ]; do
        max_patch=$(containerd_patch_for_minor_version "$current_major" "$current_minor")
        if [ -z "$max_patch" ]; then
            bail "error: could not find patch for containerd minor version v$current_major.$current_minor"
        fi
        steps+=("$install_major.$current_minor.$max_patch")
        current_minor=$((current_minor + 1))
    done
    steps+=("$to_version")

    echo "${steps[@]}"
}

# containerd_upgrade_between_majors returns true if the upgrade is between major versions.
function containerd_upgrade_between_majors() {
    local from_version=$1
    local to_version=$2

    local from_major
    semverParse "$from_version"
    from_major="$major"

    local to_major
    semverParse "$to_version"
    to_major="$major"

    test "$from_major" -ne "$to_major"
}

# containerd_upgrade_is_possible verifies if an upgrade between the provided containerd
# versions is possible.
# we verify if the installed containerd is known to us, if there
# is no major versions upgrades and if the minor version upgrade is not too big.
function containerd_upgrade_is_possible() {
    local from_version=$1
    local to_version=$2

    # so far we don't have containerd version 2 and when it comes we don't know exactly
    # from what version we will be able to upgrade to it from. so, for now, we block
    # the attempt so when the version arrives the testgrid will fail.
    if containerd_upgrade_between_majors "$from_version" "$to_version" ; then
        bail "Upgrade between containerd major versions is not supported by this installer."
    fi

    # SEMVER_COMPARE_RESULT=1 means from > to, i.e. a downgrade
    semverCompare "$from_version" "$to_version"
    if [ "$SEMVER_COMPARE_RESULT" = "1" ]; then
        bail "Downgrading containerd (from v$from_version to v$to_version) is not supported."
    fi

    semverParse "$from_version"
    local current_minor
    current_minor="$minor"

    semverParse "$to_version"
    local installing_minor
    installing_minor="$minor"

    # at most two minor versions may be skipped in one upgrade
    if [ "$installing_minor" -gt "$((current_minor + 2))" ]; then
        logFail "Cannot upgrade containerd from v$from_version to v$to_version"
        logFail "This installer supports only containerd upgrades spanning two minor versions."
        bail "Please consider upgrading to an older containerd version first."
    fi
}

# containerd_evaluate_upgrade verifies if containerd upgrade between the two provided versions
# is possible and in case it is, returns the list of steps necessary to perform the upgrade.
# each step is a version of containerd that we need to install.
export CONTAINERD_INSTALL_VERSIONS=()
function containerd_evaluate_upgrade() {
    local from_version=$1
    local to_version=$2
    echo "Evaluating if an upgrade from containerd v$from_version to v$to_version is possible."
    containerd_upgrade_is_possible "$from_version" "$to_version"
    echo "Containerd upgrade from v$from_version to v$to_version is possible."
    for version in $(containerd_migration_steps "$from_version" "$to_version"); do
        CONTAINERD_INSTALL_VERSIONS+=("$version")
    done
}


KUBERNETES_INIT_IGNORE_PREFLIGHT_ERRORS="${KUBERNETES_INIT_IGNORE_PREFLIGHT_ERRORS:-}"

# Initialize the Kubernetes control plane on this node via kubeadm init, wiring
# in load-balancer/HA handling and the kubeadm kustomize patches.
# NOTE(review): this function continues beyond the end of the visible region;
# only the portion visible here is reproduced.
function init() {
    logStep "Initialize Kubernetes"

    kubernetes_maybe_generate_bootstrap_token

    local addr="$PRIVATE_ADDRESS"
    local port="6443"
    API_SERVICE_ADDRESS="$PRIVATE_ADDRESS:6443"
    if [ "$HA_CLUSTER" = "1" ]; then
        addr="$LOAD_BALANCER_ADDRESS"
        port="$LOAD_BALANCER_PORT"
    fi
    # format-ip-address brackets IPv6 addresses as needed
    addr=$($DIR/bin/kurl netutil format-ip-address "$addr")
    API_SERVICE_ADDRESS="$addr:$port"

    local oldLoadBalancerAddress=$(kubernetes_load_balancer_address)
    if commandExists ekco_handle_load_balancer_address_change_pre_init; then
        ekco_handle_load_balancer_address_change_pre_init $oldLoadBalancerAddress $LOAD_BALANCER_ADDRESS
    fi
    if [ "$EKCO_ENABLE_INTERNAL_LOAD_BALANCER" = "1" ] && commandExists ekco_bootstrap_internal_lb; then
        ekco_bootstrap_internal_lb
    fi

    local kustomize_kubeadm_init="$DIR/kustomize/kubeadm/init"

    local NODE_HOSTNAME=
    NODE_HOSTNAME=$(get_local_node_name)
    # if the hostname is overridden, patch the kubeadm config to use the overridden hostname
    if [ "$NODE_HOSTNAME" != "$(hostname | tr '[:upper:]' '[:lower:]')" ]; then
        render_yaml_file_2 "$kustomize_kubeadm_init/kubeadm-init-hostname.patch.tmpl.yaml" \
            > "$kustomize_kubeadm_init/kubeadm-init-hostname.patch.yaml"
        insert_patches_strategic_merge \
            $kustomize_kubeadm_init/kustomization.yaml \
            kubeadm-init-hostname.patch.yaml
    fi

    CERT_KEY=
    CERT_KEY_EXPIRY=
    if [ "$HA_CLUSTER" = "1" ]; then
        # random 64-hex-char certificate key, valid for 2 hours
        CERT_KEY=$(< /dev/urandom tr -dc a-f0-9 | head -c64)
        CERT_KEY_EXPIRY=$(TZ="UTC" date -d "+2 hour" --rfc-3339=second | sed 's/ /T/')
        insert_patches_strategic_merge \
            $kustomize_kubeadm_init/kustomization.yaml \
            patch-certificate-key.yaml
    fi

    # kustomize can merge multiple list patches in some cases but it is not working for me on the
ClusterConfiguration.apiServer.certSANs list + if [ -n "$PUBLIC_ADDRESS" ] && [ -n "$LOAD_BALANCER_ADDRESS" ]; then + insert_patches_strategic_merge \ + $kustomize_kubeadm_init/kustomization.yaml \ + patch-public-and-load-balancer-address.yaml + elif [ -n "$PUBLIC_ADDRESS" ]; then + insert_patches_strategic_merge \ + $kustomize_kubeadm_init/kustomization.yaml \ + patch-public-address.yaml + elif [ -n "$LOAD_BALANCER_ADDRESS" ]; then + insert_patches_strategic_merge \ + $kustomize_kubeadm_init/kustomization.yaml \ + patch-load-balancer-address.yaml + fi + + # conditional kubelet configuration fields + if [ "$KUBERNETES_TARGET_VERSION_MINOR" -ge "21" ]; then + insert_patches_strategic_merge \ + $kustomize_kubeadm_init/kustomization.yaml \ + patch-kubelet-21.yaml + else + insert_patches_strategic_merge \ + $kustomize_kubeadm_init/kustomization.yaml \ + patch-kubelet-pre21.yaml + fi + if [ "$KUBERNETES_CIS_COMPLIANCE" == "1" ]; then + insert_patches_strategic_merge \ + $kustomize_kubeadm_init/kustomization.yaml \ + patch-kubelet-cis-compliance.yaml + + if [ "$KUBERNETES_TARGET_VERSION_MINOR" -ge "20" ]; then + insert_patches_strategic_merge \ + $kustomize_kubeadm_init/kustomization.yaml \ + patch-cluster-config-cis-compliance.yaml + else + insert_patches_strategic_merge \ + $kustomize_kubeadm_init/kustomization.yaml \ + patch-cluster-config-cis-compliance-insecure-port.yaml + fi + fi + + if [ "$KUBE_RESERVED" == "1" ]; then + # gets the memory and CPU capacity of the worker node + MEMORY_MI=$(free -m | grep Mem | awk '{print $2}') + CPU_MILLICORES=$(($(nproc) * 1000)) + # calculates the amount of each resource to reserve + mebibytes_to_reserve=$(get_memory_mebibytes_to_reserve $MEMORY_MI) + cpu_millicores_to_reserve=$(get_cpu_millicores_to_reserve $CPU_MILLICORES) + + insert_patches_strategic_merge \ + $kustomize_kubeadm_init/kustomization.yaml \ + patch-kubelet-reserve-compute-resources.yaml + + render_yaml_file 
$kustomize_kubeadm_init/patch-kubelet-reserve-compute-resources.tpl > $kustomize_kubeadm_init/patch-kubelet-reserve-compute-resources.yaml + fi + if [ -n "$EVICTION_THRESHOLD" ]; then + insert_patches_strategic_merge \ + $kustomize_kubeadm_init/kustomization.yaml \ + patch-kubelet-eviction-threshold.yaml + + render_yaml_file $kustomize_kubeadm_init/patch-kubelet-eviction-threshold.tpl > $kustomize_kubeadm_init/patch-kubelet-eviction-threshold.yaml + fi + if [ -n "$SYSTEM_RESERVED" ]; then + insert_patches_strategic_merge \ + $kustomize_kubeadm_init/kustomization.yaml \ + patch-kubelet-system-reserved.yaml + + render_yaml_file $kustomize_kubeadm_init/patch-kubelet-system-reserved.tpl > $kustomize_kubeadm_init/patch-kubelet-system-reserved.yaml + fi + + if [ -n "$CONTAINER_LOG_MAX_SIZE" ]; then + insert_patches_strategic_merge \ + $kustomize_kubeadm_init/kustomization.yaml \ + patch-kubelet-container-log-max-size.yaml + + render_yaml_file $kustomize_kubeadm_init/patch-kubelet-container-log-max-size.tpl > $kustomize_kubeadm_init/patch-kubelet-container-log-max-size.yaml + fi + if [ -n "$CONTAINER_LOG_MAX_FILES" ]; then + insert_patches_strategic_merge \ + $kustomize_kubeadm_init/kustomization.yaml \ + patch-kubelet-container-log-max-files.yaml + + render_yaml_file $kustomize_kubeadm_init/patch-kubelet-container-log-max-files.tpl > $kustomize_kubeadm_init/patch-kubelet-container-log-max-files.yaml + fi + if [ -n "$KUBERNETES_MAX_PODS_PER_NODE" ]; then + insert_patches_strategic_merge \ + $kustomize_kubeadm_init/kustomization.yaml \ + patch-kubelet-max-pods.yaml + + render_yaml_file_2 $kustomize_kubeadm_init/patch-kubelet-max-pods.tmpl.yaml > $kustomize_kubeadm_init/patch-kubelet-max-pods.yaml + fi + + kubernetes_configure_pause_image "$kustomize_kubeadm_init" + + # Add kubeadm init patches from addons. 
+ for patch in $(ls -1 ${kustomize_kubeadm_init}-patches/* 2>/dev/null || echo); do + patch_basename="$(basename $patch)" + cp "$patch" "$kustomize_kubeadm_init/$patch_basename" + + kubeadm_customize_config "$kustomize_kubeadm_init/$patch_basename" + insert_patches_strategic_merge \ + $kustomize_kubeadm_init/kustomization.yaml \ + "$patch_basename" + done + mkdir -p "$KUBEADM_CONF_DIR" + + # Generate kubeadm config + kubectl kustomize $kustomize_kubeadm_init > "$KUBEADM_CONF_DIR/kubeadm-init-raw.yaml" + render_yaml_file "$KUBEADM_CONF_DIR/kubeadm-init-raw.yaml" > "$KUBEADM_CONF_FILE" + + # kustomize requires assests have a metadata field while kubeadm config will reject yaml containing it + # this uses a go binary found in kurl/cmd/yamlutil to strip the metadata field from the yaml + # + cp $KUBEADM_CONF_FILE $KUBEADM_CONF_DIR/kubeadm_conf_copy_in + $DIR/bin/yamlutil -r -fp $KUBEADM_CONF_DIR/kubeadm_conf_copy_in -yp metadata + mv $KUBEADM_CONF_DIR/kubeadm_conf_copy_in $KUBEADM_CONF_FILE + + # When no_proxy changes kubeadm init rewrites the static manifests and fails because the api is + # restarting. Trigger the restart ahead of time and wait for it to be healthy. + if [ -f "/etc/kubernetes/manifests/kube-apiserver.yaml" ] && [ -n "$no_proxy" ] && ! grep -Fq "$no_proxy" /etc/kubernetes/manifests/kube-apiserver.yaml ; then + kubeadm init phase control-plane apiserver --config $KUBEADM_CONF_FILE + sleep 2 + if ! spinner_until 60 kubernetes_api_is_healthy; then + echo "Failed to wait for kubernetes API restart after no_proxy change" # continue + fi + fi + + if [ "$HA_CLUSTER" = "1" ]; then + UPLOAD_CERTS="--upload-certs" + fi + + # kubeadm init temporarily taints this node which causes rook to move any mons on it and may + # lead to a loss of quorum + disable_rook_ceph_operator + + # since K8s 1.19.1 kubeconfigs point to local API server even in HA setup. 
When upgrading from + # earlier versions and using a load balancer, kubeadm init will bail because the kubeconfigs + # already exist pointing to the load balancer + rm -rf /etc/kubernetes/*.conf + + # Regenerate api server cert in case load balancer address changed + if [ -f /etc/kubernetes/pki/apiserver.crt ]; then + mv -f /etc/kubernetes/pki/apiserver.crt /tmp/ + fi + if [ -f /etc/kubernetes/pki/apiserver.key ]; then + mv -f /etc/kubernetes/pki/apiserver.key /tmp/ + fi + + # ensure that /etc/kubernetes/audit.yaml exists + cp $kustomize_kubeadm_init/audit.yaml /etc/kubernetes/audit.yaml + mkdir -p /var/log/apiserver + + if [ -z "$KUBERNETES_INIT_IGNORE_PREFLIGHT_ERRORS" ]; then + KUBERNETES_INIT_IGNORE_PREFLIGHT_ERRORS=all + fi + + set -o pipefail + cmd_retry 3 kubeadm init \ + --ignore-preflight-errors="$KUBERNETES_INIT_IGNORE_PREFLIGHT_ERRORS" \ + --config $KUBEADM_CONF_FILE \ + $UPLOAD_CERTS \ + | tee /tmp/kubeadm-init + set +o pipefail + + # Node would be cordoned if migrated from docker to containerd + kubectl uncordon "$(get_local_node_name)" + + if [ -n "$LOAD_BALANCER_ADDRESS" ]; then + addr=$($DIR/bin/kurl netutil format-ip-address "$PRIVATE_ADDRESS") + spinner_until 120 cert_has_san "$addr:6443" "$LOAD_BALANCER_ADDRESS" + fi + + if commandExists ekco_cleanup_bootstrap_internal_lb; then + ekco_cleanup_bootstrap_internal_lb + fi + + spinner_kubernetes_api_stable + + exportKubeconfig + KUBEADM_TOKEN_CA_HASH=$(cat /tmp/kubeadm-init | grep 'discovery-token-ca-cert-hash' | awk '{ print $2 }' | head -1) + + if [ "$KUBERNETES_CIS_COMPLIANCE" == "1" ]; then + if [ "$KUBERNETES_TARGET_VERSION_MINOR" -le "24" ]; then + kubectl apply -f $kustomize_kubeadm_init/pod-security-policy-privileged.yaml + # patch 'PodSecurityPolicy' to kube-apiserver and wait for kube-apiserver to reconcile + old_admission_plugins='--enable-admission-plugins=NodeRestriction' + new_admission_plugins='--enable-admission-plugins=NodeRestriction,PodSecurityPolicy' + sed -i 
"s%$old_admission_plugins%$new_admission_plugins%g" /etc/kubernetes/manifests/kube-apiserver.yaml + spinner_kubernetes_api_stable + fi + + # create an 'etcd' user and group and ensure that it owns the etcd data directory (we don't care what userid these have, as etcd will still run as root) + useradd etcd || true + groupadd etcd || true + chown -R etcd:etcd /var/lib/etcd + fi + + wait_for_nodes + + # workaround as some code relies on this legacy label + kubectl label --overwrite node "$(get_local_node_name)" node-role.kubernetes.io/master= + + enable_rook_ceph_operator + + DID_INIT_KUBERNETES=1 + logSuccess "Kubernetes Master Initialized" + + local currentLoadBalancerAddress=$(kubernetes_load_balancer_address) + if [ "$currentLoadBalancerAddress" != "$oldLoadBalancerAddress" ]; then + # restart scheduler and controller-manager on this node so they use the new address + mv /etc/kubernetes/manifests/kube-scheduler.yaml /tmp/ && sleep 1 && mv /tmp/kube-scheduler.yaml /etc/kubernetes/manifests/ + mv /etc/kubernetes/manifests/kube-controller-manager.yaml /tmp/ && sleep 1 && mv /tmp/kube-controller-manager.yaml /etc/kubernetes/manifests/ + + if kubernetes_has_remotes; then + if commandExists ekco_handle_load_balancer_address_change_kubeconfigs; then + ekco_handle_load_balancer_address_change_kubeconfigs + else + # Manual steps for ekco < 0.11.0 + printf "${YELLOW}\nThe load balancer address has changed. Run the following on all remote nodes to use the new address${NC}\n" + printf "\n" + if [ "$AIRGAP" = "1" ]; then + printf "${GREEN} cat ./tasks.sh | sudo bash -s set-kubeconfig-server https://${currentLoadBalancerAddress}${NC}\n" + else + local prefix= + prefix="$(build_installer_prefix "${INSTALLER_ID}" "${KURL_VERSION}" "${KURL_URL}" "${PROXY_ADDRESS}")" + + printf "${GREEN} ${prefix}tasks.sh | sudo bash -s set-kubeconfig-server https://${currentLoadBalancerAddress}${NC}\n" + fi + + printf "\n" + printf "Continue? 
" + confirmN + fi + + if commandExists ekco_handle_load_balancer_address_change_post_init; then + ekco_handle_load_balancer_address_change_post_init $oldLoadBalancerAddress $LOAD_BALANCER_ADDRESS + fi + fi + + # restart kube-proxies so they use the new address + kubectl -n kube-system delete pods --selector=k8s-app=kube-proxy + fi + + labelNodes + kubectl cluster-info + + #approve csrs on the masters if cis compliance is enabled + if [ "$KUBERNETES_CIS_COMPLIANCE" == "1" ]; then + kubectl get csr | grep 'Pending' | grep 'kubelet-serving' | awk '{ print $1 }' | xargs -I {} kubectl certificate approve {} + fi + + # create kurl namespace if it doesn't exist + kubectl get ns kurl >/dev/null 2>&1 || kubectl create ns kurl --save-config + + spinner_until 120 kubernetes_default_service_account_exists + spinner_until 120 kubernetes_service_exists + + logSuccess "Cluster Initialized" + + kubernetes_configure_coredns + + if commandExists registry_init; then + registry_init + + if [ -n "$CONTAINERD_VERSION" ]; then + ${K8S_DISTRO}_registry_containerd_configure "${DOCKER_REGISTRY_IP}" + ${K8S_DISTRO}_containerd_restart + spinner_kubernetes_api_healthy + fi + fi + + # install the kurl in-cluster troubleshoot supportbundle spec + kubectl -n kurl apply -f "$DIR/manifests/troubleshoot.yaml" +} + +function kubeadm_post_init() { + BOOTSTRAP_TOKEN_EXPIRY=$(kubeadm token list | grep $BOOTSTRAP_TOKEN | awk '{print $3}') + kurl_config +} + +function kubernetes_maybe_generate_bootstrap_token() { + if [ -z "$BOOTSTRAP_TOKEN" ]; then + logStep "generate kubernetes bootstrap token" + BOOTSTRAP_TOKEN=$(kubeadm token generate) + fi + echo "Kubernetes bootstrap token: ${BOOTSTRAP_TOKEN}" + echo "This token will expire in 24 hours" +} + +function kurl_config() { + logStep "Persisting the kurl installer spec" + if kubernetes_resource_exists kube-system configmap kurl-config; then + kubectl -n kube-system delete configmap kurl-config + fi + + kubectl -n kube-system create configmap kurl-config \ 
+ --from-literal=kurl_url="$KURL_URL" \ + --from-literal=installer_id="$INSTALLER_ID" \ + --from-literal=ha="$HA_CLUSTER" \ + --from-literal=airgap="$AIRGAP" \ + --from-literal=ca_hash="$KUBEADM_TOKEN_CA_HASH" \ + --from-literal=docker_registry_ip="$DOCKER_REGISTRY_IP" \ + --from-literal=kubernetes_api_address="$API_SERVICE_ADDRESS" \ + --from-literal=bootstrap_token="$BOOTSTRAP_TOKEN" \ + --from-literal=bootstrap_token_expiration="$BOOTSTRAP_TOKEN_EXPIRY" \ + --from-literal=cert_key="$CERT_KEY" \ + --from-literal=upload_certs_expiration="$CERT_KEY_EXPIRY" \ + --from-literal=service_cidr="$SERVICE_CIDR" \ + --from-literal=pod_cidr="$POD_CIDR" \ + --from-literal=kurl_install_directory="$KURL_INSTALL_DIRECTORY_FLAG" \ + --from-literal=additional_no_proxy_addresses="$ADDITIONAL_NO_PROXY_ADDRESSES" \ + --from-literal=kubernetes_cis_compliance="$KUBERNETES_CIS_COMPLIANCE" + + logSuccess "Kurl installer spec was successfully persisted in the kurl configmap" +} + +function outro() { + echo + if [ -z "$PUBLIC_ADDRESS" ]; then + if [ -z "$PRIVATE_ADDRESS" ]; then + PUBLIC_ADDRESS="" + PRIVATE_ADDRESS="" + else + PUBLIC_ADDRESS="$PRIVATE_ADDRESS" + fi + fi + + local common_flags + common_flags="${common_flags}$(get_docker_registry_ip_flag "${DOCKER_REGISTRY_IP}")" + service_cidr=$(kubectl -n kube-system get cm kurl-config -ojsonpath='{ .data.service_cidr }') + pod_cidr=$(kubectl -n kube-system get cm kurl-config -ojsonpath='{ .data.pod_cidr }') + + local no_proxy_addresses="" + + [ -n "$ADDITIONAL_NO_PROXY_ADDRESSES" ] && no_proxy_addresses="$ADDITIONAL_NO_PROXY_ADDRESSES" + [ -n "$service_cidr" ] && no_proxy_addresses="${no_proxy_addresses:+$no_proxy_addresses,}$service_cidr" + [ -n "$pod_cidr" ] && no_proxy_addresses="${no_proxy_addresses:+$no_proxy_addresses,}$pod_cidr" + [ -n "$no_proxy_addresses" ] && common_flags="${common_flags}$(get_additional_no_proxy_addresses_flag 1 "$no_proxy_addresses")" + + common_flags="${common_flags}$(get_kurl_install_directory_flag 
"${KURL_INSTALL_DIRECTORY_FLAG}")" + common_flags="${common_flags}$(get_remotes_flags)" + common_flags="${common_flags}$(get_ipv6_flag)" + + KUBEADM_TOKEN_CA_HASH=$(cat /tmp/kubeadm-init | grep 'discovery-token-ca-cert-hash' | awk '{ print $2 }' | head -1) + + printf "\n" + printf "\t\t${GREEN}Installation${NC}\n" + printf "\t\t${GREEN} Complete ✔${NC}\n" + + addon_outro + printf "\n" + kubeconfig_setup_outro + printf "\n" + if [ "$OUTRO_NOTIFIY_TO_RESTART_DOCKER" = "1" ]; then + printf "\n" + printf "\n" + printf "The local /etc/docker/daemon.json has been merged with the spec from the installer, but has not been applied. To apply restart docker." + printf "\n" + printf "\n" + printf "${GREEN} systemctl daemon-reload${NC}\n" + printf "${GREEN} systemctl restart docker${NC}\n" + printf "\n" + printf "These settings will automatically be applied on the next restart." + printf "\n" + fi + printf "\n" + printf "\n" + + local prefix= + prefix="$(build_installer_prefix "${INSTALLER_ID}" "${KURL_VERSION}" "${KURL_URL}" "${PROXY_ADDRESS}")" + + if [ "$HA_CLUSTER" = "1" ]; then + printf "Master node join commands expire after two hours, and worker node join commands expire after 24 hours.\n" + printf "\n" + if [ "$AIRGAP" = "1" ]; then + printf "To generate new node join commands, run ${GREEN}cat ./tasks.sh | sudo bash -s join_token ha airgap${NC} on an existing master node.\n" + else + printf "To generate new node join commands, run ${GREEN}${prefix}tasks.sh | sudo bash -s join_token ha${NC} on an existing master node.\n" + fi + else + printf "Node join commands expire after 24 hours.\n" + printf "\n" + if [ "$AIRGAP" = "1" ]; then + printf "To generate new node join commands, run ${GREEN}cat ./tasks.sh | sudo bash -s join_token airgap${NC} on this node.\n" + else + printf "To generate new node join commands, run ${GREEN}${prefix}tasks.sh | sudo bash -s join_token${NC} on this node.\n" + fi + fi + + if [ "$AIRGAP" = "1" ]; then + printf "\n" + printf "To add worker nodes 
to this installation, copy and unpack this bundle on your other nodes, and run the following:" + printf "\n" + printf "\n" + printf "${GREEN} cat ./join.sh | sudo bash -s airgap kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=${KUBEADM_TOKEN_CA_HASH} kubernetes-version=${KUBERNETES_VERSION}${common_flags}\n" + printf "${NC}" + printf "\n" + printf "\n" + if [ "$HA_CLUSTER" = "1" ]; then + printf "\n" + printf "To add ${GREEN}MASTER${NC} nodes to this installation, copy and unpack this bundle on your other nodes, and run the following:" + printf "\n" + printf "\n" + printf "${GREEN} cat ./join.sh | sudo bash -s airgap kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=${KUBEADM_TOKEN_CA_HASH} kubernetes-version=${KUBERNETES_VERSION} cert-key=${CERT_KEY} control-plane${common_flags}\n" + printf "${NC}" + printf "\n" + printf "\n" + fi + else + printf "\n" + printf "To add worker nodes to this installation, run the following script on your other nodes:" + printf "\n" + printf "${GREEN} ${prefix}join.sh | sudo bash -s kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=${KUBEADM_TOKEN_CA_HASH} kubernetes-version=${KUBERNETES_VERSION}${common_flags}\n" + printf "${NC}" + printf "\n" + printf "\n" + if [ "$HA_CLUSTER" = "1" ]; then + printf "\n" + printf "To add ${GREEN}MASTER${NC} nodes to this installation, run the following script on your other nodes:" + printf "\n" + printf "${GREEN} ${prefix}join.sh | sudo bash -s kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=$KUBEADM_TOKEN_CA_HASH kubernetes-version=${KUBERNETES_VERSION} cert-key=${CERT_KEY} control-plane${common_flags}\n" + printf "${NC}" + printf "\n" + printf "\n" + fi + fi +} + +function all_kubernetes_install() { + kubernetes_host + install_helm + ${K8S_DISTRO}_addon_for_each addon_load + 
helm_load + init + apply_installer_crd +} + +function report_kubernetes_install() { + report_addon_start "kubernetes" "$KUBERNETES_VERSION" + export REPORTING_CONTEXT_INFO="kubernetes $KUBERNETES_VERSION" + all_kubernetes_install + export REPORTING_CONTEXT_INFO="" + report_addon_success "kubernetes" "$KUBERNETES_VERSION" +} + +K8S_DISTRO=kubeadm + +function main() { + logStep "Running install with the argument(s): $*" + require_root_user + # ensure /usr/local/bin/kubectl-plugin is in the path + path_add "/usr/local/bin" + kubernetes_init_hostname + get_patch_yaml "$@" + maybe_read_kurl_config_from_cluster + + if [ "$AIRGAP" = "1" ]; then + move_airgap_assets + fi + pushd_install_directory + + yaml_airgap + proxy_bootstrap + download_util_binaries + get_machine_id + merge_yaml_specs + apply_bash_flag_overrides "$@" + parse_yaml_into_bash_variables + MASTER=1 # parse_yaml_into_bash_variables will unset master + prompt_license + + export KUBECONFIG=/etc/kubernetes/admin.conf + + is_ha + parse_kubernetes_target_version + discover full-cluster + report_install_start + trap ctrl_c SIGINT # trap ctrl+c (SIGINT) and handle it by reporting that the user exited intentionally (along with the line/version/etc) + trap trap_report_error ERR # trap errors and handle it by reporting the error line and parent function + preflights + init_preflights + kubernetes_upgrade_preflight + common_prompts + journald_persistent + configure_proxy + configure_no_proxy_preinstall + ${K8S_DISTRO}_addon_for_each addon_fetch + kubernetes_get_packages + preflights_require_host_packages + if [ -z "$CURRENT_KUBERNETES_VERSION" ]; then + host_preflights "1" "0" "0" + cluster_preflights "1" "0" "0" + else + host_preflights "1" "0" "1" + cluster_preflights "1" "0" "1" + fi + install_host_dependencies + get_common + setup_kubeadm_kustomize + rook_upgrade_maybe_report_upgrade_rook + kubernetes_pre_init + ${K8S_DISTRO}_addon_for_each addon_pre_init + discover_pod_subnet + discover_service_subnet + 
configure_no_proxy + install_cri + kubernetes_configure_pause_image_upgrade + get_shared + report_upgrade_kubernetes + report_kubernetes_install + export SUPPORT_BUNDLE_READY=1 # allow ctrl+c and ERR traps to collect support bundles now that k8s is installed + kurl_init_config + maybe_set_kurl_cluster_uuid + ${K8S_DISTRO}_addon_for_each addon_install + maybe_cleanup_rook + maybe_cleanup_longhorn + helmfile_sync + kubeadm_post_init + uninstall_docker + ${K8S_DISTRO}_addon_for_each addon_post_init + check_proxy_config + outro + package_cleanup + + popd_install_directory + + report_install_success +} + +# tee logs into /var/log/kurl/install-.log and stdout +mkdir -p /var/log/kurl +LOGFILE="/var/log/kurl/install-$(date +"%Y-%m-%dT%H-%M-%S").log" +main "$@" 2>&1 | tee $LOGFILE +# it is required to return the exit status of the script +FINAL_RESULT="${PIPESTATUS[0]}" +sed -i "/\b\(password\)\b/d" $LOGFILE > /dev/null 2>&1 +exit "$FINAL_RESULT" diff --git a/tsconfig.json b/tsconfig.json old mode 100644 new mode 100755 index e6adda9..de7c293 --- a/tsconfig.json +++ b/tsconfig.json @@ -1,76 +1,27 @@ { "compilerOptions": { - /* Visit https://aka.ms/tsconfig.json to read more about this file */ - - /* Basic Options */ - // "incremental": true, /* Enable incremental compilation */ - "target": "es2020", /* Specify ECMAScript target version: 'ES3' (default), 'ES5', 'ES2015', 'ES2016', 'ES2017', 'ES2018', 'ES2019', 'ES2020', or 'ESNEXT'. */ - "module": "commonjs", /* Specify module code generation: 'none', 'commonjs', 'amd', 'system', 'umd', 'es2015', 'es2020', or 'ESNext'. */ - "lib": ["ES2020.String", "DOM", "DOM.Iterable", "ES2020"], /* Specify library files to be included in the compilation. */ - // "allowJs": true, /* Allow javascript files to be compiled. */ - // "checkJs": true, /* Report errors in .js files. */ - // "jsx": "preserve", /* Specify JSX code generation: 'preserve', 'react-native', or 'react'. 
*/ - // "declaration": true, /* Generates corresponding '.d.ts' file. */ - // "declarationMap": true, /* Generates a sourcemap for each corresponding '.d.ts' file. */ - // "sourceMap": true, /* Generates corresponding '.map' file. */ - // "outFile": "./", /* Concatenate and emit output to single file. */ - "outDir": "./js", /* Redirect output structure to the directory. */ - // "rootDir": "./", /* Specify the root directory of input files. Use to control the output directory structure with --outDir. */ - // "composite": true, /* Enable project compilation */ - // "tsBuildInfoFile": "./", /* Specify file to store incremental compilation information */ - // "removeComments": true, /* Do not emit comments to output. */ - // "noEmit": true, /* Do not emit outputs. */ - // "importHelpers": true, /* Import emit helpers from 'tslib'. */ - // "downlevelIteration": true, /* Provide full support for iterables in 'for-of', spread, and destructuring when targeting 'ES5' or 'ES3'. */ - // "isolatedModules": true, /* Transpile each file as a separate module (similar to 'ts.transpileModule'). */ - - /* Strict Type-Checking Options */ - "strict": true, /* Enable all strict type-checking options. */ - // "noImplicitAny": true, /* Raise error on expressions and declarations with an implied 'any' type. */ - // "strictNullChecks": true, /* Enable strict null checks. */ - // "strictFunctionTypes": true, /* Enable strict checking of function types. */ - // "strictBindCallApply": true, /* Enable strict 'bind', 'call', and 'apply' methods on functions. */ - // "strictPropertyInitialization": true, /* Enable strict checking of property initialization in classes. */ - // "noImplicitThis": true, /* Raise error on 'this' expressions with an implied 'any' type. */ - // "alwaysStrict": true, /* Parse in strict mode and emit "use strict" for each source file. */ - - /* Additional Checks */ - // "noUnusedLocals": true, /* Report errors on unused locals. 
*/ - // "noUnusedParameters": true, /* Report errors on unused parameters. */ - // "noImplicitReturns": true, /* Report error when not all code paths in function return a value. */ - // "noFallthroughCasesInSwitch": true, /* Report errors for fallthrough cases in switch statement. */ - // "noUncheckedIndexedAccess": true, /* Include 'undefined' in index signature results */ - - /* Module Resolution Options */ - // "moduleResolution": "node", /* Specify module resolution strategy: 'node' (Node.js) or 'classic' (TypeScript pre-1.6). */ - // "baseUrl": "./", /* Base directory to resolve non-absolute module names. */ - // "paths": {}, /* A series of entries which re-map imports to lookup locations relative to the 'baseUrl'. */ - // "rootDirs": [], /* List of root folders whose combined content represents the structure of the project at runtime. */ - // "typeRoots": [], /* List of folders to include type definitions from. */ - // "types": [], /* Type declaration files to be included in compilation. */ - // "allowSyntheticDefaultImports": true, /* Allow default imports from modules with no default export. This does not affect code emit, just typechecking. */ - "esModuleInterop": true, /* Enables emit interoperability between CommonJS and ES Modules via creation of namespace objects for all imports. Implies 'allowSyntheticDefaultImports'. */ - // "preserveSymlinks": true, /* Do not resolve the real path of symlinks. */ - // "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */ - - /* Source Map Options */ - // "sourceRoot": "", /* Specify the location where debugger should locate TypeScript files instead of source locations. */ - // "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */ - // "inlineSourceMap": true, /* Emit a single file with source maps instead of having a separate file. 
*/ - // "inlineSources": true, /* Emit the source alongside the sourcemaps within a single file; requires '--inlineSourceMap' or '--sourceMap' to be set. */ - - /* Experimental Options */ - // "experimentalDecorators": true, /* Enables experimental support for ES7 decorators. */ - // "emitDecoratorMetadata": true, /* Enables experimental support for emitting type metadata for decorators. */ - - /* Advanced Options */ - "skipLibCheck": true, /* Skip type checking of declaration files. */ - "forceConsistentCasingInFileNames": true, /* Disallow inconsistently-cased references to the same file. */ - - "resolveJsonModule": true + "alwaysStrict": true, + "declaration": true, + "experimentalDecorators": true, + "inlineSourceMap": true, + "inlineSources": true, + "lib": ["es2016"], + "module": "CommonJS", + "noEmit": false, + "noEmitOnError": true, + "noFallthroughCasesInSwitch": true, + "noImplicitAny": true, + "noImplicitReturns": true, + "noImplicitThis": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "resolveJsonModule": true, + "strict": true, + "strictNullChecks": true, + "strictPropertyInitialization": true, + "stripInternal": true, + "target": "ES2017" }, - "files": [ - "./plugins/release.ts", - "./plugins/index.d.ts" - ] -} + "include": ["**/*.ts"], + "exclude": ["node_modules"] +} \ No newline at end of file diff --git a/yarn.lock b/yarn.lock deleted file mode 100644 index 910108d..0000000 --- a/yarn.lock +++ /dev/null @@ -1,1911 +0,0 @@ -# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. 
-# yarn lockfile v1 - - -"@actions/core@^1.2.6": - version "1.4.0" - resolved "https://registry.yarnpkg.com/@actions/core/-/core-1.4.0.tgz#cf2e6ee317e314b03886adfeb20e448d50d6e524" - integrity sha512-CGx2ilGq5i7zSLgiiGUtBCxhRRxibJYU6Fim0Q1Wg2aQL2LTnF27zbqZOrxfvFQ55eSBW0L8uVStgtKMpa0Qlg== - -"@actions/github@^5.0.0": - version "5.0.0" - resolved "https://registry.yarnpkg.com/@actions/github/-/github-5.0.0.tgz#1754127976c50bd88b2e905f10d204d76d1472f8" - integrity sha512-QvE9eAAfEsS+yOOk0cylLBIO/d6WyWIOvsxxzdrPFaud39G6BOkUwScXZn1iBzQzHyu9SBkkLSWlohDWdsasAQ== - dependencies: - "@actions/http-client" "^1.0.11" - "@octokit/core" "^3.4.0" - "@octokit/plugin-paginate-rest" "^2.13.3" - "@octokit/plugin-rest-endpoint-methods" "^5.1.1" - -"@actions/http-client@^1.0.11": - version "1.0.11" - resolved "https://registry.yarnpkg.com/@actions/http-client/-/http-client-1.0.11.tgz#c58b12e9aa8b159ee39e7dd6cbd0e91d905633c0" - integrity sha512-VRYHGQV1rqnROJqdMvGUbY/Kn8vriQe/F9HR2AlYHzmKuM/p3kjNuXhmdBfcVgsvRWTz5C5XW5xvndZrVBuAYg== - dependencies: - tunnel "0.0.6" - -"@babel/code-frame@^7.0.0": - version "7.10.4" - resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.10.4.tgz#168da1a36e90da68ae8d49c0f1b48c7c6249213a" - integrity sha512-vG6SvB6oYEhvgisZNFRmRCUkLz11c7rp+tbNTynGqc6mS1d5ATd/sGyV6W0KZZnXRKMTzZDRgQT3Ou9jhpAfUg== - dependencies: - "@babel/highlight" "^7.10.4" - -"@babel/helper-validator-identifier@^7.10.4": - version "7.10.4" - resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.10.4.tgz#a78c7a7251e01f616512d31b10adcf52ada5e0d2" - integrity sha512-3U9y+43hz7ZM+rzG24Qe2mufW5KhvFg/NhnNph+i9mgCtdTCtMJuI1TMkrIUiK7Ix4PYlRF9I5dhqaLYA/ADXw== - -"@babel/highlight@^7.10.4": - version "7.10.4" - resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.10.4.tgz#7d1bdfd65753538fabe6c38596cdb76d9ac60143" - integrity 
sha512-i6rgnR/YgPEQzZZnbTHHuZdlE8qyoBNalD6F+q4vAFlcMEcqmkoG+mPqJYJCo63qPf74+Y1UZsl3l6f7/RIkmA== - dependencies: - "@babel/helper-validator-identifier" "^7.10.4" - chalk "^2.0.0" - js-tokens "^4.0.0" - -"@iarna/toml@2.2.5": - version "2.2.5" - resolved "https://registry.yarnpkg.com/@iarna/toml/-/toml-2.2.5.tgz#b32366c89b43c6f8cefbdefac778b9c828e3ba8c" - integrity sha512-trnsAYxU3xnS1gPHPyU961coFyLkh4gAD/0zQ5mymY4yOZ+CYvsPqUbOFSw0aDM4y0tV7tiFxL/1XfXPNC6IPg== - -"@nodelib/fs.scandir@2.1.3": - version "2.1.3" - resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.3.tgz#3a582bdb53804c6ba6d146579c46e52130cf4a3b" - integrity sha512-eGmwYQn3gxo4r7jdQnkrrN6bY478C3P+a/y72IJukF8LjB6ZHeB3c+Ehacj3sYeSmUXGlnA67/PmbM9CVwL7Dw== - dependencies: - "@nodelib/fs.stat" "2.0.3" - run-parallel "^1.1.9" - -"@nodelib/fs.stat@2.0.3", "@nodelib/fs.stat@^2.0.2": - version "2.0.3" - resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-2.0.3.tgz#34dc5f4cabbc720f4e60f75a747e7ecd6c175bd3" - integrity sha512-bQBFruR2TAwoevBEd/NWMoAAtNGzTRgdrqnYCc7dhzfoNvqPzLyqlEQnzZ3kVnNrSp25iyxE00/3h2fqGAGArA== - -"@nodelib/fs.walk@^1.2.3": - version "1.2.4" - resolved "https://registry.yarnpkg.com/@nodelib/fs.walk/-/fs.walk-1.2.4.tgz#011b9202a70a6366e436ca5c065844528ab04976" - integrity sha512-1V9XOY4rDW0rehzbrcqAmHnz8e7SKvX27gh8Gt2WgB0+pdzdiLV83p72kZPU+jvMbS1qU5mauP2iOvO8rhmurQ== - dependencies: - "@nodelib/fs.scandir" "2.1.3" - fastq "^1.6.0" - -"@octokit/auth-token@^2.4.0": - version "2.4.2" - resolved "https://registry.yarnpkg.com/@octokit/auth-token/-/auth-token-2.4.2.tgz#10d0ae979b100fa6b72fa0e8e63e27e6d0dbff8a" - integrity sha512-jE/lE/IKIz2v1+/P0u4fJqv0kYwXOTujKemJMFr6FeopsxlIK3+wKDCJGnysg81XID5TgZQbIfuJ5J0lnTiuyQ== - dependencies: - "@octokit/types" "^5.0.0" - -"@octokit/auth-token@^2.4.4": - version "2.4.5" - resolved "https://registry.yarnpkg.com/@octokit/auth-token/-/auth-token-2.4.5.tgz#568ccfb8cb46f36441fac094ce34f7a875b197f3" - integrity 
sha512-BpGYsPgJt05M7/L/5FoE1PiAbdxXFZkX/3kDYcsvd1v6UhlnE5e96dTDr0ezX/EFwciQxf3cNV0loipsURU+WA== - dependencies: - "@octokit/types" "^6.0.3" - -"@octokit/core@^3.0.0": - version "3.1.2" - resolved "https://registry.yarnpkg.com/@octokit/core/-/core-3.1.2.tgz#c937d5f9621b764573068fcd2e5defcc872fd9cc" - integrity sha512-AInOFULmwOa7+NFi9F8DlDkm5qtZVmDQayi7TUgChE3yeIGPq0Y+6cAEXPexQ3Ea+uZy66hKEazR7DJyU+4wfw== - dependencies: - "@octokit/auth-token" "^2.4.0" - "@octokit/graphql" "^4.3.1" - "@octokit/request" "^5.4.0" - "@octokit/types" "^5.0.0" - before-after-hook "^2.1.0" - universal-user-agent "^6.0.0" - -"@octokit/core@^3.4.0", "@octokit/core@^3.5.0": - version "3.5.1" - resolved "https://registry.yarnpkg.com/@octokit/core/-/core-3.5.1.tgz#8601ceeb1ec0e1b1b8217b960a413ed8e947809b" - integrity sha512-omncwpLVxMP+GLpLPgeGJBF6IWJFjXDS5flY5VbppePYX9XehevbDykRH9PdCdvqt9TS5AOTiDide7h0qrkHjw== - dependencies: - "@octokit/auth-token" "^2.4.4" - "@octokit/graphql" "^4.5.8" - "@octokit/request" "^5.6.0" - "@octokit/request-error" "^2.0.5" - "@octokit/types" "^6.0.3" - before-after-hook "^2.2.0" - universal-user-agent "^6.0.0" - -"@octokit/endpoint@^6.0.1": - version "6.0.8" - resolved "https://registry.yarnpkg.com/@octokit/endpoint/-/endpoint-6.0.8.tgz#91b07e236fdb69929c678c6439f7a560dc6058ac" - integrity sha512-MuRrgv+bM4Q+e9uEvxAB/Kf+Sj0O2JAOBA131uo1o6lgdq1iS8ejKwtqHgdfY91V3rN9R/hdGKFiQYMzVzVBEQ== - dependencies: - "@octokit/types" "^5.0.0" - is-plain-object "^5.0.0" - universal-user-agent "^6.0.0" - -"@octokit/graphql@^4.3.1": - version "4.5.6" - resolved "https://registry.yarnpkg.com/@octokit/graphql/-/graphql-4.5.6.tgz#708143ba15cf7c1879ed6188266e7f270be805d4" - integrity sha512-Rry+unqKTa3svswT2ZAuqenpLrzJd+JTv89LTeVa5UM/5OX8o4KTkPL7/1ABq4f/ZkELb0XEK/2IEoYwykcLXg== - dependencies: - "@octokit/request" "^5.3.0" - "@octokit/types" "^5.0.0" - universal-user-agent "^6.0.0" - -"@octokit/graphql@^4.5.8": - version "4.6.4" - resolved 
"https://registry.yarnpkg.com/@octokit/graphql/-/graphql-4.6.4.tgz#0c3f5bed440822182e972317122acb65d311a5ed" - integrity sha512-SWTdXsVheRmlotWNjKzPOb6Js6tjSqA2a8z9+glDJng0Aqjzti8MEWOtuT8ZSu6wHnci7LZNuarE87+WJBG4vg== - dependencies: - "@octokit/request" "^5.6.0" - "@octokit/types" "^6.0.3" - universal-user-agent "^6.0.0" - -"@octokit/openapi-types@^9.2.0": - version "9.2.0" - resolved "https://registry.yarnpkg.com/@octokit/openapi-types/-/openapi-types-9.2.0.tgz#46bbfe6a85bfd2987e69216955fcd04df7d025bb" - integrity sha512-c4A1Xm0At+ypvBfEETREu519wLncJYQXvY+dBGg/V5YA51eg5EwdDsPPfcOMG0cuXscqRvsIgIySTmTJUdcTNA== - -"@octokit/plugin-paginate-rest@^2.13.3", "@octokit/plugin-paginate-rest@^2.6.2": - version "2.14.0" - resolved "https://registry.yarnpkg.com/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-2.14.0.tgz#f469cb4a908792fb44679c5973d8bba820c88b0f" - integrity sha512-S2uEu2uHeI7Vf+Lvj8tv3O5/5TCAa8GHS0dUQN7gdM7vKA6ZHAbR6HkAVm5yMb1mbedLEbxOuQ+Fa0SQ7tCDLA== - dependencies: - "@octokit/types" "^6.18.0" - -"@octokit/plugin-paginate-rest@^2.2.0": - version "2.4.0" - resolved "https://registry.yarnpkg.com/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-2.4.0.tgz#92f951ddc8a1cd505353fa07650752ca25ed7e93" - integrity sha512-YT6Klz3LLH6/nNgi0pheJnUmTFW4kVnxGft+v8Itc41IIcjl7y1C8TatmKQBbCSuTSNFXO5pCENnqg6sjwpJhg== - dependencies: - "@octokit/types" "^5.5.0" - -"@octokit/plugin-request-log@^1.0.0": - version "1.0.0" - resolved "https://registry.yarnpkg.com/@octokit/plugin-request-log/-/plugin-request-log-1.0.0.tgz#eef87a431300f6148c39a7f75f8cfeb218b2547e" - integrity sha512-ywoxP68aOT3zHCLgWZgwUJatiENeHE7xJzYjfz8WI0goynp96wETBF+d95b8g/uL4QmS6owPVlaxiz3wyMAzcw== - -"@octokit/plugin-request-log@^1.0.2": - version "1.0.4" - resolved "https://registry.yarnpkg.com/@octokit/plugin-request-log/-/plugin-request-log-1.0.4.tgz#5e50ed7083a613816b1e4a28aeec5fb7f1462e85" - integrity sha512-mLUsMkgP7K/cnFEw07kWqXGF5LKrOkD+lhCrKvPHXWDywAwuDUeDwWBpc69XK3pNX0uKiVt8g5z96PJ6z9xCFA== - 
-"@octokit/plugin-rest-endpoint-methods@4.2.0": - version "4.2.0" - resolved "https://registry.yarnpkg.com/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-4.2.0.tgz#c5a0691b3aba5d8b4ef5dffd6af3649608f167ba" - integrity sha512-1/qn1q1C1hGz6W/iEDm9DoyNoG/xdFDt78E3eZ5hHeUfJTLJgyAMdj9chL/cNBHjcjd+FH5aO1x0VCqR2RE0mw== - dependencies: - "@octokit/types" "^5.5.0" - deprecation "^2.3.1" - -"@octokit/plugin-rest-endpoint-methods@5.5.2", "@octokit/plugin-rest-endpoint-methods@^5.1.1": - version "5.5.2" - resolved "https://registry.yarnpkg.com/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-5.5.2.tgz#c8bdb3065a9725e30802295f10a31b3ff434830c" - integrity sha512-1ArooY7AYQdUd2zyqWLFHQ6gver9PvZSiuM+EPAsDplv1Y6u8zHl6yZ7yGIgaf7xvWupwUkJS2WttGYyb1P0DQ== - dependencies: - "@octokit/types" "^6.22.0" - deprecation "^2.3.1" - -"@octokit/request-error@^2.0.0": - version "2.0.2" - resolved "https://registry.yarnpkg.com/@octokit/request-error/-/request-error-2.0.2.tgz#0e76b83f5d8fdda1db99027ea5f617c2e6ba9ed0" - integrity sha512-2BrmnvVSV1MXQvEkrb9zwzP0wXFNbPJij922kYBTLIlIafukrGOb+ABBT2+c6wZiuyWDH1K1zmjGQ0toN/wMWw== - dependencies: - "@octokit/types" "^5.0.1" - deprecation "^2.0.0" - once "^1.4.0" - -"@octokit/request-error@^2.0.5", "@octokit/request-error@^2.1.0": - version "2.1.0" - resolved "https://registry.yarnpkg.com/@octokit/request-error/-/request-error-2.1.0.tgz#9e150357831bfc788d13a4fd4b1913d60c74d677" - integrity sha512-1VIvgXxs9WHSjicsRwq8PlR2LR2x6DwsJAaFgzdi0JfJoGSO8mYI/cHJQ+9FbN21aa+DrgNLnwObmyeSC8Rmpg== - dependencies: - "@octokit/types" "^6.0.3" - deprecation "^2.0.0" - once "^1.4.0" - -"@octokit/request@^5.3.0", "@octokit/request@^5.4.0": - version "5.4.9" - resolved "https://registry.yarnpkg.com/@octokit/request/-/request-5.4.9.tgz#0a46f11b82351b3416d3157261ad9b1558c43365" - integrity sha512-CzwVvRyimIM1h2n9pLVYfTDmX9m+KHSgCpqPsY8F1NdEK8IaWqXhSBXsdjOBFZSpEcxNEeg4p0UO9cQ8EnOCLA== - dependencies: - "@octokit/endpoint" "^6.0.1" - 
"@octokit/request-error" "^2.0.0" - "@octokit/types" "^5.0.0" - deprecation "^2.0.0" - is-plain-object "^5.0.0" - node-fetch "^2.6.1" - once "^1.4.0" - universal-user-agent "^6.0.0" - -"@octokit/request@^5.6.0": - version "5.6.0" - resolved "https://registry.yarnpkg.com/@octokit/request/-/request-5.6.0.tgz#6084861b6e4fa21dc40c8e2a739ec5eff597e672" - integrity sha512-4cPp/N+NqmaGQwbh3vUsYqokQIzt7VjsgTYVXiwpUP2pxd5YiZB2XuTedbb0SPtv9XS7nzAKjAuQxmY8/aZkiA== - dependencies: - "@octokit/endpoint" "^6.0.1" - "@octokit/request-error" "^2.1.0" - "@octokit/types" "^6.16.1" - is-plain-object "^5.0.0" - node-fetch "^2.6.1" - universal-user-agent "^6.0.0" - -"@octokit/rest@18.0.6", "@octokit/rest@^18.0.6": - version "18.0.6" - resolved "https://registry.yarnpkg.com/@octokit/rest/-/rest-18.0.6.tgz#76c274f1a68f40741a131768ef483f041e7b98b6" - integrity sha512-ES4lZBKPJMX/yUoQjAZiyFjei9pJ4lTTfb9k7OtYoUzKPDLl/M8jiHqt6qeSauyU4eZGLw0sgP1WiQl9FYeM5w== - dependencies: - "@octokit/core" "^3.0.0" - "@octokit/plugin-paginate-rest" "^2.2.0" - "@octokit/plugin-request-log" "^1.0.0" - "@octokit/plugin-rest-endpoint-methods" "4.2.0" - -"@octokit/rest@^18.7.2": - version "18.7.2" - resolved "https://registry.yarnpkg.com/@octokit/rest/-/rest-18.7.2.tgz#8239b5acd40fccb3f5d074e7a4386980f3770821" - integrity sha512-TAedgLqNRS+rdGqS9v00sqBeS6IgyLSoqqCDu6pmoadAB7xSjFHShxzaXUAbxxJjyHtb7mencRGzgH4W/V6Myg== - dependencies: - "@octokit/core" "^3.5.0" - "@octokit/plugin-paginate-rest" "^2.6.2" - "@octokit/plugin-request-log" "^1.0.2" - "@octokit/plugin-rest-endpoint-methods" "5.5.2" - -"@octokit/types@^5.0.0", "@octokit/types@^5.0.1", "@octokit/types@^5.5.0": - version "5.5.0" - resolved "https://registry.yarnpkg.com/@octokit/types/-/types-5.5.0.tgz#e5f06e8db21246ca102aa28444cdb13ae17a139b" - integrity sha512-UZ1pErDue6bZNjYOotCNveTXArOMZQFG6hKJfOnGnulVCMcVVi7YIIuuR4WfBhjo7zgpmzn/BkPDnUXtNx+PcQ== - dependencies: - "@types/node" ">= 8" - -"@octokit/types@^6.0.3", "@octokit/types@^6.16.1", 
"@octokit/types@^6.18.0", "@octokit/types@^6.22.0": - version "6.22.0" - resolved "https://registry.yarnpkg.com/@octokit/types/-/types-6.22.0.tgz#389bade20955c919241b6ffb9dd33f6e0cf1cc6c" - integrity sha512-Y8GR0BJHQDpO09qw/ZQpN+DXrFzCWaE0pvK4frDm3zJ+h99AktsFfBoDazbCtHxiL8d0jD8xRH4BeynlKLeChg== - dependencies: - "@octokit/openapi-types" "^9.2.0" - -"@sindresorhus/is@^0.14.0": - version "0.14.0" - resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-0.14.0.tgz#9fb3a3cf3132328151f353de4632e01e52102bea" - integrity sha512-9NET910DNaIPngYnLLPeg+Ogzqsi9uM4mSboU5y6p8S5DzMTVEsJZrawi+BoDNUVBa2DhJqQYUFvMDfgU062LQ== - -"@sindresorhus/is@^3.1.1": - version "3.1.2" - resolved "https://registry.yarnpkg.com/@sindresorhus/is/-/is-3.1.2.tgz#548650de521b344e3781fbdb0ece4aa6f729afb8" - integrity sha512-JiX9vxoKMmu8Y3Zr2RVathBL1Cdu4Nt4MuNWemt1Nc06A0RAin9c5FArkhGsyMBWfCu4zj+9b+GxtjAnE4qqLQ== - -"@szmarczak/http-timer@^1.1.2": - version "1.1.2" - resolved "https://registry.yarnpkg.com/@szmarczak/http-timer/-/http-timer-1.1.2.tgz#b1665e2c461a2cd92f4c1bbf50d5454de0d4b421" - integrity sha512-XIB2XbzHTN6ieIjfIMV9hlVcfPU26s2vafYWQcZHWXHOxiaRZYEDKEwdl129Zyg50+foYV2jCgtrqSA6qNuNSA== - dependencies: - defer-to-connect "^1.0.1" - -"@szmarczak/http-timer@^4.0.5": - version "4.0.5" - resolved "https://registry.yarnpkg.com/@szmarczak/http-timer/-/http-timer-4.0.5.tgz#bfbd50211e9dfa51ba07da58a14cdfd333205152" - integrity sha512-PyRA9sm1Yayuj5OIoJ1hGt2YISX45w9WcFbh6ddT0Z/0yaFxOtGLInr4jUfU1EAFVs0Yfyfev4RNwBlUaHdlDQ== - dependencies: - defer-to-connect "^2.0.0" - -"@types/cacheable-request@^6.0.1": - version "6.0.1" - resolved "https://registry.yarnpkg.com/@types/cacheable-request/-/cacheable-request-6.0.1.tgz#5d22f3dded1fd3a84c0bbeb5039a7419c2c91976" - integrity sha512-ykFq2zmBGOCbpIXtoVbz4SKY5QriWPh3AjyU4G74RYbtt5yOc5OfaY75ftjg7mikMOla1CTGpX3lLbuJh8DTrQ== - dependencies: - "@types/http-cache-semantics" "*" - "@types/keyv" "*" - "@types/node" "*" - "@types/responselike" "*" - 
-"@types/http-cache-semantics@*": - version "4.0.0" - resolved "https://registry.yarnpkg.com/@types/http-cache-semantics/-/http-cache-semantics-4.0.0.tgz#9140779736aa2655635ee756e2467d787cfe8a2a" - integrity sha512-c3Xy026kOF7QOTn00hbIllV1dLR9hG9NkSrLQgCVs8NF6sBU+VGWjD3wLPhmh1TYAc7ugCFsvHYMN4VcBN1U1A== - -"@types/keyv@*": - version "3.1.1" - resolved "https://registry.yarnpkg.com/@types/keyv/-/keyv-3.1.1.tgz#e45a45324fca9dab716ab1230ee249c9fb52cfa7" - integrity sha512-MPtoySlAZQ37VoLaPcTHCu1RWJ4llDkULYZIzOYxlhxBqYPB0RsRlmMU0R6tahtFe27mIdkHV+551ZWV4PLmVw== - dependencies: - "@types/node" "*" - -"@types/lodash@^4.14.171": - version "4.14.171" - resolved "https://registry.yarnpkg.com/@types/lodash/-/lodash-4.14.171.tgz#f01b3a5fe3499e34b622c362a46a609fdb23573b" - integrity sha512-7eQ2xYLLI/LsicL2nejW9Wyko3lcpN6O/z0ZLHrEQsg280zIdCv1t/0m6UtBjUHokCGBQ3gYTbHzDkZ1xOBwwg== - -"@types/node@*", "@types/node@>= 8": - version "14.11.8" - resolved "https://registry.yarnpkg.com/@types/node/-/node-14.11.8.tgz#fe2012f2355e4ce08bca44aeb3abbb21cf88d33f" - integrity sha512-KPcKqKm5UKDkaYPTuXSx8wEP7vE9GnuaXIZKijwRYcePpZFDVuy2a57LarFKiORbHOuTOOwYzxVxcUzsh2P2Pw== - -"@types/parse-json@^4.0.0": - version "4.0.0" - resolved "https://registry.yarnpkg.com/@types/parse-json/-/parse-json-4.0.0.tgz#2f8bb441434d163b35fb8ffdccd7138927ffb8c0" - integrity sha512-//oorEZjL6sbPcKUaCdIGlIUeH26mgzimjBB77G6XRgnDl/L5wOnpyBGRe/Mmf5CVW3PwEBE1NjiMZ/ssFh4wA== - -"@types/responselike@*", "@types/responselike@^1.0.0": - version "1.0.0" - resolved "https://registry.yarnpkg.com/@types/responselike/-/responselike-1.0.0.tgz#251f4fe7d154d2bad125abe1b429b23afd262e29" - integrity sha512-85Y2BjiufFzaMIlvJDvTTB8Fxl2xfLo4HgmHzVBz08w4wDePCTjYw66PdrolO0kzli3yam/YCgRufyo1DdQVTA== - dependencies: - "@types/node" "*" - -ansi-align@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/ansi-align/-/ansi-align-3.0.0.tgz#b536b371cf687caaef236c18d3e21fe3797467cb" - integrity 
sha512-ZpClVKqXN3RGBmKibdfWzqCY4lnjEuoNzU5T0oEFpfd/z5qJHVarukridD4juLO2FXMiwUQxr9WqQtaYa8XRYw== - dependencies: - string-width "^3.0.0" - -ansi-escapes@^4.2.1: - version "4.3.1" - resolved "https://registry.yarnpkg.com/ansi-escapes/-/ansi-escapes-4.3.1.tgz#a5c47cc43181f1f38ffd7076837700d395522a61" - integrity sha512-JWF7ocqNrp8u9oqpgV+wH5ftbt+cfvv+PTjOvKLT3AdYly/LmORARfEVT1iyjwN+4MqE5UmVKoAdIBqeoCHgLA== - dependencies: - type-fest "^0.11.0" - -ansi-regex@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-4.1.0.tgz#8b9f8f08cf1acb843756a839ca8c7e3168c51997" - integrity sha512-1apePfXM1UOSqw0o9IiFAovVz9M5S1Dg+4TrDwfMewQ6p/rmMueb7tWZjQ1rx4Loy1ArBggoqGpfqqdI4rondg== - -ansi-regex@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.0.tgz#388539f55179bf39339c81af30a654d69f87cb75" - integrity sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg== - -ansi-styles@^3.2.1: - version "3.2.1" - resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d" - integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA== - dependencies: - color-convert "^1.9.0" - -ansi-styles@^4.1.0: - version "4.3.0" - resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937" - integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg== - dependencies: - color-convert "^2.0.1" - -array-union@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d" - integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw== - -async-retry@1.3.1: - version "1.3.1" - resolved 
"https://registry.yarnpkg.com/async-retry/-/async-retry-1.3.1.tgz#139f31f8ddce50c0870b0ba558a6079684aaed55" - integrity sha512-aiieFW/7h3hY0Bq5d+ktDBejxuwR78vRu9hDUdR8rNhSaQ29VzPL4AoIRG7D/c7tdenwOcKvgPM6tIxB3cB6HA== - dependencies: - retry "0.12.0" - -asynckit@^0.4.0: - version "0.4.0" - resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" - integrity sha1-x57Zf380y48robyXkLzDZkdLS3k= - -"auto-release-notes@git://github.com/wandb/auto-release-notes.git#v0.2.0": - version "0.2.0" - resolved "git://github.com/wandb/auto-release-notes.git#e76554df3d2cf3db0c6f58e4e6b9ec6541f26a9c" - dependencies: - "@actions/core" "^1.2.6" - "@actions/github" "^5.0.0" - "@octokit/rest" "^18.7.2" - external-editor "^3.1.0" - -balanced-match@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767" - integrity sha1-ibTRmasr7kneFk6gK4nORi1xt2c= - -before-after-hook@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/before-after-hook/-/before-after-hook-2.1.0.tgz#b6c03487f44e24200dd30ca5e6a1979c5d2fb635" - integrity sha512-IWIbu7pMqyw3EAJHzzHbWa85b6oud/yfKYg5rqB5hNE8CeMi3nX+2C2sj0HswfblST86hpVEOAb9x34NZd6P7A== - -before-after-hook@^2.2.0: - version "2.2.2" - resolved "https://registry.yarnpkg.com/before-after-hook/-/before-after-hook-2.2.2.tgz#a6e8ca41028d90ee2c24222f201c90956091613e" - integrity sha512-3pZEU3NT5BFUo/AD5ERPWOgQOCZITni6iavr5AUw5AUwQjMlI0kzu5btnyD39AF0gUEsDPwJT+oY1ORBJijPjQ== - -boxen@^4.2.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/boxen/-/boxen-4.2.0.tgz#e411b62357d6d6d36587c8ac3d5d974daa070e64" - integrity sha512-eB4uT9RGzg2odpER62bBwSLvUeGC+WbRjjyyFhGsKnc8wp/m0+hQsMUvUe3H2V0D5vw0nBdO1hCJoZo5mKeuIQ== - dependencies: - ansi-align "^3.0.0" - camelcase "^5.3.1" - chalk "^3.0.0" - cli-boxes "^2.2.0" - string-width "^4.1.0" - term-size "^2.1.0" - type-fest "^0.8.1" - widest-line "^3.1.0" - 
-brace-expansion@^1.1.7: - version "1.1.11" - resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.11.tgz#3c7fcbf529d87226f3d2f52b966ff5271eb441dd" - integrity sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA== - dependencies: - balanced-match "^1.0.0" - concat-map "0.0.1" - -braces@^3.0.1: - version "3.0.2" - resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" - integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== - dependencies: - fill-range "^7.0.1" - -cacheable-lookup@^5.0.3: - version "5.0.3" - resolved "https://registry.yarnpkg.com/cacheable-lookup/-/cacheable-lookup-5.0.3.tgz#049fdc59dffdd4fc285e8f4f82936591bd59fec3" - integrity sha512-W+JBqF9SWe18A72XFzN/V/CULFzPm7sBXzzR6ekkE+3tLG72wFZrBiBZhrZuDoYexop4PHJVdFAKb/Nj9+tm9w== - -cacheable-request@^6.0.0: - version "6.1.0" - resolved "https://registry.yarnpkg.com/cacheable-request/-/cacheable-request-6.1.0.tgz#20ffb8bd162ba4be11e9567d823db651052ca912" - integrity sha512-Oj3cAGPCqOZX7Rz64Uny2GYAZNliQSqfbePrgAQ1wKAihYmCUnraBtJtKcGR4xz7wF+LoJC+ssFZvv5BgF9Igg== - dependencies: - clone-response "^1.0.2" - get-stream "^5.1.0" - http-cache-semantics "^4.0.0" - keyv "^3.0.0" - lowercase-keys "^2.0.0" - normalize-url "^4.1.0" - responselike "^1.0.2" - -cacheable-request@^7.0.1: - version "7.0.1" - resolved "https://registry.yarnpkg.com/cacheable-request/-/cacheable-request-7.0.1.tgz#062031c2856232782ed694a257fa35da93942a58" - integrity sha512-lt0mJ6YAnsrBErpTMWeu5kl/tg9xMAWjavYTN6VQXM1A/teBITuNcccXsCxF0tDQQJf9DfAaX5O4e0zp0KlfZw== - dependencies: - clone-response "^1.0.2" - get-stream "^5.1.0" - http-cache-semantics "^4.0.0" - keyv "^4.0.0" - lowercase-keys "^2.0.0" - normalize-url "^4.1.0" - responselike "^2.0.0" - -callsites@^3.0.0: - version "3.1.0" - resolved 
"https://registry.yarnpkg.com/callsites/-/callsites-3.1.0.tgz#b3630abd8943432f54b3f0519238e33cd7df2f73" - integrity sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ== - -camelcase@^5.3.1: - version "5.3.1" - resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320" - integrity sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg== - -chalk@4.1.0, chalk@^4.0.0, chalk@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.0.tgz#4e14870a618d9e2edd97dd8345fd9d9dc315646a" - integrity sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A== - dependencies: - ansi-styles "^4.1.0" - supports-color "^7.1.0" - -chalk@^2.0.0: - version "2.4.2" - resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424" - integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ== - dependencies: - ansi-styles "^3.2.1" - escape-string-regexp "^1.0.5" - supports-color "^5.3.0" - -chalk@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/chalk/-/chalk-3.0.0.tgz#3f73c2bf526591f574cc492c51e2456349f844e4" - integrity sha512-4D3B6Wf41KOYRFdszmDqMCGq5VV/uMAB273JILmO+3jAlh8X4qDtdtgCR3fxtbLEMzSx22QdhnDcJvu2u1fVwg== - dependencies: - ansi-styles "^4.1.0" - supports-color "^7.1.0" - -chardet@^0.7.0: - version "0.7.0" - resolved "https://registry.yarnpkg.com/chardet/-/chardet-0.7.0.tgz#90094849f0937f2eedc2425d0d28a9e5f0cbad9e" - integrity sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA== - -ci-info@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/ci-info/-/ci-info-2.0.0.tgz#67a9e964be31a51e15e5010d58e6f12834002f46" - integrity sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ== - 
-cli-boxes@^2.2.0: - version "2.2.1" - resolved "https://registry.yarnpkg.com/cli-boxes/-/cli-boxes-2.2.1.tgz#ddd5035d25094fce220e9cab40a45840a440318f" - integrity sha512-y4coMcylgSCdVinjiDBuR8PCC2bLjyGTwEmPb9NHR/QaNU6EUOXcTY/s6VjGMD6ENSEaeQYHCY0GNGS5jfMwPw== - -cli-cursor@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/cli-cursor/-/cli-cursor-3.1.0.tgz#264305a7ae490d1d03bf0c9ba7c925d1753af307" - integrity sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw== - dependencies: - restore-cursor "^3.1.0" - -cli-spinners@^2.4.0: - version "2.5.0" - resolved "https://registry.yarnpkg.com/cli-spinners/-/cli-spinners-2.5.0.tgz#12763e47251bf951cb75c201dfa58ff1bcb2d047" - integrity sha512-PC+AmIuK04E6aeSs/pUccSujsTzBhu4HzC2dL+CfJB/Jcc2qTRbEwZQDfIUpt2Xl8BodYBEq8w4fc0kU2I9DjQ== - -cli-width@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-3.0.0.tgz#a2f48437a2caa9a22436e794bf071ec9e61cedf6" - integrity sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw== - -clone-response@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/clone-response/-/clone-response-1.0.2.tgz#d1dc973920314df67fbeb94223b4ee350239e96b" - integrity sha1-0dyXOSAxTfZ/vrlCI7TuNQI56Ws= - dependencies: - mimic-response "^1.0.0" - -clone@^1.0.2: - version "1.0.4" - resolved "https://registry.yarnpkg.com/clone/-/clone-1.0.4.tgz#da309cc263df15994c688ca902179ca3c7cd7c7e" - integrity sha1-2jCcwmPfFZlMaIypAheco8fNfH4= - -color-convert@^1.9.0: - version "1.9.3" - resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" - integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg== - dependencies: - color-name "1.1.3" - -color-convert@^2.0.1: - version "2.0.1" - resolved 
"https://registry.yarnpkg.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3" - integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ== - dependencies: - color-name "~1.1.4" - -color-name@1.1.3: - version "1.1.3" - resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25" - integrity sha1-p9BVi9icQveV3UIyj3QIMcpTvCU= - -color-name@~1.1.4: - version "1.1.4" - resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2" - integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA== - -combined-stream@^1.0.8: - version "1.0.8" - resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" - integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg== - dependencies: - delayed-stream "~1.0.0" - -concat-map@0.0.1: - version "0.0.1" - resolved "https://registry.yarnpkg.com/concat-map/-/concat-map-0.0.1.tgz#d8a96bd77fd68df7793a73036a3ba0d5405d477b" - integrity sha1-2Klr13/Wjfd5OnMDajug1UBdR3s= - -configstore@^5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/configstore/-/configstore-5.0.1.tgz#d365021b5df4b98cdd187d6a3b0e3f6a7cc5ed96" - integrity sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA== - dependencies: - dot-prop "^5.2.0" - graceful-fs "^4.1.2" - make-dir "^3.0.0" - unique-string "^2.0.0" - write-file-atomic "^3.0.0" - xdg-basedir "^4.0.0" - -cosmiconfig@7.0.0: - version "7.0.0" - resolved "https://registry.yarnpkg.com/cosmiconfig/-/cosmiconfig-7.0.0.tgz#ef9b44d773959cae63ddecd122de23853b60f8d3" - integrity sha512-pondGvTuVYDk++upghXJabWzL6Kxu6f26ljFw64Swq9v6sQPUL3EUlVDV56diOjpCayKihL6hVe8exIACU4XcA== - dependencies: - "@types/parse-json" "^4.0.0" - 
import-fresh "^3.2.1" - parse-json "^5.0.0" - path-type "^4.0.0" - yaml "^1.10.0" - -cross-spawn@^7.0.0: - version "7.0.3" - resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" - integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== - dependencies: - path-key "^3.1.0" - shebang-command "^2.0.0" - which "^2.0.1" - -crypto-random-string@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/crypto-random-string/-/crypto-random-string-2.0.0.tgz#ef2a7a966ec11083388369baa02ebead229b30d5" - integrity sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA== - -debug@4.2.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/debug/-/debug-4.2.0.tgz#7f150f93920e94c58f5574c2fd01a3110effe7f1" - integrity sha512-IX2ncY78vDTjZMFUdmsvIRFY2Cf4FnD0wRs+nQwJU8Lu99/tPFdb0VybiiMTPe3I6rQmwsqQqRBvxU+bZ/I8sg== - dependencies: - ms "2.1.2" - -decompress-response@^3.3.0: - version "3.3.0" - resolved "https://registry.yarnpkg.com/decompress-response/-/decompress-response-3.3.0.tgz#80a4dd323748384bfa248083622aedec982adff3" - integrity sha1-gKTdMjdIOEv6JICDYirt7Jgq3/M= - dependencies: - mimic-response "^1.0.0" - -decompress-response@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/decompress-response/-/decompress-response-6.0.0.tgz#ca387612ddb7e104bd16d85aab00d5ecf09c66fc" - integrity sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ== - dependencies: - mimic-response "^3.1.0" - -deep-extend@^0.6.0: - version "0.6.0" - resolved "https://registry.yarnpkg.com/deep-extend/-/deep-extend-0.6.0.tgz#c4fa7c95404a17a9c3e8ca7e1537312b736330ac" - integrity sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA== - -defaults@^1.0.3: - version "1.0.3" - resolved 
"https://registry.yarnpkg.com/defaults/-/defaults-1.0.3.tgz#c656051e9817d9ff08ed881477f3fe4019f3ef7d" - integrity sha1-xlYFHpgX2f8I7YgUd/P+QBnz730= - dependencies: - clone "^1.0.2" - -defer-to-connect@^1.0.1: - version "1.1.3" - resolved "https://registry.yarnpkg.com/defer-to-connect/-/defer-to-connect-1.1.3.tgz#331ae050c08dcf789f8c83a7b81f0ed94f4ac591" - integrity sha512-0ISdNousHvZT2EiFlZeZAHBUvSxmKswVCEf8hW7KWgG4a8MVEu/3Vb6uWYozkjylyCxe0JBIiRB1jV45S70WVQ== - -defer-to-connect@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/defer-to-connect/-/defer-to-connect-2.0.0.tgz#83d6b199db041593ac84d781b5222308ccf4c2c1" - integrity sha512-bYL2d05vOSf1JEZNx5vSAtPuBMkX8K9EUutg7zlKvTqKXHt7RhWJFbmd7qakVuf13i+IkGmp6FwSsONOf6VYIg== - -delayed-stream@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" - integrity sha1-3zrhmayt+31ECqrgsp4icrJOxhk= - -deprecated-obj@2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/deprecated-obj/-/deprecated-obj-2.0.0.tgz#e6ba93a3989f6ed18d685e7d99fb8d469b4beffc" - integrity sha512-CkdywZC2rJ8RGh+y3MM1fw1EJ4oO/oNExGbRFv0AQoMS+faTd3nO7slYjkj/6t8OnIMUE+wxh6G97YHhK1ytrw== - dependencies: - flat "^5.0.2" - lodash "^4.17.20" - -deprecation@^2.0.0, deprecation@^2.3.1: - version "2.3.1" - resolved "https://registry.yarnpkg.com/deprecation/-/deprecation-2.3.1.tgz#6368cbdb40abf3373b525ac87e4a260c3a700919" - integrity sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ== - -dir-glob@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f" - integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA== - dependencies: - path-type "^4.0.0" - -dot-prop@^5.2.0: - version "5.3.0" - resolved 
"https://registry.yarnpkg.com/dot-prop/-/dot-prop-5.3.0.tgz#90ccce708cd9cd82cc4dc8c3ddd9abdd55b20e88" - integrity sha512-QM8q3zDe58hqUqjraQOmzZ1LIH9SWQJTlEKCH4kJ2oQvLZk7RbQXvtDM2XEq3fwkV9CCvvH4LA0AV+ogFsBM2Q== - dependencies: - is-obj "^2.0.0" - -duplexer3@^0.1.4: - version "0.1.4" - resolved "https://registry.yarnpkg.com/duplexer3/-/duplexer3-0.1.4.tgz#ee01dd1cac0ed3cbc7fdbea37dc0a8f1ce002ce2" - integrity sha1-7gHdHKwO08vH/b6jfcCo8c4ALOI= - -emoji-regex@^7.0.1: - version "7.0.3" - resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-7.0.3.tgz#933a04052860c85e83c122479c4748a8e4c72156" - integrity sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA== - -emoji-regex@^8.0.0: - version "8.0.0" - resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37" - integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A== - -end-of-stream@^1.1.0: - version "1.4.4" - resolved "https://registry.yarnpkg.com/end-of-stream/-/end-of-stream-1.4.4.tgz#5ae64a5f45057baf3626ec14da0ca5e4b2431eb0" - integrity sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q== - dependencies: - once "^1.4.0" - -error-ex@^1.3.1: - version "1.3.2" - resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf" - integrity sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g== - dependencies: - is-arrayish "^0.2.1" - -escape-goat@^2.0.0: - version "2.1.1" - resolved "https://registry.yarnpkg.com/escape-goat/-/escape-goat-2.1.1.tgz#1b2dc77003676c457ec760b2dc68edb648188675" - integrity sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q== - -escape-string-regexp@^1.0.5: - version "1.0.5" - resolved 
"https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4" - integrity sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ= - -execa@4.0.3, execa@^4.0.2: - version "4.0.3" - resolved "https://registry.yarnpkg.com/execa/-/execa-4.0.3.tgz#0a34dabbad6d66100bd6f2c576c8669403f317f2" - integrity sha512-WFDXGHckXPWZX19t1kCsXzOpqX9LWYNqn4C+HqZlk/V0imTkzJZqf87ZBhvpHaftERYknpk0fjSylnXVlVgI0A== - dependencies: - cross-spawn "^7.0.0" - get-stream "^5.0.0" - human-signals "^1.1.1" - is-stream "^2.0.0" - merge-stream "^2.0.0" - npm-run-path "^4.0.0" - onetime "^5.1.0" - signal-exit "^3.0.2" - strip-final-newline "^2.0.0" - -external-editor@^3.0.3, external-editor@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/external-editor/-/external-editor-3.1.0.tgz#cb03f740befae03ea4d283caed2741a83f335495" - integrity sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew== - dependencies: - chardet "^0.7.0" - iconv-lite "^0.4.24" - tmp "^0.0.33" - -fast-glob@^3.1.1: - version "3.2.4" - resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.2.4.tgz#d20aefbf99579383e7f3cc66529158c9b98554d3" - integrity sha512-kr/Oo6PX51265qeuCYsyGypiO5uJFgBS0jksyG7FUeCyQzNwYnzrNIMR1NXfkZXsMYXYLRAHgISHBz8gQcxKHQ== - dependencies: - "@nodelib/fs.stat" "^2.0.2" - "@nodelib/fs.walk" "^1.2.3" - glob-parent "^5.1.0" - merge2 "^1.3.0" - micromatch "^4.0.2" - picomatch "^2.2.1" - -fastq@^1.6.0: - version "1.8.0" - resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.8.0.tgz#550e1f9f59bbc65fe185cb6a9b4d95357107f481" - integrity sha512-SMIZoZdLh/fgofivvIkmknUXyPnvxRE3DhtZ5Me3Mrsk5gyPL42F0xr51TdRXskBxHfMp+07bcYzfsYEsSQA9Q== - dependencies: - reusify "^1.0.4" - -figures@^3.0.0: - version "3.2.0" - resolved "https://registry.yarnpkg.com/figures/-/figures-3.2.0.tgz#625c18bd293c604dc4a8ddb2febf0c88341746af" - integrity 
sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg== - dependencies: - escape-string-regexp "^1.0.5" - -fill-range@^7.0.1: - version "7.0.1" - resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" - integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== - dependencies: - to-regex-range "^5.0.1" - -find-up@5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/find-up/-/find-up-5.0.0.tgz#4c92819ecb7083561e4f4a240a86be5198f536fc" - integrity sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng== - dependencies: - locate-path "^6.0.0" - path-exists "^4.0.0" - -flat@^5.0.2: - version "5.0.2" - resolved "https://registry.yarnpkg.com/flat/-/flat-5.0.2.tgz#8ca6fe332069ffa9d324c327198c598259ceb241" - integrity sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ== - -form-data@3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/form-data/-/form-data-3.0.0.tgz#31b7e39c85f1355b7139ee0c647cf0de7f83c682" - integrity sha512-CKMFDglpbMi6PyN+brwB9Q/GOw0eAnsrEZDgcsH5Krhz5Od/haKHAX0NmQfha2zPPz0JpWzA7GJHGSnvCRLWsg== - dependencies: - asynckit "^0.4.0" - combined-stream "^1.0.8" - mime-types "^2.1.12" - -fs.realpath@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/fs.realpath/-/fs.realpath-1.0.0.tgz#1504ad2523158caa40db4a2787cb01411994ea4f" - integrity sha1-FQStJSMVjKpA20onh8sBQRmU6k8= - -get-stream@^4.1.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/get-stream/-/get-stream-4.1.0.tgz#c1b255575f3dc21d59bfc79cd3d2b46b1c3a54b5" - integrity sha512-GMat4EJ5161kIy2HevLlr4luNjBgvmj413KaQA7jt4V8B4RDsfpHk7WQ9GVqfYyyx8OS/L66Kox+rJRNklLK7w== - dependencies: - pump "^3.0.0" - -get-stream@^5.0.0, get-stream@^5.1.0: - version "5.2.0" - resolved 
"https://registry.yarnpkg.com/get-stream/-/get-stream-5.2.0.tgz#4966a1795ee5ace65e706c4b7beb71257d6e22d3" - integrity sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA== - dependencies: - pump "^3.0.0" - -git-up@^4.0.0: - version "4.0.2" - resolved "https://registry.yarnpkg.com/git-up/-/git-up-4.0.2.tgz#10c3d731051b366dc19d3df454bfca3f77913a7c" - integrity sha512-kbuvus1dWQB2sSW4cbfTeGpCMd8ge9jx9RKnhXhuJ7tnvT+NIrTVfYZxjtflZddQYcmdOTlkAcjmx7bor+15AQ== - dependencies: - is-ssh "^1.3.0" - parse-url "^5.0.0" - -git-url-parse@11.3.0: - version "11.3.0" - resolved "https://registry.yarnpkg.com/git-url-parse/-/git-url-parse-11.3.0.tgz#1515b4574c4eb2efda7d25cc50b29ce8beaefaae" - integrity sha512-i3XNa8IKmqnUqWBcdWBjOcnyZYfN3C1WRvnKI6ouFWwsXCZEnlgbwbm55ZpJ3OJMhfEP/ryFhqW8bBhej3C5Ug== - dependencies: - git-up "^4.0.0" - -glob-parent@^5.1.0: - version "5.1.1" - resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.1.tgz#b6c1ef417c4e5663ea498f1c45afac6916bbc229" - integrity sha512-FnI+VGOpnlGHWZxthPGR+QhR78fuiK0sNLkHQv+bL9fQi57lNNdquIbna/WrfROrolq8GK5Ek6BiMwqL/voRYQ== - dependencies: - is-glob "^4.0.1" - -glob@^7.0.0: - version "7.1.6" - resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.6.tgz#141f33b81a7c2492e125594307480c46679278a6" - integrity sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA== - dependencies: - fs.realpath "^1.0.0" - inflight "^1.0.4" - inherits "2" - minimatch "^3.0.4" - once "^1.3.0" - path-is-absolute "^1.0.0" - -global-dirs@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/global-dirs/-/global-dirs-2.0.1.tgz#acdf3bb6685bcd55cb35e8a052266569e9469201" - integrity sha512-5HqUqdhkEovj2Of/ms3IeS/EekcO54ytHRLV4PEY2rhRwrHXLQjeVEES0Lhka0xwNDtGYn58wyC4s5+MHsOO6A== - dependencies: - ini "^1.3.5" - -globby@11.0.1: - version "11.0.1" - resolved "https://registry.yarnpkg.com/globby/-/globby-11.0.1.tgz#9a2bf107a068f3ffeabc49ad702c79ede8cfd357" 
- integrity sha512-iH9RmgwCmUJHi2z5o2l3eTtGBtXek1OYlHrbcxOYugyHLmAsZrPj43OtHThd62Buh/Vv6VyCBD2bdyWcGNQqoQ== - dependencies: - array-union "^2.1.0" - dir-glob "^3.0.1" - fast-glob "^3.1.1" - ignore "^5.1.4" - merge2 "^1.3.0" - slash "^3.0.0" - -got@11.7.0: - version "11.7.0" - resolved "https://registry.yarnpkg.com/got/-/got-11.7.0.tgz#a386360305571a74548872e674932b4ef70d3b24" - integrity sha512-7en2XwH2MEqOsrK0xaKhbWibBoZqy+f1RSUoIeF1BLcnf+pyQdDsljWMfmOh+QKJwuvDIiKx38GtPh5wFdGGjg== - dependencies: - "@sindresorhus/is" "^3.1.1" - "@szmarczak/http-timer" "^4.0.5" - "@types/cacheable-request" "^6.0.1" - "@types/responselike" "^1.0.0" - cacheable-lookup "^5.0.3" - cacheable-request "^7.0.1" - decompress-response "^6.0.0" - http2-wrapper "^1.0.0-beta.5.2" - lowercase-keys "^2.0.0" - p-cancelable "^2.0.0" - responselike "^2.0.0" - -got@^9.6.0: - version "9.6.0" - resolved "https://registry.yarnpkg.com/got/-/got-9.6.0.tgz#edf45e7d67f99545705de1f7bbeeeb121765ed85" - integrity sha512-R7eWptXuGYxwijs0eV+v3o6+XH1IqVK8dJOEecQfTmkncw9AV4dcw/Dhxi8MdlqPthxxpZyizMzyg8RTmEsG+Q== - dependencies: - "@sindresorhus/is" "^0.14.0" - "@szmarczak/http-timer" "^1.1.2" - cacheable-request "^6.0.0" - decompress-response "^3.3.0" - duplexer3 "^0.1.4" - get-stream "^4.1.0" - lowercase-keys "^1.0.1" - mimic-response "^1.0.1" - p-cancelable "^1.0.0" - to-readable-stream "^1.0.0" - url-parse-lax "^3.0.0" - -graceful-fs@^4.1.2: - version "4.2.4" - resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.2.4.tgz#2256bde14d3632958c465ebc96dc467ca07a29fb" - integrity sha512-WjKPNJF79dtJAVniUlGGWHYGz2jWxT6VhN/4m1NdkbZ2nOsEF+cI1Edgql5zCRhs/VsQYRvrXctxktVXZUkixw== - -has-flag@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd" - integrity sha1-tdRU3CGZriJWmfNGfloH87lVuv0= - -has-flag@^4.0.0: - version "4.0.0" - resolved 
"https://registry.yarnpkg.com/has-flag/-/has-flag-4.0.0.tgz#944771fd9c81c81265c4d6941860da06bb59479b" - integrity sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ== - -has-yarn@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/has-yarn/-/has-yarn-2.1.0.tgz#137e11354a7b5bf11aa5cb649cf0c6f3ff2b2e77" - integrity sha512-UqBRqi4ju7T+TqGNdqAO0PaSVGsDGJUBQvk9eUWNGRY1CFGDzYhLWoM7JQEemnlvVcv/YEmc2wNW8BC24EnUsw== - -http-cache-semantics@^4.0.0: - version "4.1.0" - resolved "https://registry.yarnpkg.com/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz#49e91c5cbf36c9b94bcfcd71c23d5249ec74e390" - integrity sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ== - -http2-wrapper@^1.0.0-beta.5.2: - version "1.0.0-beta.5.2" - resolved "https://registry.yarnpkg.com/http2-wrapper/-/http2-wrapper-1.0.0-beta.5.2.tgz#8b923deb90144aea65cf834b016a340fc98556f3" - integrity sha512-xYz9goEyBnC8XwXDTuC/MZ6t+MrKVQZOk4s7+PaDkwIsQd8IwqvM+0M6bA/2lvG8GHXcPdf+MejTUeO2LCPCeQ== - dependencies: - quick-lru "^5.1.1" - resolve-alpn "^1.0.0" - -human-signals@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-1.1.1.tgz#c5b1cd14f50aeae09ab6c59fe63ba3395fe4dfa3" - integrity sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw== - -iconv-lite@^0.4.24: - version "0.4.24" - resolved "https://registry.yarnpkg.com/iconv-lite/-/iconv-lite-0.4.24.tgz#2022b4b25fbddc21d2f524974a474aafe733908b" - integrity sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA== - dependencies: - safer-buffer ">= 2.1.2 < 3" - -ignore@^5.1.4: - version "5.1.8" - resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.1.8.tgz#f150a8b50a34289b33e22f5889abd4d8016f0e57" - integrity sha512-BMpfD7PpiETpBl/A6S498BaIJ6Y/ABT93ETbby2fP00v4EbvPBXWEoaR1UBPKs3iR53pJY7EtZk5KACI57i1Uw== - -import-cwd@3.0.0: - version 
"3.0.0" - resolved "https://registry.yarnpkg.com/import-cwd/-/import-cwd-3.0.0.tgz#20845547718015126ea9b3676b7592fb8bd4cf92" - integrity sha512-4pnzH16plW+hgvRECbDWpQl3cqtvSofHWh44met7ESfZ8UZOWWddm8hEyDTqREJ9RbYHY8gi8DqmaelApoOGMg== - dependencies: - import-from "^3.0.0" - -import-fresh@^3.2.1: - version "3.2.1" - resolved "https://registry.yarnpkg.com/import-fresh/-/import-fresh-3.2.1.tgz#633ff618506e793af5ac91bf48b72677e15cbe66" - integrity sha512-6e1q1cnWP2RXD9/keSkxHScg508CdXqXWgWBaETNhyuBFz+kUZlKboh+ISK+bU++DmbHimVBrOz/zzPe0sZ3sQ== - dependencies: - parent-module "^1.0.0" - resolve-from "^4.0.0" - -import-from@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/import-from/-/import-from-3.0.0.tgz#055cfec38cd5a27d8057ca51376d7d3bf0891966" - integrity sha512-CiuXOFFSzkU5x/CR0+z7T91Iht4CXgfCxVOFRhh2Zyhg5wOpWvvDLQUsWl+gcN+QscYBjez8hDCt85O7RLDttQ== - dependencies: - resolve-from "^5.0.0" - -import-lazy@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/import-lazy/-/import-lazy-2.1.0.tgz#05698e3d45c88e8d7e9d92cb0584e77f096f3e43" - integrity sha1-BWmOPUXIjo1+nZLLBYTnfwlvPkM= - -imurmurhash@^0.1.4: - version "0.1.4" - resolved "https://registry.yarnpkg.com/imurmurhash/-/imurmurhash-0.1.4.tgz#9218b9b2b928a238b13dc4fb6b6d576f231453ea" - integrity sha1-khi5srkoojixPcT7a21XbyMUU+o= - -inflight@^1.0.4: - version "1.0.6" - resolved "https://registry.yarnpkg.com/inflight/-/inflight-1.0.6.tgz#49bd6331d7d02d0c09bc910a1075ba8165b56df9" - integrity sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk= - dependencies: - once "^1.3.0" - wrappy "1" - -inherits@2: - version "2.0.4" - resolved "https://registry.yarnpkg.com/inherits/-/inherits-2.0.4.tgz#0fa2c64f932917c3433a0ded55363aae37416b7c" - integrity sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ== - -ini@^1.3.5, ini@~1.3.0: - version "1.3.5" - resolved "https://registry.yarnpkg.com/ini/-/ini-1.3.5.tgz#eee25f56db1c9ec6085e0c22778083f596abf927" - integrity 
sha512-RZY5huIKCMRWDUqZlEi72f/lmXKMvuszcMBduliQ3nnWbx9X/ZBQO7DijMEYS9EhHBb2qacRUMtC7svLwe0lcw== - -inquirer@7.3.3: - version "7.3.3" - resolved "https://registry.yarnpkg.com/inquirer/-/inquirer-7.3.3.tgz#04d176b2af04afc157a83fd7c100e98ee0aad003" - integrity sha512-JG3eIAj5V9CwcGvuOmoo6LB9kbAYT8HXffUl6memuszlwDC/qvFAJw49XJ5NROSFNPxp3iQg1GqkFhaY/CR0IA== - dependencies: - ansi-escapes "^4.2.1" - chalk "^4.1.0" - cli-cursor "^3.1.0" - cli-width "^3.0.0" - external-editor "^3.0.3" - figures "^3.0.0" - lodash "^4.17.19" - mute-stream "0.0.8" - run-async "^2.4.0" - rxjs "^6.6.0" - string-width "^4.1.0" - strip-ansi "^6.0.0" - through "^2.3.6" - -interpret@^1.0.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/interpret/-/interpret-1.4.0.tgz#665ab8bc4da27a774a40584e812e3e0fa45b1a1e" - integrity sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA== - -is-arrayish@^0.2.1: - version "0.2.1" - resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d" - integrity sha1-d8mYQFJ6qOyxqLppe4BkWnqSap0= - -is-ci@2.0.0, is-ci@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/is-ci/-/is-ci-2.0.0.tgz#6bc6334181810e04b5c22b3d589fdca55026404c" - integrity sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w== - dependencies: - ci-info "^2.0.0" - -is-extglob@^2.1.1: - version "2.1.1" - resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2" - integrity sha1-qIwCU1eR8C7TfHahueqXc8gz+MI= - -is-fullwidth-code-point@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz#a3b30a5c4f199183167aaab93beefae3ddfb654f" - integrity sha1-o7MKXE8ZkYMWeqq5O+764937ZU8= - -is-fullwidth-code-point@^3.0.0: - version "3.0.0" - resolved 
"https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d" - integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg== - -is-glob@^4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.1.tgz#7567dbe9f2f5e2467bc77ab83c4a29482407a5dc" - integrity sha512-5G0tKtBTFImOqDnLB2hG6Bp2qcKEFduo4tZu9MT/H6NQv/ghhy30o55ufafxJ/LdH79LLs2Kfrn85TLKyA7BUg== - dependencies: - is-extglob "^2.1.1" - -is-installed-globally@^0.3.1: - version "0.3.2" - resolved "https://registry.yarnpkg.com/is-installed-globally/-/is-installed-globally-0.3.2.tgz#fd3efa79ee670d1187233182d5b0a1dd00313141" - integrity sha512-wZ8x1js7Ia0kecP/CHM/3ABkAmujX7WPvQk6uu3Fly/Mk44pySulQpnHG46OMjHGXApINnV4QhY3SWnECO2z5g== - dependencies: - global-dirs "^2.0.1" - is-path-inside "^3.0.1" - -is-interactive@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-interactive/-/is-interactive-1.0.0.tgz#cea6e6ae5c870a7b0a0004070b7b587e0252912e" - integrity sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w== - -is-npm@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/is-npm/-/is-npm-5.0.0.tgz#43e8d65cc56e1b67f8d47262cf667099193f45a8" - integrity sha512-WW/rQLOazUq+ST/bCAVBp/2oMERWLsR7OrKyt052dNDk4DHcDE0/7QSXITlmi+VBcV13DfIbysG3tZJm5RfdBA== - -is-number@^7.0.0: - version "7.0.0" - resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b" - integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng== - -is-obj@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/is-obj/-/is-obj-2.0.0.tgz#473fb05d973705e3fd9620545018ca8e22ef4982" - integrity sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w== - -is-path-inside@^3.0.1: - version "3.0.2" - resolved 
"https://registry.yarnpkg.com/is-path-inside/-/is-path-inside-3.0.2.tgz#f5220fc82a3e233757291dddc9c5877f2a1f3017" - integrity sha512-/2UGPSgmtqwo1ktx8NDHjuPwZWmHhO+gj0f93EkhLB5RgW9RZevWYYlIkS6zePc6U2WpOdQYIwHe9YC4DWEBVg== - -is-plain-object@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/is-plain-object/-/is-plain-object-5.0.0.tgz#4427f50ab3429e9025ea7d52e9043a9ef4159344" - integrity sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q== - -is-ssh@^1.3.0: - version "1.3.2" - resolved "https://registry.yarnpkg.com/is-ssh/-/is-ssh-1.3.2.tgz#a4b82ab63d73976fd8263cceee27f99a88bdae2b" - integrity sha512-elEw0/0c2UscLrNG+OAorbP539E3rhliKPg+hDMWN9VwrDXfYK+4PBEykDPfxlYYtQvl84TascnQyobfQLHEhQ== - dependencies: - protocols "^1.1.0" - -is-stream@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-2.0.0.tgz#bde9c32680d6fae04129d6ac9d921ce7815f78e3" - integrity sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw== - -is-typedarray@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/is-typedarray/-/is-typedarray-1.0.0.tgz#e479c80858df0c1b11ddda6940f96011fcda4a9a" - integrity sha1-5HnICFjfDBsR3dppQPlgEfzaSpo= - -is-yarn-global@^0.3.0: - version "0.3.0" - resolved "https://registry.yarnpkg.com/is-yarn-global/-/is-yarn-global-0.3.0.tgz#d502d3382590ea3004893746754c89139973e232" - integrity sha512-VjSeb/lHmkoyd8ryPVIKvOCn4D1koMqY+vqyjjUfc3xyKtP4dYOxM44sZrnqQSzSds3xyOrUTLTC9LVCVgLngw== - -isexe@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10" - integrity sha1-6PvzdNxVb/iUehDcsFctYz8s+hA= - -js-tokens@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499" - integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ== - 
-json-buffer@3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.0.tgz#5b1f397afc75d677bde8bcfc0e47e1f9a3d9a898" - integrity sha1-Wx85evx11ne96Lz8Dkfh+aPZqJg= - -json-buffer@3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/json-buffer/-/json-buffer-3.0.1.tgz#9338802a30d3b6605fbe0613e094008ca8c05a13" - integrity sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ== - -json-parse-even-better-errors@^2.3.0: - version "2.3.1" - resolved "https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz#7c47805a94319928e05777405dc12e1f7a4ee02d" - integrity sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w== - -keyv@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/keyv/-/keyv-3.1.0.tgz#ecc228486f69991e49e9476485a5be1e8fc5c4d9" - integrity sha512-9ykJ/46SN/9KPM/sichzQ7OvXyGDYKGTaDlKMGCAlg2UK8KRy4jb0d8sFc+0Tt0YYnThq8X2RZgCg74RPxgcVA== - dependencies: - json-buffer "3.0.0" - -keyv@^4.0.0: - version "4.0.3" - resolved "https://registry.yarnpkg.com/keyv/-/keyv-4.0.3.tgz#4f3aa98de254803cafcd2896734108daa35e4254" - integrity sha512-zdGa2TOpSZPq5mU6iowDARnMBZgtCqJ11dJROFi6tg6kTn4nuUdU09lFyLFSaHrWqpIJ+EBq4E8/Dc0Vx5vLdA== - dependencies: - json-buffer "3.0.1" - -latest-version@^5.0.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/latest-version/-/latest-version-5.1.0.tgz#119dfe908fe38d15dfa43ecd13fa12ec8832face" - integrity sha512-weT+r0kTkRQdCdYCNtkMwWXQTMEswKrFBkm4ckQOMVhhqhIMI1UT2hMj+1iigIhgSZm5gTmrRXBNoGUgaTY1xA== - dependencies: - package-json "^6.3.0" - -lines-and-columns@^1.1.6: - version "1.1.6" - resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.1.6.tgz#1c00c743b433cd0a4e80758f7b64a57440d9ff00" - integrity sha1-HADHQ7QzzQpOgHWPe2SldEDZ/wA= - -locate-path@^6.0.0: - version "6.0.0" - resolved 
"https://registry.yarnpkg.com/locate-path/-/locate-path-6.0.0.tgz#55321eb309febbc59c4801d931a72452a681d286" - integrity sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw== - dependencies: - p-locate "^5.0.0" - -lodash@4.17.20, lodash@^4.17.19, lodash@^4.17.20: - version "4.17.20" - resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.20.tgz#b44a9b6297bcb698f1c51a3545a2b3b368d59c52" - integrity sha512-PlhdFcillOINfeV7Ni6oF1TAEayyZBoZ8bcshTHqOYJYlrqzRK5hagpagky5o4HfCzzd1TRkXPMFq6cKk9rGmA== - -lodash@^4.17.21: - version "4.17.21" - resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" - integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== - -log-symbols@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-4.0.0.tgz#69b3cc46d20f448eccdb75ea1fa733d9e821c920" - integrity sha512-FN8JBzLx6CzeMrB0tg6pqlGU1wCrXW+ZXGH481kfsBqer0hToTIiHdjH4Mq8xJUbvATujKCvaREGWpGUionraA== - dependencies: - chalk "^4.0.0" - -lowercase-keys@^1.0.0, lowercase-keys@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-1.0.1.tgz#6f9e30b47084d971a7c820ff15a6c5167b74c26f" - integrity sha512-G2Lj61tXDnVFFOi8VZds+SoQjtQC3dgokKdDG2mTm1tx4m50NUHBOZSBwQQHyy0V12A0JTG4icfZQH+xPyh8VA== - -lowercase-keys@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/lowercase-keys/-/lowercase-keys-2.0.0.tgz#2603e78b7b4b0006cbca2fbcc8a3202558ac9479" - integrity sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA== - -macos-release@^2.2.0: - version "2.4.1" - resolved "https://registry.yarnpkg.com/macos-release/-/macos-release-2.4.1.tgz#64033d0ec6a5e6375155a74b1a1eba8e509820ac" - integrity sha512-H/QHeBIN1fIGJX517pvK8IEK53yQOW7YcEI55oYtgjDdoCQQz7eJS94qt5kNrscReEyuD/JcdFCm2XBEcGOITg== - -make-dir@^3.0.0: - version "3.1.0" - 
resolved "https://registry.yarnpkg.com/make-dir/-/make-dir-3.1.0.tgz#415e967046b3a7f1d185277d84aa58203726a13f" - integrity sha512-g3FeP20LNwhALb/6Cz6Dd4F2ngze0jz7tbzrD2wAV+o9FeNHe4rL+yK2md0J/fiSf1sa1ADhXqi5+oVwOM/eGw== - dependencies: - semver "^6.0.0" - -merge-stream@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/merge-stream/-/merge-stream-2.0.0.tgz#52823629a14dd00c9770fb6ad47dc6310f2c1f60" - integrity sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w== - -merge2@^1.3.0: - version "1.4.1" - resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae" - integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg== - -micromatch@^4.0.2: - version "4.0.2" - resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.2.tgz#4fcb0999bf9fbc2fcbdd212f6d629b9a56c39259" - integrity sha512-y7FpHSbMUMoyPbYUSzO6PaZ6FyRnQOpHuKwbo1G+Knck95XVU4QAiKdGEnj5wwoS7PlOgthX/09u5iFJ+aYf5Q== - dependencies: - braces "^3.0.1" - picomatch "^2.0.5" - -mime-db@1.44.0: - version "1.44.0" - resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.44.0.tgz#fa11c5eb0aca1334b4233cb4d52f10c5a6272f92" - integrity sha512-/NOTfLrsPBVeH7YtFPgsVWveuL+4SjjYxaQ1xtM1KMFj7HdxlBlxeyNLzhyJVx7r4rZGJAZ/6lkKCitSc/Nmpg== - -mime-types@2.1.27, mime-types@^2.1.12: - version "2.1.27" - resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.27.tgz#47949f98e279ea53119f5722e0f34e529bec009f" - integrity sha512-JIhqnCasI9yD+SsmkquHBxTSEuZdQX5BuQnS2Vc7puQQQ+8yiP5AY5uWhpdv4YL4VM5c6iliiYWPgJ/nJQLp7w== - dependencies: - mime-db "1.44.0" - -mimic-fn@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b" - integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg== - -mimic-response@^1.0.0, mimic-response@^1.0.1: - version 
"1.0.1" - resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-1.0.1.tgz#4923538878eef42063cb8a3e3b0798781487ab1b" - integrity sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ== - -mimic-response@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/mimic-response/-/mimic-response-3.1.0.tgz#2d1d59af9c1b129815accc2c46a022a5ce1fa3c9" - integrity sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ== - -minimatch@^3.0.4: - version "3.0.4" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.4.tgz#5166e286457f03306064be5497e8dbb0c3d32083" - integrity sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA== - dependencies: - brace-expansion "^1.1.7" - -minimist@^1.2.0: - version "1.2.5" - resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.5.tgz#67d66014b66a6a8aaa0c083c5fd58df4e4e97602" - integrity sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw== - -ms@2.1.2: - version "2.1.2" - resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" - integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== - -mute-stream@0.0.8: - version "0.0.8" - resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.8.tgz#1630c42b2251ff81e2a283de96a5497ea92e5e0d" - integrity sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA== - -node-fetch@^2.6.1: - version "2.6.1" - resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.6.1.tgz#045bd323631f76ed2e2b55573394416b639a0052" - integrity sha512-V4aYg89jEoVRxRb2fJdAg8FHvI7cEyYdVAh94HH0UIK8oJxUfkjlDQN9RbMx+bEjP7+ggMiFRprSti032Oipxw== - -normalize-url@^3.3.0: - version "3.3.0" - resolved 
"https://registry.yarnpkg.com/normalize-url/-/normalize-url-3.3.0.tgz#b2e1c4dc4f7c6d57743df733a4f5978d18650559" - integrity sha512-U+JJi7duF1o+u2pynbp2zXDW2/PADgC30f0GsHZtRh+HOcXHnw137TrNlyxxRvWW5fjKd3bcLHPxofWuCjaeZg== - -normalize-url@^4.1.0: - version "4.5.0" - resolved "https://registry.yarnpkg.com/normalize-url/-/normalize-url-4.5.0.tgz#453354087e6ca96957bd8f5baf753f5982142129" - integrity sha512-2s47yzUxdexf1OhyRi4Em83iQk0aPvwTddtFz4hnSSw9dCEsLEGf6SwIO8ss/19S9iBb5sJaOuTvTGDeZI00BQ== - -npm-run-path@^4.0.0: - version "4.0.1" - resolved "https://registry.yarnpkg.com/npm-run-path/-/npm-run-path-4.0.1.tgz#b7ecd1e5ed53da8e37a55e1c2269e0b97ed748ea" - integrity sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw== - dependencies: - path-key "^3.0.0" - -once@^1.3.0, once@^1.3.1, once@^1.4.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1" - integrity sha1-WDsap3WWHUsROsF9nFC6753Xa9E= - dependencies: - wrappy "1" - -onetime@^5.1.0: - version "5.1.2" - resolved "https://registry.yarnpkg.com/onetime/-/onetime-5.1.2.tgz#d0e96ebb56b07476df1dd9c4806e5237985ca45e" - integrity sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg== - dependencies: - mimic-fn "^2.1.0" - -ora@5.1.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/ora/-/ora-5.1.0.tgz#b188cf8cd2d4d9b13fd25383bc3e5cba352c94f8" - integrity sha512-9tXIMPvjZ7hPTbk8DFq1f7Kow/HU/pQYB60JbNq+QnGwcyhWVZaQ4hM9zQDEsPxw/muLpgiHSaumUZxCAmod/w== - dependencies: - chalk "^4.1.0" - cli-cursor "^3.1.0" - cli-spinners "^2.4.0" - is-interactive "^1.0.0" - log-symbols "^4.0.0" - mute-stream "0.0.8" - strip-ansi "^6.0.0" - wcwidth "^1.0.1" - -os-name@4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/os-name/-/os-name-4.0.0.tgz#6c05c09c41c15848ea74658d12c9606f0f286599" - integrity 
sha512-caABzDdJMbtykt7GmSogEat3faTKQhmZf0BS5l/pZGmP0vPWQjXWqOhbLyK+b6j2/DQPmEvYdzLXJXXLJNVDNg== - dependencies: - macos-release "^2.2.0" - windows-release "^4.0.0" - -os-tmpdir@~1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/os-tmpdir/-/os-tmpdir-1.0.2.tgz#bbe67406c79aa85c5cfec766fe5734555dfa1274" - integrity sha1-u+Z0BseaqFxc/sdm/lc0VV36EnQ= - -p-cancelable@^1.0.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-1.1.0.tgz#d078d15a3af409220c886f1d9a0ca2e441ab26cc" - integrity sha512-s73XxOZ4zpt1edZYZzvhqFa6uvQc1vwUa0K0BdtIZgQMAJj9IbebH+JkgKZc9h+B05PKHLOTl4ajG1BmNrVZlw== - -p-cancelable@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-2.0.0.tgz#4a3740f5bdaf5ed5d7c3e34882c6fb5d6b266a6e" - integrity sha512-wvPXDmbMmu2ksjkB4Z3nZWTSkJEb9lqVdMaCKpZUGJG9TMiNp9XcbG3fn9fPKjem04fJMJnXoyFPk2FmgiaiNg== - -p-limit@^3.0.2: - version "3.0.2" - resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-3.0.2.tgz#1664e010af3cadc681baafd3e2a437be7b0fb5fe" - integrity sha512-iwqZSOoWIW+Ew4kAGUlN16J4M7OB3ysMLSZtnhmqx7njIHFPlxWBX8xo3lVTyFVq6mI/lL9qt2IsN1sHwaxJkg== - dependencies: - p-try "^2.0.0" - -p-locate@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-5.0.0.tgz#83c8315c6785005e3bd021839411c9e110e6d834" - integrity sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw== - dependencies: - p-limit "^3.0.2" - -p-try@^2.0.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" - integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== - -package-json@^6.3.0: - version "6.5.0" - resolved "https://registry.yarnpkg.com/package-json/-/package-json-6.5.0.tgz#6feedaca35e75725876d0b0e64974697fed145b0" - integrity 
sha512-k3bdm2n25tkyxcjSKzB5x8kfVxlMdgsbPr0GkZcwHsLpba6cBjqCt1KlcChKEvxHIcTB1FVMuwoijZ26xex5MQ== - dependencies: - got "^9.6.0" - registry-auth-token "^4.0.0" - registry-url "^5.0.0" - semver "^6.2.0" - -parent-module@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2" - integrity sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g== - dependencies: - callsites "^3.0.0" - -parse-json@5.1.0, parse-json@^5.0.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-5.1.0.tgz#f96088cdf24a8faa9aea9a009f2d9d942c999646" - integrity sha512-+mi/lmVVNKFNVyLXV31ERiy2CY5E1/F6QtJFEzoChPRwwngMNXRDQ9GJ5WdE2Z2P4AujsOi0/+2qHID68KwfIQ== - dependencies: - "@babel/code-frame" "^7.0.0" - error-ex "^1.3.1" - json-parse-even-better-errors "^2.3.0" - lines-and-columns "^1.1.6" - -parse-path@^4.0.0: - version "4.0.2" - resolved "https://registry.yarnpkg.com/parse-path/-/parse-path-4.0.2.tgz#ef14f0d3d77bae8dd4bc66563a4c151aac9e65aa" - integrity sha512-HSqVz6iuXSiL8C1ku5Gl1Z5cwDd9Wo0q8CoffdAghP6bz8pJa1tcMC+m4N+z6VAS8QdksnIGq1TB6EgR4vPR6w== - dependencies: - is-ssh "^1.3.0" - protocols "^1.4.0" - -parse-url@^5.0.0: - version "5.0.2" - resolved "https://registry.yarnpkg.com/parse-url/-/parse-url-5.0.2.tgz#856a3be1fcdf78dc93fc8b3791f169072d898b59" - integrity sha512-Czj+GIit4cdWtxo3ISZCvLiUjErSo0iI3wJ+q9Oi3QuMYTI6OZu+7cewMWZ+C1YAnKhYTk6/TLuhIgCypLthPA== - dependencies: - is-ssh "^1.3.0" - normalize-url "^3.3.0" - parse-path "^4.0.0" - protocols "^1.4.0" - -path-exists@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3" - integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w== - -path-is-absolute@^1.0.0: - version "1.0.1" - resolved 
"https://registry.yarnpkg.com/path-is-absolute/-/path-is-absolute-1.0.1.tgz#174b9268735534ffbc7ace6bf53a5a9e1b5c5f5f" - integrity sha1-F0uSaHNVNP+8es5r9TpanhtcX18= - -path-key@^3.0.0, path-key@^3.1.0: - version "3.1.1" - resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375" - integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q== - -path-parse@^1.0.6: - version "1.0.6" - resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.6.tgz#d62dbb5679405d72c4737ec58600e9ddcf06d24c" - integrity sha512-GSmOT2EbHrINBf9SR7CDELwlJ8AENk3Qn7OikK4nFYAu3Ote2+JYNVvkpAEQm3/TLNEJFD/xZJjzyxg3KBWOzw== - -path-type@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" - integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw== - -picomatch@^2.0.5, picomatch@^2.2.1: - version "2.2.2" - resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.2.2.tgz#21f333e9b6b8eaff02468f5146ea406d345f4dad" - integrity sha512-q0M/9eZHzmr0AulXyPwNfZjtwZ/RBZlbN3K3CErVrk50T2ASYI7Bye0EvekFY3IP1Nt2DHu0re+V2ZHIpMkuWg== - -prepend-http@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/prepend-http/-/prepend-http-2.0.0.tgz#e92434bfa5ea8c19f41cdfd401d741a3c819d897" - integrity sha1-6SQ0v6XqjBn0HN/UAddBo8gZ2Jc= - -protocols@^1.1.0, protocols@^1.4.0: - version "1.4.8" - resolved "https://registry.yarnpkg.com/protocols/-/protocols-1.4.8.tgz#48eea2d8f58d9644a4a32caae5d5db290a075ce8" - integrity sha512-IgjKyaUSjsROSO8/D49Ab7hP8mJgTYcqApOqdPhLoPxAplXmkp+zRvsrSQjFn5by0rhm4VH0GAUELIPpx7B1yg== - -pump@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/pump/-/pump-3.0.0.tgz#b4a2116815bde2f4e1ea602354e8c75565107a64" - integrity sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww== - dependencies: 
- end-of-stream "^1.1.0" - once "^1.3.1" - -pupa@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/pupa/-/pupa-2.0.1.tgz#dbdc9ff48ffbea4a26a069b6f9f7abb051008726" - integrity sha512-hEJH0s8PXLY/cdXh66tNEQGndDrIKNqNC5xmrysZy3i5C3oEoLna7YAOad+7u125+zH1HNXUmGEkrhb3c2VriA== - dependencies: - escape-goat "^2.0.0" - -quick-lru@^5.1.1: - version "5.1.1" - resolved "https://registry.yarnpkg.com/quick-lru/-/quick-lru-5.1.1.tgz#366493e6b3e42a3a6885e2e99d18f80fb7a8c932" - integrity sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA== - -rc@^1.2.8: - version "1.2.8" - resolved "https://registry.yarnpkg.com/rc/-/rc-1.2.8.tgz#cd924bf5200a075b83c188cd6b9e211b7fc0d3ed" - integrity sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw== - dependencies: - deep-extend "^0.6.0" - ini "~1.3.0" - minimist "^1.2.0" - strip-json-comments "~2.0.1" - -rechoir@^0.6.2: - version "0.6.2" - resolved "https://registry.yarnpkg.com/rechoir/-/rechoir-0.6.2.tgz#85204b54dba82d5742e28c96756ef43af50e3384" - integrity sha1-hSBLVNuoLVdC4oyWdW70OvUOM4Q= - dependencies: - resolve "^1.1.6" - -registry-auth-token@^4.0.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/registry-auth-token/-/registry-auth-token-4.2.0.tgz#1d37dffda72bbecd0f581e4715540213a65eb7da" - integrity sha512-P+lWzPrsgfN+UEpDS3U8AQKg/UjZX6mQSJueZj3EK+vNESoqBSpBUD3gmu4sF9lOsjXWjF11dQKUqemf3veq1w== - dependencies: - rc "^1.2.8" - -registry-url@^5.0.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/registry-url/-/registry-url-5.1.0.tgz#e98334b50d5434b81136b44ec638d9c2009c5009" - integrity sha512-8acYXXTI0AkQv6RAOjE3vOaIXZkT9wo4LOFbBKYQEEnnMNBpKqdUrI6S4NT0KPIo/WVvJ5tE/X5LF/TQUf0ekw== - dependencies: - rc "^1.2.8" - -release-it@^14.1.0: - version "14.1.0" - resolved "https://registry.yarnpkg.com/release-it/-/release-it-14.1.0.tgz#44dce71e35fcd57e0518c6dfdca1486a0c2889e6" - integrity 
sha512-S3yFUl7dv4D5+/Gqe7Qi3zKKDlH5KtrrZfSiNX6QanQ1B21htfzGWfU+JmE89aKJgZPmNG7W0vuiSGXoRzbFZA== - dependencies: - "@iarna/toml" "2.2.5" - "@octokit/rest" "18.0.6" - async-retry "1.3.1" - chalk "4.1.0" - cosmiconfig "7.0.0" - debug "4.2.0" - deprecated-obj "2.0.0" - execa "4.0.3" - find-up "5.0.0" - form-data "3.0.0" - git-url-parse "11.3.0" - globby "11.0.1" - got "11.7.0" - import-cwd "3.0.0" - inquirer "7.3.3" - is-ci "2.0.0" - lodash "4.17.20" - mime-types "2.1.27" - ora "5.1.0" - os-name "4.0.0" - parse-json "5.1.0" - semver "7.3.2" - shelljs "0.8.4" - update-notifier "5.0.0" - url-join "4.0.1" - uuid "8.3.1" - yaml "1.10.0" - yargs-parser "20.2.1" - -resolve-alpn@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/resolve-alpn/-/resolve-alpn-1.0.0.tgz#745ad60b3d6aff4b4a48e01b8c0bdc70959e0e8c" - integrity sha512-rTuiIEqFmGxne4IovivKSDzld2lWW9QCjqv80SYjPgf+gS35eaCAjaP54CCwGAwBtnCsvNLYtqxe1Nw+i6JEmA== - -resolve-from@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-4.0.0.tgz#4abcd852ad32dd7baabfe9b40e00a36db5f392e6" - integrity sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g== - -resolve-from@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/resolve-from/-/resolve-from-5.0.0.tgz#c35225843df8f776df21c57557bc087e9dfdfc69" - integrity sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw== - -resolve@^1.1.6: - version "1.17.0" - resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.17.0.tgz#b25941b54968231cc2d1bb76a79cb7f2c0bf8444" - integrity sha512-ic+7JYiV8Vi2yzQGFWOkiZD5Z9z7O2Zhm9XMaTxdJExKasieFCr+yXZ/WmXsckHiKl12ar0y6XiXDx3m4RHn1w== - dependencies: - path-parse "^1.0.6" - -responselike@^1.0.2: - version "1.0.2" - resolved "https://registry.yarnpkg.com/responselike/-/responselike-1.0.2.tgz#918720ef3b631c5642be068f15ade5a46f4ba1e7" - integrity sha1-kYcg7ztjHFZCvgaPFa3lpG9Loec= - dependencies: - 
lowercase-keys "^1.0.0" - -responselike@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/responselike/-/responselike-2.0.0.tgz#26391bcc3174f750f9a79eacc40a12a5c42d7723" - integrity sha512-xH48u3FTB9VsZw7R+vvgaKeLKzT6jOogbQhEe/jewwnZgzPcnyWui2Av6JpoYZF/91uueC+lqhWqeURw5/qhCw== - dependencies: - lowercase-keys "^2.0.0" - -restore-cursor@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/restore-cursor/-/restore-cursor-3.1.0.tgz#39f67c54b3a7a58cea5236d95cf0034239631f7e" - integrity sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA== - dependencies: - onetime "^5.1.0" - signal-exit "^3.0.2" - -retry@0.12.0: - version "0.12.0" - resolved "https://registry.yarnpkg.com/retry/-/retry-0.12.0.tgz#1b42a6266a21f07421d1b0b54b7dc167b01c013b" - integrity sha1-G0KmJmoh8HQh0bC1S33BZ7AcATs= - -reusify@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/reusify/-/reusify-1.0.4.tgz#90da382b1e126efc02146e90845a88db12925d76" - integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw== - -run-async@^2.4.0: - version "2.4.1" - resolved "https://registry.yarnpkg.com/run-async/-/run-async-2.4.1.tgz#8440eccf99ea3e70bd409d49aab88e10c189a455" - integrity sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ== - -run-parallel@^1.1.9: - version "1.1.9" - resolved "https://registry.yarnpkg.com/run-parallel/-/run-parallel-1.1.9.tgz#c9dd3a7cf9f4b2c4b6244e173a6ed866e61dd679" - integrity sha512-DEqnSRTDw/Tc3FXf49zedI638Z9onwUotBMiUFKmrO2sdFKIbXamXGQ3Axd4qgphxKB4kw/qP1w5kTxnfU1B9Q== - -rxjs@^6.6.0: - version "6.6.3" - resolved "https://registry.yarnpkg.com/rxjs/-/rxjs-6.6.3.tgz#8ca84635c4daa900c0d3967a6ee7ac60271ee552" - integrity sha512-trsQc+xYYXZ3urjOiJOuCOa5N3jAZ3eiSpQB5hIT8zGlL2QfnHLJ2r7GMkBGuIausdJN1OneaI6gQlsqNHHmZQ== - dependencies: - tslib "^1.9.0" - -"safer-buffer@>= 2.1.2 < 3": - version "2.1.2" - resolved 
"https://registry.yarnpkg.com/safer-buffer/-/safer-buffer-2.1.2.tgz#44fa161b0187b9549dd84bb91802f9bd8385cd6a" - integrity sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg== - -semver-diff@^3.1.1: - version "3.1.1" - resolved "https://registry.yarnpkg.com/semver-diff/-/semver-diff-3.1.1.tgz#05f77ce59f325e00e2706afd67bb506ddb1ca32b" - integrity sha512-GX0Ix/CJcHyB8c4ykpHGIAvLyOwOobtM/8d+TQkAd81/bEjgPHrfba41Vpesr7jX/t8Uh+R3EX9eAS5be+jQYg== - dependencies: - semver "^6.3.0" - -semver@7.3.2, semver@^7.3.2: - version "7.3.2" - resolved "https://registry.yarnpkg.com/semver/-/semver-7.3.2.tgz#604962b052b81ed0786aae84389ffba70ffd3938" - integrity sha512-OrOb32TeeambH6UrhtShmF7CRDqhL6/5XpPNp2DuRH6+9QLw/orhp72j87v8Qa1ScDkvrrBNpZcDejAirJmfXQ== - -semver@^6.0.0, semver@^6.2.0, semver@^6.3.0: - version "6.3.0" - resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.0.tgz#ee0a64c8af5e8ceea67687b133761e1becbd1d3d" - integrity sha512-b39TBaTSfV6yBrapU89p5fKekE2m/NwnDocOVruQFS1/veMgdzuPcnOM34M6CwxW8jH/lxEa5rBoDeUwu5HHTw== - -shebang-command@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea" - integrity sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA== - dependencies: - shebang-regex "^3.0.0" - -shebang-regex@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172" - integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A== - -shelljs@0.8.4: - version "0.8.4" - resolved "https://registry.yarnpkg.com/shelljs/-/shelljs-0.8.4.tgz#de7684feeb767f8716b326078a8a00875890e3c2" - integrity sha512-7gk3UZ9kOfPLIAbslLzyWeGiEqx9e3rxwZM0KE6EL8GlGwjym9Mrlx5/p33bWTu9YG6vcS4MBxYZDHYr5lr8BQ== - dependencies: - glob "^7.0.0" - interpret "^1.0.0" - rechoir "^0.6.2" - 
-signal-exit@^3.0.2: - version "3.0.3" - resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-3.0.3.tgz#a1410c2edd8f077b08b4e253c8eacfcaf057461c" - integrity sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA== - -slash@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634" - integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q== - -string-width@^3.0.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-3.1.0.tgz#22767be21b62af1081574306f69ac51b62203961" - integrity sha512-vafcv6KjVZKSgz06oM/H6GDBrAtz8vdhQakGjFIvNrHA6y3HCF1CInLy+QLq8dTJPQ1b+KDUqDFctkdRW44e1w== - dependencies: - emoji-regex "^7.0.1" - is-fullwidth-code-point "^2.0.0" - strip-ansi "^5.1.0" - -string-width@^4.0.0, string-width@^4.1.0: - version "4.2.0" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.0.tgz#952182c46cc7b2c313d1596e623992bd163b72b5" - integrity sha512-zUz5JD+tgqtuDjMhwIg5uFVV3dtqZ9yQJlZVfq4I01/K5Paj5UHj7VyrQOJvzawSVlKpObApbfD0Ed6yJc+1eg== - dependencies: - emoji-regex "^8.0.0" - is-fullwidth-code-point "^3.0.0" - strip-ansi "^6.0.0" - -strip-ansi@^5.1.0: - version "5.2.0" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-5.2.0.tgz#8c9a536feb6afc962bdfa5b104a5091c1ad9c0ae" - integrity sha512-DuRs1gKbBqsMKIZlrffwlug8MHkcnpjs5VPmL1PAh+mA30U0DTotfDZ0d2UUsXpPmPmMMJ6W773MaA3J+lbiWA== - dependencies: - ansi-regex "^4.1.0" - -strip-ansi@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.0.tgz#0b1571dd7669ccd4f3e06e14ef1eed26225ae532" - integrity sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w== - dependencies: - ansi-regex "^5.0.0" - -strip-final-newline@^2.0.0: - version "2.0.0" - resolved 
"https://registry.yarnpkg.com/strip-final-newline/-/strip-final-newline-2.0.0.tgz#89b852fb2fcbe936f6f4b3187afb0a12c1ab58ad" - integrity sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA== - -strip-json-comments@~2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" - integrity sha1-PFMZQukIwml8DsNEhYwobHygpgo= - -supports-color@^5.3.0: - version "5.5.0" - resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f" - integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow== - dependencies: - has-flag "^3.0.0" - -supports-color@^7.1.0: - version "7.2.0" - resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" - integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== - dependencies: - has-flag "^4.0.0" - -term-size@^2.1.0: - version "2.2.0" - resolved "https://registry.yarnpkg.com/term-size/-/term-size-2.2.0.tgz#1f16adedfe9bdc18800e1776821734086fcc6753" - integrity sha512-a6sumDlzyHVJWb8+YofY4TW112G6p2FCPEAFk+59gIYHv3XHRhm9ltVQ9kli4hNWeQBwSpe8cRN25x0ROunMOw== - -through@^2.3.6: - version "2.3.8" - resolved "https://registry.yarnpkg.com/through/-/through-2.3.8.tgz#0dd4c9ffaabc357960b1b724115d7e0e86a2e1f5" - integrity sha1-DdTJ/6q8NXlgsbckEV1+Doai4fU= - -tmp@^0.0.33: - version "0.0.33" - resolved "https://registry.yarnpkg.com/tmp/-/tmp-0.0.33.tgz#6d34335889768d21b2bcda0aa277ced3b1bfadf9" - integrity sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw== - dependencies: - os-tmpdir "~1.0.2" - -to-readable-stream@^1.0.0: - version "1.0.0" - resolved 
"https://registry.yarnpkg.com/to-readable-stream/-/to-readable-stream-1.0.0.tgz#ce0aa0c2f3df6adf852efb404a783e77c0475771" - integrity sha512-Iq25XBt6zD5npPhlLVXGFN3/gyR2/qODcKNNyTMd4vbm39HUaOiAM4PMq0eMVC/Tkxz+Zjdsc55g9yyz+Yq00Q== - -to-regex-range@^5.0.1: - version "5.0.1" - resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4" - integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ== - dependencies: - is-number "^7.0.0" - -tslib@^1.9.0: - version "1.14.1" - resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00" - integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== - -tunnel@0.0.6: - version "0.0.6" - resolved "https://registry.yarnpkg.com/tunnel/-/tunnel-0.0.6.tgz#72f1314b34a5b192db012324df2cc587ca47f92c" - integrity sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg== - -type-fest@^0.11.0: - version "0.11.0" - resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.11.0.tgz#97abf0872310fed88a5c466b25681576145e33f1" - integrity sha512-OdjXJxnCN1AvyLSzeKIgXTXxV+99ZuXl3Hpo9XpJAv9MBcHrrJOQ5kV7ypXOuQie+AmWG25hLbiKdwYTifzcfQ== - -type-fest@^0.8.1: - version "0.8.1" - resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.8.1.tgz#09e249ebde851d3b1e48d27c105444667f17b83d" - integrity sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA== - -typedarray-to-buffer@^3.1.5: - version "3.1.5" - resolved "https://registry.yarnpkg.com/typedarray-to-buffer/-/typedarray-to-buffer-3.1.5.tgz#a97ee7a9ff42691b9f783ff1bc5112fe3fca9080" - integrity sha512-zdu8XMNEDepKKR+XYOXAVPtWui0ly0NtohUscw+UmaHiAWT8hrV1rr//H6V+0DvJ3OQ19S979M0laLfX8rm82Q== - dependencies: - is-typedarray "^1.0.0" - -typescript@^4.3.5: - version "4.3.5" - resolved 
"https://registry.yarnpkg.com/typescript/-/typescript-4.3.5.tgz#4d1c37cc16e893973c45a06886b7113234f119f4" - integrity sha512-DqQgihaQ9cUrskJo9kIyW/+g0Vxsk8cDtZ52a3NGh0YNTfpUSArXSohyUGnvbPazEPLu398C0UxmKSOrPumUzA== - -unique-string@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/unique-string/-/unique-string-2.0.0.tgz#39c6451f81afb2749de2b233e3f7c5e8843bd89d" - integrity sha512-uNaeirEPvpZWSgzwsPGtU2zVSTrn/8L5q/IexZmH0eH6SA73CmAA5U4GwORTxQAZs95TAXLNqeLoPPNO5gZfWg== - dependencies: - crypto-random-string "^2.0.0" - -universal-user-agent@^6.0.0: - version "6.0.0" - resolved "https://registry.yarnpkg.com/universal-user-agent/-/universal-user-agent-6.0.0.tgz#3381f8503b251c0d9cd21bc1de939ec9df5480ee" - integrity sha512-isyNax3wXoKaulPDZWHQqbmIx1k2tb9fb3GGDBRxCscfYV2Ch7WxPArBsFEG8s/safwXTT7H4QGhaIkTp9447w== - -update-notifier@5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/update-notifier/-/update-notifier-5.0.0.tgz#308e0ae772f71d66df0303159a945bc1e1fb819a" - integrity sha512-8tqsiVrMv7aZsKNSjqA6DdBLKJpZG1hRpkj1RbOJu1PgyP69OX+EInAnP1EK/ShX5YdPFgwWdk19oquZ0HTM8g== - dependencies: - boxen "^4.2.0" - chalk "^4.1.0" - configstore "^5.0.1" - has-yarn "^2.1.0" - import-lazy "^2.1.0" - is-ci "^2.0.0" - is-installed-globally "^0.3.1" - is-npm "^5.0.0" - is-yarn-global "^0.3.0" - latest-version "^5.0.0" - pupa "^2.0.1" - semver "^7.3.2" - semver-diff "^3.1.1" - xdg-basedir "^4.0.0" - -url-join@4.0.1: - version "4.0.1" - resolved "https://registry.yarnpkg.com/url-join/-/url-join-4.0.1.tgz#b642e21a2646808ffa178c4c5fda39844e12cde7" - integrity sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA== - -url-parse-lax@^3.0.0: - version "3.0.0" - resolved "https://registry.yarnpkg.com/url-parse-lax/-/url-parse-lax-3.0.0.tgz#16b5cafc07dbe3676c1b1999177823d6503acb0c" - integrity sha1-FrXK/Afb42dsGxmZF3gj1lA6yww= - dependencies: - prepend-http "^2.0.0" - -uuid@8.3.1: - version "8.3.1" - resolved 
"https://registry.yarnpkg.com/uuid/-/uuid-8.3.1.tgz#2ba2e6ca000da60fce5a196954ab241131e05a31" - integrity sha512-FOmRr+FmWEIG8uhZv6C2bTgEVXsHk08kE7mPlrBbEe+c3r9pjceVPgupIfNIhc4yx55H69OXANrUaSuu9eInKg== - -wcwidth@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/wcwidth/-/wcwidth-1.0.1.tgz#f0b0dcf915bc5ff1528afadb2c0e17b532da2fe8" - integrity sha1-8LDc+RW8X/FSivrbLA4XtTLaL+g= - dependencies: - defaults "^1.0.3" - -which@^2.0.1: - version "2.0.2" - resolved "https://registry.yarnpkg.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1" - integrity sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA== - dependencies: - isexe "^2.0.0" - -widest-line@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/widest-line/-/widest-line-3.1.0.tgz#8292333bbf66cb45ff0de1603b136b7ae1496eca" - integrity sha512-NsmoXalsWVDMGupxZ5R08ka9flZjjiLvHVAWYOKtiKM8ujtZWr9cRffak+uSE48+Ob8ObalXpwyeUiyDD6QFgg== - dependencies: - string-width "^4.0.0" - -windows-release@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/windows-release/-/windows-release-4.0.0.tgz#4725ec70217d1bf6e02c7772413b29cdde9ec377" - integrity sha512-OxmV4wzDKB1x7AZaZgXMVsdJ1qER1ed83ZrTYd5Bwq2HfJVg3DJS8nqlAG4sMoJ7mu8cuRmLEYyU13BKwctRAg== - dependencies: - execa "^4.0.2" - -wrappy@1: - version "1.0.2" - resolved "https://registry.yarnpkg.com/wrappy/-/wrappy-1.0.2.tgz#b5243d8f3ec1aa35f1364605bc0d1036e30ab69f" - integrity sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8= - -write-file-atomic@^3.0.0: - version "3.0.3" - resolved "https://registry.yarnpkg.com/write-file-atomic/-/write-file-atomic-3.0.3.tgz#56bd5c5a5c70481cd19c571bd39ab965a5de56e8" - integrity sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q== - dependencies: - imurmurhash "^0.1.4" - is-typedarray "^1.0.0" - signal-exit "^3.0.2" - typedarray-to-buffer "^3.1.5" - -xdg-basedir@^4.0.0: - version "4.0.0" - resolved 
"https://registry.yarnpkg.com/xdg-basedir/-/xdg-basedir-4.0.0.tgz#4bc8d9984403696225ef83a1573cbbcb4e79db13" - integrity sha512-PSNhEJDejZYV7h50BohL09Er9VaIefr2LMAf3OEmpCkjOi34eYyQYAXUTjEQtZJTKcF0E2UKTh+osDLsgNim9Q== - -yaml@1.10.0, yaml@^1.10.0: - version "1.10.0" - resolved "https://registry.yarnpkg.com/yaml/-/yaml-1.10.0.tgz#3b593add944876077d4d683fee01081bd9fff31e" - integrity sha512-yr2icI4glYaNG+KWONODapy2/jDdMSDnrONSjblABjD9B4Z5LgiircSt8m8sRZFNi08kG9Sm0uSHtEmP3zaEGg== - -yargs-parser@20.2.1: - version "20.2.1" - resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-20.2.1.tgz#28f3773c546cdd8a69ddae68116b48a5da328e77" - integrity sha512-yYsjuSkjbLMBp16eaOt7/siKTjNVjMm3SoJnIg3sEh/JsvqVVDyjRKmaJV4cl+lNIgq6QEco2i3gDebJl7/vLA==