#!/bin/bash

set -e
set -E # cause 'trap funcname ERR' to be inherited by child commands, see https://stackoverflow.com/questions/35800082/how-to-trap-err-when-using-set-e-in-bash

# Installer-wide globals. These are generated/injected by the kURL server for a
# specific installer spec and release; do not edit by hand.
MASTER=1
DIR=.
KURL_URL="https://kurl.sh"
DIST_URL="https://kurl-sh.s3.amazonaws.com/dist"
INSTALLER_ID="latest"
KURL_VERSION="v2021.09.22-1"
CRICTL_VERSION=1.20.0
REPLICATED_APP_URL="https://replicated.app"
KURL_UTIL_IMAGE="replicated/kurl-util:v2021.09.22-1"
KURL_BIN_UTILS_FILE="kurl-bin-utils-v2021.09.22-1.tar.gz"
# STEP_VERSIONS array is generated by the server and injected at runtime based on supported k8s versions
# Indexed by Kubernetes minor version (index 16 -> 1.16.x, etc.); 0.0.0 marks unsupported minors.
STEP_VERSIONS=(0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 1.16.4 1.17.13 1.18.20 1.19.15 1.20.11 1.21.5)

# The full Installer custom resource this script was generated from.
# NOTE(review): indentation inside this string was reconstructed to standard
# kURL Installer YAML layout — confirm against the generating server.
INSTALLER_YAML="apiVersion: cluster.kurl.sh/v1beta1
kind: Installer
metadata:
  name: latest
spec:
  kubernetes:
    version: 1.19.15
  docker:
    version: 20.10.5
  weave:
    version: 2.6.5
  rook:
    version: 1.0.4
  contour:
    version: 1.18.0
  registry:
    version: 2.7.1
  prometheus:
    version: 0.49.0-17.1.3
  ekco:
    version: 0.12.0
"

# shellcheck disable=SC2148 # no shebang as this is a composite script

# Rotate kurl-current-config into kurl-last-config (in the kurl namespace) and
# create a fresh kurl-current-config recording this run's kURL version.
function kurl_init_config() {
    if kubernetes_resource_exists kurl configmap kurl-current-config; then
        kubectl delete configmap -n kurl kurl-last-config || true
        # rename by re-applying the JSON with the name substituted
        kubectl get configmap -n kurl -o json kurl-current-config | sed 's/kurl-current-config/kurl-last-config/g' | kubectl apply -f -
        kubectl delete configmap -n kurl kurl-current-config || true
    else
        kubectl create configmap -n kurl kurl-last-config
    fi

    kubectl create configmap -n kurl kurl-current-config
    kurl_set_current_version
}

# Record $KURL_VERSION in the kurl-current-config configmap (no-op when unset).
function kurl_set_current_version() {
    if [ -z "${KURL_VERSION}" ]; then
        return
    fi
    kubectl patch configmaps -n kurl kurl-current-config --type merge -p "{\"data\":{\"kurl-version\":\"${KURL_VERSION}\"}}"
}

# Print the kURL version stored in the cluster by kurl_set_current_version.
function kurl_get_current_version() {
    kubectl get configmap -n kurl kurl-current-config -o jsonpath="{.data.kurl-version}"
}

# Set to 1 when any installed addon has host-level components that remote
# nodes must also install (drives the upgrade prompt in addon_outro).
ADDONS_HAVE_HOST_COMPONENTS=0

# Install (or re-apply) a single addon: sources the addon's install.sh and runs
# its entry function, reporting progress. Skips the install function when the
# addon config is unchanged and FORCE_REAPPLY_ADDONS is unset.
function addon_install() {
    local name=$1
    local version=$2

    if [ -z "$version" ]; then
        return 0
    fi

    logStep "Addon $name $version"

    report_addon_start "$name" "$version"

    rm -rf $DIR/kustomize/$name
    mkdir -p $DIR/kustomize/$name

    # if the addon has already been applied and addons are not being forcibly reapplied
    if addon_has_been_applied $name && [ -z "$FORCE_REAPPLY_ADDONS" ]; then
        export REPORTING_CONTEXT_INFO="addon already applied $name $version"
        # shellcheck disable=SC1090
        . $DIR/addons/$name/$version/install.sh
        # optional hook defined by the addon's install.sh
        if commandExists ${name}_already_applied; then
            ${name}_already_applied
        fi
        export REPORTING_CONTEXT_INFO=""
    else
        export REPORTING_CONTEXT_INFO="addon $name $version"
        # shellcheck disable=SC1090
        . $DIR/addons/$name/$version/install.sh
        # containerd is a special case because there is also a binary named containerd on the host
        if [ "$name" = "containerd" ]; then
            containerd_install
        else
            # by convention the addon's entry function is named after the addon
            $name
        fi
        export REPORTING_CONTEXT_INFO=""
    fi

    addon_set_has_been_applied $name

    # addons with a *_join hook need to run on remote nodes too
    if commandExists ${name}_join; then
        ADDONS_HAVE_HOST_COMPONENTS=1
    fi
    if [ "$name" = "containerd" ]; then
        ADDONS_HAVE_HOST_COMPONENTS=1
    fi

    report_addon_success "$name" "$version"
}

# Download an addon archive (direct s3Override URL or the versioned dist cache)
# and source its install.sh. Skips download entirely in airgap mode.
function addon_fetch() {
    local name=$1
    local version=$2
    local s3Override=$3

    if [ -z "$version" ]; then
        return 0
    fi

    if [ "$AIRGAP" != "1" ]; then
        if [ -n "$s3Override" ]; then
            rm -rf $DIR/addons/$name/$version # Cleanup broken/incompatible addons from failed runs
            addon_fetch_no_cache "$s3Override"
        elif [ -n "$DIST_URL" ]; then
            rm -rf $DIR/addons/$name/$version # Cleanup broken/incompatible addons from failed runs
            addon_fetch_cache "$name-$version.tar.gz"
        fi
    fi

    . $DIR/addons/$name/$version/install.sh
}

# Run the addon's optional *_pre_init hook if its install.sh defined one.
function addon_pre_init() {
    local name=$1

    if commandExists ${name}_pre_init; then
        ${name}_pre_init
    fi
}

# Run the addon's optional *_preflight hook if its install.sh defined one.
function addon_preflight() {
    local name=$1

    if commandExists ${name}_preflight; then
        ${name}_preflight
    fi
}

# On a joining node: load the addon's images and run its *_join hook if any.
function addon_join() {
    local name=$1
    local version=$2

    addon_load "$name" "$version"

    if commandExists ${name}_join; then
        logStep "Addon $name $version"
        ${name}_join
    fi
}

# Load the addon's container images into the local runtime (see load_images).
function addon_load() {
    local name=$1
    local version=$2

    if [ -z "$version" ]; then
        return 0
    fi

    load_images $DIR/addons/$name/$version/images
}

# Fetch and unpack an addon archive from an explicit URL, bypassing the
# etag/checksum cache in assets/Manifest.
function addon_fetch_no_cache() {
    local url=$1

    local archiveName=$(basename $url)

    echo "Fetching $archiveName"
    curl -LO "$url"
    tar xf $archiveName
    rm $archiveName
}

# Fetch an addon archive via the cached package downloader and unpack it.
function addon_fetch_cache() {
    local package=$1

    package_download "${package}"

    tar xf "$(package_filepath "${package}")"

    # rm $archiveName
}

# After all addons are installed: if any addon has host components and the
# cluster has remote nodes, print the upgrade command to run on those nodes
# (optionally pausing for confirmation), then run every addon's *_outro hook.
function addon_outro() {
    if [ -n "$PROXY_ADDRESS" ]; then
        ADDONS_HAVE_HOST_COMPONENTS=1
    fi

    if [ "$ADDONS_HAVE_HOST_COMPONENTS" = "1" ] && kubernetes_has_remotes; then
        # assemble the flags remote nodes need to mirror this install's settings
        local common_flags
        common_flags="${common_flags}$(get_docker_registry_ip_flag "${DOCKER_REGISTRY_IP}")"
        common_flags="${common_flags}$(get_additional_no_proxy_addresses_flag "${PROXY_ADDRESS}" "${SERVICE_CIDR},${POD_CIDR}")"
        common_flags="${common_flags}$(get_kurl_install_directory_flag "${KURL_INSTALL_DIRECTORY_FLAG}")"
        common_flags="${common_flags}$(get_force_reapply_addons_flag)"

        printf "\n${YELLOW}Run this script on all remote nodes to apply changes${NC}\n"
        if [ "$AIRGAP" = "1" ]; then
            printf "\n\t${GREEN}cat ./upgrade.sh | sudo bash -s airgap${common_flags}${NC}\n\n"
        else
            local prefix=
            prefix="$(build_installer_prefix "${INSTALLER_ID}" "${KURL_VERSION}" "${KURL_URL}" "${PROXY_ADDRESS}")"
            printf "\n\t${GREEN}${prefix}upgrade.sh | sudo bash -s${common_flags}${NC}\n\n"
        fi

        if [ "${KURL_IGNORE_REMOTE_UPGRADE_PROMPT}" != "1" ]; then
            if prompts_can_prompt ; then
                echo "Press enter to proceed"
                prompt
            fi
        else
            logWarn "Remote upgrade script prompt explicitly ignored"
        fi
    fi

    # run the *_outro hook for every addon directory that defines one
    while read -r name; do
        if commandExists ${name}_outro; then
            ${name}_outro
        fi
    done < <(find addons/ -mindepth 1 -maxdepth 1 -type d -printf '%f\n')
}

# Remove all downloaded addon assets.
function addon_cleanup() {
    rm -rf "${DIR}/addons"
}

# Return 0 when the addon's current config matches what was last applied.
# containerd is tracked in a local file (it may run before the cluster is up);
# all other addons are tracked in the kurl-last-config configmap.
function addon_has_been_applied() {
    local name=$1

    if [ "$name" = "containerd" ]; then
        if [ -f $DIR/containerd-last-applied ]; then
            last_applied=$(cat $DIR/containerd-last-applied)
        fi
    else
        last_applied=$(kubectl get configmap -n kurl kurl-last-config -o jsonpath="{.data.addons-$name}")
    fi

    current=$(get_addon_config "$name" | base64 -w 0)

    if [[ "$current" == "" ]] ; then
        # current should never be the empty string - it should at least contain the version - so this indicates an error
        # it would be better to reinstall unnecessarily rather than skip installing, so we report that the addon has not been applied
        return 1
    fi

    if [[ "$last_applied" == "$current" ]] ; then
        return 0
    fi
    return 1
}

# Persist the addon's current config (base64) so a future run can detect
# no-op re-installs. Counterpart of addon_has_been_applied.
function addon_set_has_been_applied() {
    local name=$1

    current=$(get_addon_config "$name" | base64 -w 0)

    if [ "$name" = "containerd" ]; then
        echo "$current" > $DIR/containerd-last-applied
    else
        kubectl patch configmaps -n kurl kurl-current-config --type merge -p "{\"data\":{\"addons-$name\":\"$current\"}}"
    fi
}

# ANSI color codes used by the log helpers below.
GREEN='\033[0;32m'
BLUE='\033[0;94m'
LIGHT_BLUE='\033[0;34m'
YELLOW='\033[0;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

KUBEADM_CONF_DIR=/opt/replicated
KUBEADM_CONF_FILE="$KUBEADM_CONF_DIR/kubeadm.conf"

# Portable "is this command available?" check.
commandExists() {
    command -v "$@" > /dev/null 2>&1
}

# Base URL for downloading packages, versioned when KURL_VERSION is set.
function get_dist_url() {
    if [ -n "${KURL_VERSION}" ]; then
        echo "${DIST_URL}/${KURL_VERSION}"
    else
        echo "${DIST_URL}"
    fi
}

# Download a package into assets/, skipping the download when the cached copy's
# etag still matches the server's and its md5 checksum verifies. The cache
# index is assets/Manifest with lines of the form "<package> <etag> <md5>".
function package_download() {
    local package="$1"

    if [ -z "${DIST_URL}" ]; then
        logWarn "DIST_URL not set, will not download $1"
        return
    fi

    mkdir -p assets
    touch assets/Manifest

    local etag="$(cat assets/Manifest | grep "${package}" | awk 'NR == 1 {print $2}')"
    local checksum="$(cat assets/Manifest | grep "${package}" | awk 'NR == 1 {print $3}')"

    # a corrupt cached file invalidates the etag so we re-download
    if [ -n "${etag}" ] && ! package_matches_checksum "${package}" "${checksum}" ; then
        etag=
    fi

    local newetag="$(curl -IfsSL "$(get_dist_url)/${package}" | grep -i 'etag:' | sed -r 's/.*"(.*)".*/\1/')"
    if [ -n "${etag}" ] && [ "${etag}" = "${newetag}" ]; then
        echo "Package ${package} already exists, not downloading"
        return
    fi

    sed -i "/^$(printf '%s' "${package}").*/d" assets/Manifest # remove from manifest

    local filepath="$(package_filepath "${package}")"

    echo "Downloading package ${package}"
    curl -fL -o "${filepath}" "$(get_dist_url)/${package}"

    checksum="$(md5sum "${filepath}" | awk '{print $1}')"
    echo "${package} ${newetag} ${checksum}" >> assets/Manifest
}

# Local cache path for a downloaded package.
function package_filepath() {
    local package="$1"
    echo "assets/${package}"
}

# Verify a cached package file exists, is non-empty, and matches the given
# md5 checksum. Returns non-zero (with a message on mismatch) otherwise.
function package_matches_checksum() {
    local package="$1"
    local checksum="$2"

    local filepath="$(package_filepath "${package}")"

    if [ -z "${checksum}" ]; then
        return 1
    elif [ ! -f "${filepath}" ] || [ ! -s "${filepath}" ]; then # if not exists or empty
        return 1
    elif ! md5sum "${filepath}" | grep -Fq "${checksum}" ; then
        echo "Package ${package} checksum does not match"
        return 1
    fi
    return 0
}

# Remove downloaded assets after an online install (kept for airgap installs).
function package_cleanup() {
    if [ -z "${DIST_URL}" ] || [ "${AIRGAP}" = "1" ]; then
        return
    fi
    addon_cleanup
    rm -rf "${DIR}/packages"
}

# Insert or replace a top-level "key": "value" string entry in a JSON settings
# file ($1=file, $2=key, $3=value), creating the file if needed.
# NOTE(review): naive text manipulation, not a JSON parser — assumes simple,
# single-level string values.
insertOrReplaceJsonParam() {
    if ! [ -f "$1" ]; then
        # If settings file does not exist
        mkdir -p "$(dirname "$1")"
        echo "{\"$2\": \"$3\"}" > "$1"
    else
        # Settings file exists
        if grep -q -E "\"$2\" *: *\"[^\"]*\"" "$1"; then
            # If settings file contains named setting, replace it
            sed -i -e "s/\"$2\" *: *\"[^\"]*\"/\"$2\": \"$3\"/g" "$1"
        else
            # Insert into settings file (with proper commas)
            if [ $(wc -c <"$1") -ge 5 ]; then
                # File long enough to actually have an entry, insert "name": "value",\n after first {
                _commonJsonReplaceTmp="$(awk "NR==1,/^{/{sub(/^{/, \"{\\\"$2\\\": \\\"$3\\\", \")} 1" "$1")"
                echo "$_commonJsonReplaceTmp" > "$1"
            else
                # file not long enough to actually have contents, replace wholesale
                echo "{\"$2\": \"$3\"}" > "$1"
            fi
        fi
    fi
}

# Split "major.minor.patch[-release]" into the globals major/minor/patch.
semverParse() {
    major="${1%%.*}"
    minor="${1#$major.}"
    minor="${minor%%.*}"
    patch="${1#$major.$minor.}"
    patch="${patch%%[-.]*}"
}

SEMVER_COMPARE_RESULT=
# Compare two semvers numerically; sets SEMVER_COMPARE_RESULT to -1/0/1
# when $1 is less than / equal to / greater than $2.
semverCompare() {
    semverParse "$1"
    _a_major="${major:-0}"
    _a_minor="${minor:-0}"
    _a_patch="${patch:-0}"
    semverParse "$2"
    _b_major="${major:-0}"
    _b_minor="${minor:-0}"
    _b_patch="${patch:-0}"
    if [ "$_a_major" -lt "$_b_major" ]; then
        SEMVER_COMPARE_RESULT=-1
        return
    fi
    if [ "$_a_major" -gt "$_b_major" ]; then
        SEMVER_COMPARE_RESULT=1
        return
    fi
    if [ "$_a_minor" -lt "$_b_minor" ]; then
        SEMVER_COMPARE_RESULT=-1
        return
    fi
    if [ "$_a_minor" -gt "$_b_minor" ]; then
        SEMVER_COMPARE_RESULT=1
        return
    fi
    if [ "$_a_patch" -lt "$_b_patch" ]; then
        SEMVER_COMPARE_RESULT=-1
        return
    fi
    if [ "$_a_patch" -gt "$_b_patch" ]; then
        SEMVER_COMPARE_RESULT=1
        return
    fi
    SEMVER_COMPARE_RESULT=0
}

# Logging helpers — all write to stderr so stdout stays machine-readable.
log() {
    printf "%s\n" "$1" 1>&2
}

logSuccess() {
    printf "${GREEN}✔ $1${NC}\n" 1>&2
}

logStep() {
    printf "${BLUE}⚙ $1${NC}\n" 1>&2
}

logSubstep() {
    printf "\t${LIGHT_BLUE}- $1${NC}\n" 1>&2
}

logFail() {
    printf "${RED}$1${NC}\n" 1>&2
}

logWarn() {
    printf "${YELLOW}$1${NC}\n" 1>&2
}

# Log a failure message and terminate the script.
bail() {
    logFail "$@"
    exit 1
}

# Block (up to 2 minutes) until the Kubernetes API serves `get nodes`.
function wait_for_nodes() {
    if ! spinner_until 120 get_nodes_succeeds ; then
        # this should exit script on non-zero exit code and print error message
        kubectl get nodes 1>/dev/null
    fi
}

function get_nodes_succeeds() {
    kubectl get nodes >/dev/null 2>&1
}

# Block (up to 2 minutes) until the default namespace exists, else bail.
function wait_for_default_namespace() {
    if ! spinner_until 120 has_default_namespace ; then
        kubectl get ns
        bail "No default namespace detected"
    fi
}

function has_default_namespace() {
    kubectl get ns | grep -q '^default' 2>/dev/null
}

# Label nodes as provisioned by kurl installation
# (these labels should have been added by kurl installation.
# See kubeadm-init and kubeadm-join yamk files.
# This bit will ensure the labels are added for pre-existing cluster
# during a kurl upgrade.)
labelNodes() {
    for NODE in $(kubectl get nodes --no-headers | awk '{print $1}');do
        kurl_label=$(kubectl describe nodes $NODE | grep "kurl.sh\/cluster=true") || true
        if [[ -z $kurl_label ]];then
            kubectl label node --overwrite $NODE kurl.sh/cluster=true;
        fi
    done
}

# Show a console spinner until a pod whose name starts with $2 in namespace $1
# reaches the Running state.
spinnerPodRunning() {
    namespace=$1
    podPrefix=$2

    local delay=0.75
    local spinstr='|/-\'
    while ! kubectl -n "$namespace" get pods 2>/dev/null | grep "^$podPrefix" | awk '{ print $3}' | grep '^Running$' > /dev/null ; do
        local temp=${spinstr#?}
        printf " [%c] " "$spinstr"
        local spinstr=$temp${spinstr%"$temp"}
        sleep $delay
        printf "\b\b\b\b\b\b"
    done
    printf " \b\b\b\b"
}

COMPARE_DOCKER_VERSIONS_RESULT=
# Compare two Docker versions including patch; sets
# COMPARE_DOCKER_VERSIONS_RESULT to -1/0/1.
compareDockerVersions() {
    # reset
    COMPARE_DOCKER_VERSIONS_RESULT=
    compareDockerVersionsIgnorePatch "$1" "$2"
    if [ "$COMPARE_DOCKER_VERSIONS_RESULT" -ne "0" ]; then
        return
    fi
    parseDockerVersion "$1"
    _a_patch="$DOCKER_VERSION_PATCH"
    parseDockerVersion "$2"
    _b_patch="$DOCKER_VERSION_PATCH"
    if [ "$_a_patch" -lt "$_b_patch" ]; then
        COMPARE_DOCKER_VERSIONS_RESULT=-1
        return
    fi
    if [ "$_a_patch" -gt "$_b_patch" ]; then
        COMPARE_DOCKER_VERSIONS_RESULT=1
        return
    fi
    COMPARE_DOCKER_VERSIONS_RESULT=0
}

COMPARE_DOCKER_VERSIONS_RESULT=
# Same as compareDockerVersions but considering only major.minor.
compareDockerVersionsIgnorePatch() {
    # reset
    COMPARE_DOCKER_VERSIONS_RESULT=
    parseDockerVersion "$1"
    _a_major="$DOCKER_VERSION_MAJOR"
    _a_minor="$DOCKER_VERSION_MINOR"
    parseDockerVersion "$2"
    _b_major="$DOCKER_VERSION_MAJOR"
    _b_minor="$DOCKER_VERSION_MINOR"
    if [ "$_a_major" -lt "$_b_major" ]; then
        COMPARE_DOCKER_VERSIONS_RESULT=-1
        return
    fi
    if [ "$_a_major" -gt "$_b_major" ]; then
        COMPARE_DOCKER_VERSIONS_RESULT=1
        return
    fi
    if [ "$_a_minor" -lt "$_b_minor" ]; then
        COMPARE_DOCKER_VERSIONS_RESULT=-1
        return
    fi
    if [ "$_a_minor" -gt "$_b_minor" ]; then
        COMPARE_DOCKER_VERSIONS_RESULT=1
        return
    fi
    COMPARE_DOCKER_VERSIONS_RESULT=0
}

DOCKER_VERSION_MAJOR=
DOCKER_VERSION_MINOR=
DOCKER_VERSION_PATCH=
DOCKER_VERSION_RELEASE=
# Split a Docker version "major.minor.patch-release" into the four globals
# above by temporarily retargeting IFS and the positional parameters.
parseDockerVersion() {
    # reset
    DOCKER_VERSION_MAJOR=
    DOCKER_VERSION_MINOR=
    DOCKER_VERSION_PATCH=
    DOCKER_VERSION_RELEASE=

    if [ -z "$1" ]; then
        return
    fi

    OLD_IFS="$IFS" && IFS=. && set -- $1 && IFS="$OLD_IFS"
    DOCKER_VERSION_MAJOR=$1
    DOCKER_VERSION_MINOR=$2
    OLD_IFS="$IFS" && IFS=- && set -- $3 && IFS="$OLD_IFS"
    DOCKER_VERSION_PATCH=$1
    DOCKER_VERSION_RELEASE=$2
}

# Make the distro's kubeconfig readable by the sudo group and wire KUBECONFIG
# plus kubectl completion into /etc/profile (once).
exportKubeconfig() {
    local kubeconfig
    kubeconfig="$(${K8S_DISTRO}_get_kubeconfig)"

    current_user_sudo_group
    if [ -n "$FOUND_SUDO_GROUP" ]; then
        chown root:$FOUND_SUDO_GROUP ${kubeconfig}
    fi
    chmod 440 ${kubeconfig}
    if ! grep -q "kubectl completion bash" /etc/profile; then
        echo "export KUBECONFIG=${kubeconfig}" >> /etc/profile
        echo "source <(kubectl completion bash)" >> /etc/profile
    fi
}

# True when the named resource exists in the namespace.
function kubernetes_resource_exists() {
    local namespace=$1
    local kind=$2
    local name=$3

    kubectl -n "$namespace" get "$kind" "$name" &>/dev/null
}

# Install whichever container runtime the installer spec selected.
function install_cri() {
    # In the event someone changes the installer spec from docker to containerd, maintain backward capability with old installs
    if [ -n "$DOCKER_VERSION" ] ; then
        export REPORTING_CONTEXT_INFO="docker $DOCKER_VERSION"
        report_install_docker
        export REPORTING_CONTEXT_INFO=""
    elif [ -n "$CONTAINERD_VERSION" ]; then
        export REPORTING_CONTEXT_INFO="containerd $CONTAINERD_VERSION"
        report_install_containerd
        export REPORTING_CONTEXT_INFO=""
    fi
}

# Install docker with start/success reporting around it.
function report_install_docker() {
    report_addon_start "docker" "$DOCKER_VERSION"
    install_docker
    apply_docker_config
    report_addon_success "docker" "$DOCKER_VERSION"
}

# Install containerd through the standard addon path.
function report_install_containerd() {
    containerd_get_host_packages_online "$CONTAINERD_VERSION"
    addon_install containerd "$CONTAINERD_VERSION"
}

# Load every image archive under directory $1 into the active runtime:
# `docker load` when docker is the CRI, otherwise gunzip into ctr import.
function load_images() {
    if [ -n "$DOCKER_VERSION" ]; then
        find "$1" -type f | xargs -I {} bash -c "docker load < {}"
    else
        find "$1" -type f | xargs -I {} bash -c "cat {} | gunzip | ctr -a $(${K8S_DISTRO}_get_containerd_sock) -n=k8s.io images import -"
    fi
}

# try a command every 2 seconds until it succeeds, up to 30 tries max; useful for kubectl commands
# where the Kubernetes API could be restarting
function try_1m() {
    local fn="$1"
    local args=${@:2}

    n=0
    while ! $fn $args 2>/dev/null ; do
        n="$(( $n + 1 ))"
        if [ "$n" -ge "30" ]; then
            # for the final try print the error and let it exit
            echo ""
            try_output="$($fn $args 2>&1)" || true
            echo "$try_output"
            bail "spent 1m attempting to run \"$fn $args\" without success"
        fi
        sleep 2
    done
}

# try a command every 2 seconds until it succeeds, up to 150 tries max; useful for kubectl commands
# where the Kubernetes API could be restarting
function try_5m() {
    local fn="$1"
    local args=${@:2}

    n=0
    while ! $fn $args 2>/dev/null ; do
        n="$(( $n + 1 ))"
        if [ "$n" -ge "150" ]; then
            # for the final try print the error and let it exit
            echo ""
            try_output="$($fn $args 2>&1)" || true
            echo "$try_output"
            bail "spent 5m attempting to run \"$fn $args\" without success"
        fi
        sleep 2
    done
}

# try a command every 2 seconds until it succeeds, up to 30 tries max; useful for kubectl commands
# where the Kubernetes API could be restarting
# does not redirect stderr to /dev/null
function try_1m_stderr() {
    local fn="$1"
    local args=${@:2}

    n=0
    while ! $fn $args ; do
        n="$(( $n + 1 ))"
        if [ "$n" -ge "30" ]; then
            # for the final try print the error and let it exit
            echo ""
            try_output="$($fn $args 2>&1)" || true
            echo "$try_output"
            bail "spent 1m attempting to run \"$fn $args\" without success"
        fi
        sleep 2
    done
}

# Run a test every second with a spinner until it succeeds
function spinner_until() {
    local timeoutSeconds="$1"
    local cmd="$2"
    local args=${@:3}

    # a negative timeout means wait forever
    if [ -z "$timeoutSeconds" ]; then
        timeoutSeconds=-1
    fi

    local delay=1
    local elapsed=0
    local spinstr='|/-\'

    while ! $cmd $args; do
        elapsed=$(($elapsed + $delay))
        if [ "$timeoutSeconds" -ge 0 ] && [ "$elapsed" -gt "$timeoutSeconds" ]; then
            return 1
        fi
        local temp=${spinstr#?}
        printf " [%c] " "$spinstr"
        local spinstr=$temp${spinstr%"$temp"}
        sleep $delay
        printf "\b\b\b\b\b\b"
    done
}

# Download and unpack the common package for online installs.
function get_common() {
    if [ "$AIRGAP" != "1" ] && [ -n "$DIST_URL" ]; then
        curl -sSOL "$(get_dist_url)/common.tar.gz"
        tar xf common.tar.gz
        rm common.tar.gz
    fi
}

# Load the bundled kurl-util image into the active container runtime, if present.
function get_shared() {
    if [ -f shared/kurl-util.tar ]; then
        if [ -n "$DOCKER_VERSION" ]; then
            docker load < shared/kurl-util.tar
        else
            ctr -a "$(${K8S_DISTRO}_get_containerd_sock)" -n=k8s.io images import shared/kurl-util.tar
        fi
    fi
}

# Print the names of all groups granted full sudo, one per line.
function all_sudo_groups() {
    # examples of lines we're looking for in any sudo config files to find group with root privileges
    # %wheel ALL = (ALL) ALL
    # %google-sudoers ALL=(ALL:ALL) NOPASSWD:ALL
    # %admin ALL=(ALL) ALL
    cat /etc/sudoers | grep -Eo '^%\S+\s+ALL\s?=.*ALL\b' | awk '{print $1 }' | sed 's/%//'
    find /etc/sudoers.d/ -type f | xargs cat | grep -Eo '^%\S+\s+ALL\s?=.*ALL\b' | awk '{print $1 }' | sed 's/%//'
}

# if the sudo group cannot be detected default to root
FOUND_SUDO_GROUP=
# Set FOUND_SUDO_GROUP to the first sudo-capable group the invoking (sudo)
# user belongs to; leaves it empty when not running via sudo or no match.
function current_user_sudo_group() {
    if [ -z "$SUDO_UID" ]; then
        return 0
    fi
    # return the first sudo group the current user belongs to
    while read -r groupName; do
        if id "$SUDO_UID" -Gn | grep -q "\b${groupName}\b"; then
            FOUND_SUDO_GROUP="$groupName"
            return 0
        fi
    done < <(all_sudo_groups)
}

# Print instructions (and, when running via sudo, set up ~/.kube/config) so
# the invoking user can use kubectl after the install completes.
function kubeconfig_setup_outro() {
    current_user_sudo_group
    if [ -n "$FOUND_SUDO_GROUP" ]; then
        printf "To access the cluster with kubectl, reload your shell:\n"
        printf "\n"
        printf "${GREEN} bash -l${NC}\n"
        return
    fi
    local owner="$SUDO_UID"
    if [ -z "$owner" ]; then
        # not currently running via sudo
        owner="$USER"
    else
        # running via sudo - automatically create ~/.kube/config if it does not exist
        ownerdir=`eval echo "~$(id -un $owner)"`

        if [ ! -f "$ownerdir/.kube/config" ]; then
            mkdir -p $ownerdir/.kube
            cp "$(${K8S_DISTRO}_get_kubeconfig)" $ownerdir/.kube/config
            chown -R $owner $ownerdir/.kube

            printf "To access the cluster with kubectl, ensure the KUBECONFIG environment variable is unset:\n"
            printf "\n"
            printf "${GREEN} echo unset KUBECONFIG >> ~/.bash_profile${NC}\n"
            printf "${GREEN} bash -l${NC}\n"
            return
        fi
    fi

    printf "To access the cluster with kubectl, copy kubeconfig to your home directory:\n"
    printf "\n"
    printf "${GREEN} cp "$(${K8S_DISTRO}_get_kubeconfig)" ~/.kube/config${NC}\n"
    printf "${GREEN} chown -R ${owner} ~/.kube${NC}\n"
    printf "${GREEN} echo unset KUBECONFIG >> ~/.bash_profile${NC}\n"
    printf "${GREEN} bash -l${NC}\n"
    printf "\n"
    printf "You will likely need to use sudo to copy and chown "$(${K8S_DISTRO}_get_kubeconfig)".\n"
}

# Split "host:port" in $1 into the globals HOST and PORT.
splitHostPort() {
    oIFS="$IFS"; IFS=":" read -r HOST PORT <<< "$1"; IFS="$oIFS"
}

# Loose dotted-quad shape check (does not validate octet ranges).
isValidIpv4() {
    if echo "$1" | grep -qs '^[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*$'; then
        return 0
    else
        return 1
    fi
}

# Loose IPv6 shape check.
isValidIpv6() {
    if echo "$1" | grep -qs "^\([0-9a-fA-F]\{0,4\}:\)\{1,7\}[0-9a-fA-F]\{0,4\}$"; then
        return 0
    else
        return 1
    fi
}

# Return 0 when the TLS certificate served at $1 (host:port) includes $2 in
# its Subject Alternative Name extension.
function cert_has_san() {
    local address=$1
    local san=$2

    echo "Q" | openssl s_client -connect "$address" 2>/dev/null | openssl x509 -noout -text 2>/dev/null | grep --after-context=1 'X509v3 Subject Alternative Name' | grep -q "$2"
}

# By default journald persists logs if the directory /var/log/journal exists so create it if it's
# not found. Sysadmins may still disable persistent logging with /etc/systemd/journald.conf.
# Enable persistent journald logging by creating /var/log/journal (see the
# comment above) and flushing the runtime journal into it.
function journald_persistent() {
    if [ -d /var/log/journal ]; then
        return 0
    fi
    mkdir -p /var/log/journal
    systemd-tmpfiles --create --prefix /var/log/journal
    systemctl restart systemd-journald
    journalctl --flush
}

# Remove a file if it exists (no error when absent).
function rm_file() {
    if [ -f "$1" ]; then
        rm $1
    fi
}

# Checks if the provided param is in the current path, and if it is not adds it
# this is useful for systems where /usr/local/bin is not in the path for root
function path_add() {
    if [ -d "$1" ] && [[ ":$PATH:" != *":$1:"* ]]; then
        PATH="${PATH:+"$PATH:"}$1"
    fi
}

# Install host-level tool dependencies needed by the installer.
function install_host_dependencies() {
    install_host_dependencies_openssl
}

# Ensure openssl is available, downloading and installing the host package
# when missing (online installs only).
function install_host_dependencies_openssl() {
    if commandExists "openssl"; then
        return
    fi

    if [ "$AIRGAP" != "1" ] && [ -n "$DIST_URL" ]; then
        local package="host-openssl.tar.gz"
        package_download "${package}"
        tar xf "$(package_filepath "${package}")"
    fi
    install_host_archives "${DIR}/packages/host/openssl" openssl
}

# Read the kurl install directory from the in-cluster kurl-config configmap
# (trying both kubeadm and rke2 kubeconfigs) unless it was given as a flag.
function maybe_read_kurl_config_from_cluster() {
    if [ -n "${KURL_INSTALL_DIRECTORY_FLAG}" ]; then
        return
    fi

    local kurl_install_directory_flag
    # we don't yet have KUBECONFIG when this is called from the top of install.sh
    kurl_install_directory_flag="$(KUBECONFIG="$(kubeadm_get_kubeconfig)" kubectl -n kube-system get cm kurl-config -ojsonpath='{ .data.kurl_install_directory }' 2>/dev/null || echo "")"
    if [ -z "${kurl_install_directory_flag}" ]; then
        kurl_install_directory_flag="$(KUBECONFIG="$(rke2_get_kubeconfig)" kubectl -n kube-system get cm kurl-config -ojsonpath='{ .data.kurl_install_directory }' 2>/dev/null || echo "")"
    fi
    if [ -n "${kurl_install_directory_flag}" ]; then
        KURL_INSTALL_DIRECTORY_FLAG="${kurl_install_directory_flag}"
        KURL_INSTALL_DIRECTORY="$(realpath ${kurl_install_directory_flag})/kurl"
    fi

    # this function currently only sets KURL_INSTALL_DIRECTORY
    # there are many other settings in kurl-config
}

KURL_INSTALL_DIRECTORY=/var/lib/kurl

# pushd into KURL_INSTALL_DIRECTORY, creating it and verifying writability
# first (bails with guidance when the directory is not writable).
function pushd_install_directory() {
    local tmpfile
    tmpfile="${KURL_INSTALL_DIRECTORY}/tmpfile"
    if ! mkdir -p "${KURL_INSTALL_DIRECTORY}" || ! touch "${tmpfile}" ; then
        bail "Directory ${KURL_INSTALL_DIRECTORY} is not writeable by this script. Please either change the directory permissions or override the installation directory with the flag \"kurl-install-directory\"."
    fi
    rm "${tmpfile}"
    pushd "${KURL_INSTALL_DIRECTORY}" 1>/dev/null
}

# Counterpart of pushd_install_directory.
function popd_install_directory() {
    popd 1>/dev/null
}

# Move extracted airgap bundle assets from ./kurl into KURL_INSTALL_DIRECTORY
# so airgap installs look like online installs on disk.
function move_airgap_assets() {
    local cwd
    cwd="$(pwd)"

    if [ "${KURL_INSTALL_DIRECTORY}" = "${cwd}/kurl" ]; then
        return
    fi

    pushd_install_directory # make sure we have access
    popd_install_directory

    # The airgap bundle will extract everything into ./kurl directory.
    # Move all assets except the scripts into the $KURL_INSTALL_DIRECTORY to emulate the online install experience.
    if [ "$(ls -A "${cwd}"/kurl)" ]; then
        for file in "${cwd}"/kurl/*; do
            rm -rf "${KURL_INSTALL_DIRECTORY}/$(basename ${file})"
            mv "${file}" "${KURL_INSTALL_DIRECTORY}/"
        done
    fi
}

# The get_*_flag helpers below each print a single " flag=value" fragment
# (leading space included) used to assemble the remote-node command line in
# addon_outro, or print nothing when the flag is not needed.

function get_docker_registry_ip_flag() {
    local docker_registry_ip="$1"
    if [ -z "${docker_registry_ip}" ]; then
        return
    fi
    echo " docker-registry-ip=${docker_registry_ip}"
}

function get_force_reapply_addons_flag() {
    if [ "${FORCE_REAPPLY_ADDONS}" != "1" ]; then
        return
    fi
    echo " force-reapply-addons"
}

function get_additional_no_proxy_addresses_flag() {
    local has_proxy="$1"
    local no_proxy_addresses="$2"
    if [ -z "${has_proxy}" ]; then
        return
    fi
    echo " additional-no-proxy-addresses=${no_proxy_addresses}"
}

function get_kurl_install_directory_flag() {
    local kurl_install_directory="$1"
    # omit when it is the default location
    if [ -z "${kurl_install_directory}" ] || [ "${kurl_install_directory}" = "/var/lib/kurl" ]; then
        return
    fi
    echo " kurl-install-directory=$(echo "${kurl_install_directory}")"
}

# Print " primary-host=IP"/" secondary-host=IP" fragments for every remote
# node, selected by the master node-role label.
function get_remotes_flags() {
    while read -r primary; do
        printf " primary-host=$primary"
    done < <(kubectl get nodes --no-headers --selector="node-role.kubernetes.io/master" -owide | awk '{ print $6 }')

    while read -r secondary; do
        printf " secondary-host=$secondary"
    done < <(kubectl get node --no-headers --selector='!node-role.kubernetes.io/master' -owide | awk '{ print $6 }')
}

# Return 0 once the service is active again under a NEW main PID (i.e. the
# old process $1 is gone and $2 reports a different MainPID).
function systemd_restart_succeeded() {
    local oldPid=$1
    local serviceName=$2

    if ! systemctl is-active --quiet $serviceName; then
        return 1
    fi

    local newPid="$(systemctl show --property MainPID $serviceName | cut -d = -f2)"
    if [ "$newPid" = "$oldPid" ]; then
        return 1
    fi

    if ps -p $oldPid >/dev/null 2>&1; then
        return 1
    fi

    return 0
}

# Restart a systemd service and wait (up to 2 minutes) until it is running
# under a new PID; bails with journal output on failure.
function restart_systemd_and_wait() {
    local serviceName=$1

    local pid="$(systemctl show --property MainPID $serviceName | cut -d = -f2)"

    echo "Restarting $serviceName..."
    systemctl restart $serviceName

    if ! spinner_until 120 systemd_restart_succeeded $pid $serviceName; then
        journalctl -xe
        bail "Could not successfully restart systemd service $serviceName"
    fi

    echo "Service $serviceName restarted."
}

# returns true when a job has completed
function job_is_completed() {
    local namespace="$1"
    local jobName="$2"
    kubectl get jobs -n "$namespace" "$jobName" | grep -q '1/1'
}

# Best-effort command runner: failures and stderr are swallowed.
function maybe() {
    local cmd="$1"
    local args=${@:2}

    $cmd $args 2>/dev/null || true
}

MACHINE_ID=
# Populate MACHINE_ID from the kurl host binary's protected id (best effort).
function get_machine_id() {
    MACHINE_ID="$(${DIR}/bin/kurl host protectedid || true)"
}

# Convert kebab-case to camelCase (GNU sed \U extension).
function kebab_to_camel() {
    echo "$1" | sed -E 's/-(.)/\U\1/g'
}

# Print the command prefix remote nodes use to fetch installer scripts:
# "cat " for local files when no kurl URL, otherwise a curl command pinned to
# the installer id (and kURL version when known), honoring the proxy.
function build_installer_prefix() {
    local installer_id="$1"
    local kurl_version="$2"
    local kurl_url="$3"
    local proxy_address="$4"

    if [ -z "${kurl_url}" ]; then
        echo "cat "
        return
    fi

    local curl_flags=
    if [ -n "${proxy_address}" ]; then
        curl_flags=" -x ${proxy_address}"
    fi

    if [ -n "${kurl_version}" ]; then
        echo "curl -fsSL${curl_flags} ${kurl_url}/version/${kurl_version}/${installer_id}/"
    else
        echo "curl -fsSL${curl_flags} ${kurl_url}/${installer_id}/"
    fi
}

# Lowercased hostname, used as the Kubernetes node name.
function get_local_node_name() {
    hostname | tr '[:upper:]' '[:lower:]'
}

# Detect host facts for the install: distro, current k8s version, whether
# docker is already present, public/private IPs and kernel version.
function discover() {
    local fullCluster="$1"

    detectLsbDist
    discoverCurrentKubernetesVersion "$fullCluster"

    # never upgrade docker underneath kubernetes
    if docker version >/dev/null 2>&1 ; then
        SKIP_DOCKER_INSTALL=1
        if [ -n "$DOCKER_VERSION" ]; then
            echo "Docker already exists on this machine so no docker install will be performed"
        fi
    fi

    discover_public_ip
    discover_private_ip

    KERNEL_MAJOR=$(uname -r | cut -d'.' -f1)
    KERNEL_MINOR=$(uname -r | cut -d'.' -f2)
}

LSB_DIST=
DIST_VERSION=
DIST_VERSION_MAJOR=
# Identify the Linux distribution and version from the usual release files,
# setting LSB_DIST/DIST_VERSION/DIST_VERSION_MAJOR only for supported
# distro+version combinations; exits with instructions otherwise.
detectLsbDist() {
    _dist=
    _error_msg="We have checked /etc/os-release and /etc/centos-release files."
    if [ -f /etc/centos-release ] && [ -r /etc/centos-release ]; then
        # CentOS 6 example: CentOS release 6.9 (Final)
        # CentOS 7 example: CentOS Linux release 7.5.1804 (Core)
        _dist="$(cat /etc/centos-release | cut -d" " -f1)"
        _version="$(cat /etc/centos-release | sed 's/Linux //' | cut -d" " -f3 | cut -d "." -f1-2)"
    elif [ -f /etc/os-release ] && [ -r /etc/os-release ]; then
        _dist="$(. /etc/os-release && echo "$ID")"
        _version="$(. /etc/os-release && echo "$VERSION_ID")"
    elif [ -f /etc/redhat-release ] && [ -r /etc/redhat-release ]; then
        # this is for RHEL6
        _dist="rhel"
        _major_version=$(cat /etc/redhat-release | cut -d" " -f7 | cut -d "." -f1)
        _minor_version=$(cat /etc/redhat-release | cut -d" " -f7 | cut -d "." -f2)
        _version=$_major_version
    elif [ -f /etc/system-release ] && [ -r /etc/system-release ]; then
        if grep --quiet "Amazon Linux" /etc/system-release; then
            # Special case for Amazon 2014.03
            _dist="amzn"
            _version=`awk '/Amazon Linux/{print $NF}' /etc/system-release`
        fi
    else
        _error_msg="$_error_msg\nDistribution cannot be determined because neither of these files exist."
    fi

    if [ -n "$_dist" ]; then
        _error_msg="$_error_msg\nDetected distribution is ${_dist}."
        _dist="$(echo "$_dist" | tr '[:upper:]' '[:lower:]')"
        # each arm validates the minimum supported version for that distro
        case "$_dist" in
            ubuntu)
                _error_msg="$_error_msg\nHowever detected version $_version is less than 12."
                oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS";
                [ $1 -ge 12 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1
                ;;
            debian)
                _error_msg="$_error_msg\nHowever detected version $_version is less than 7."
                oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS";
                [ $1 -ge 7 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1
                ;;
            fedora)
                _error_msg="$_error_msg\nHowever detected version $_version is less than 21."
                oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS";
                [ $1 -ge 21 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1
                ;;
            rhel)
                _error_msg="$_error_msg\nHowever detected version $_version is less than 7."
                oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS";
                [ $1 -ge 6 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1
                ;;
            centos)
                _error_msg="$_error_msg\nHowever detected version $_version is less than 6."
                oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS";
                [ $1 -ge 6 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1
                ;;
            amzn)
                _error_msg="$_error_msg\nHowever detected version $_version is not one of\n    2, 2.0, 2018.03, 2017.09, 2017.03, 2016.09, 2016.03, 2015.09, 2015.03, 2014.09, 2014.03."
                [ "$_version" = "2" ] || [ "$_version" = "2.0" ] || \
                [ "$_version" = "2018.03" ] || \
                [ "$_version" = "2017.03" ] || [ "$_version" = "2017.09" ] || \
                [ "$_version" = "2016.03" ] || [ "$_version" = "2016.09" ] || \
                [ "$_version" = "2015.03" ] || [ "$_version" = "2015.09" ] || \
                [ "$_version" = "2014.03" ] || [ "$_version" = "2014.09" ] && \
                LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$_version
                ;;
            sles)
                _error_msg="$_error_msg\nHowever detected version $_version is less than 12."
                oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS";
                [ $1 -ge 12 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1
                ;;
            ol)
                _error_msg="$_error_msg\nHowever detected version $_version is less than 6."
                oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS";
                [ $1 -ge 6 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1
                ;;
            *)
                _error_msg="$_error_msg\nThat is an unsupported distribution."
                ;;
        esac
    fi

    if [ -z "$LSB_DIST" ]; then
        echo >&2 "$(echo | sed "i$_error_msg")"
        echo >&2 ""
        echo >&2 "Please visit the following URL for more detailed installation instructions:"
        echo >&2 ""
        echo >&2 "  https://help.replicated.com/docs/distributing-an-application/installing/"
        exit 1
    fi
}

# Upgrade-plan flags set by discoverCurrentKubernetesVersion: whether the
# local primary, remote primaries and secondaries need a step (two-minor),
# minor or patch Kubernetes upgrade.
KUBERNETES_STEP_LOCAL_PRIMARY=0
KUBERNETES_UPGRADE_LOCAL_PRIMARY_MINOR=0
KUBERNETES_UPGRADE_LOCAL_PRIMARY_PATCH=0
KUBERNETES_STEP_REMOTE_PRIMARIES=0
KUBERNETES_UPGRADE_REMOTE_PRIMARIES_MINOR=0
KUBERNETES_UPGRADE_REMOTE_PRIMARIES_PATCH=0
KUBERNETES_STEP_SECONDARIES=0
KUBERNETES_UPGRADE_SECONDARIES_MINOR=0
KUBERNETES_UPGRADE_SECONDARIES_PATCH=0

# Read the running Kubernetes version from the local kube-apiserver manifest
# and, when $1 (fullCluster) is set, inspect all nodes to decide which
# upgrades (step/minor/patch) are needed; bails when the installed version is
# newer than the target or more than two minors behind it.
discoverCurrentKubernetesVersion() {
    local fullCluster="$1"

    set +e
    CURRENT_KUBERNETES_VERSION=$(cat /etc/kubernetes/manifests/kube-apiserver.yaml 2>/dev/null | grep image: | grep -oE '[0-9]+.[0-9]+.[0-9]+')
    set -e

    if [ -z "$CURRENT_KUBERNETES_VERSION" ]; then
        # This is a new install and no upgrades are required
        return 0
    fi

    # These versions are for the local primary
    semverParse $CURRENT_KUBERNETES_VERSION
    KUBERNETES_CURRENT_VERSION_MAJOR="$major"
    KUBERNETES_CURRENT_VERSION_MINOR="$minor"
    KUBERNETES_CURRENT_VERSION_PATCH="$patch"

    if [ -z "$fullCluster" ]; then
        return 0
    fi

    # Populate arrays with versions of remote nodes
    kubernetes_get_remote_primaries
    kubernetes_get_secondaries

    # If any nodes have a lower minor than this then we'll need to do an extra step upgrade
    STEP_VERSION_MINOR=$(($KUBERNETES_TARGET_VERSION_MINOR - 1))

    # These will be used in preflight checks
    LOWEST_SUPPORTED_MINOR=$(($STEP_VERSION_MINOR - 1))
    MIN_CLUSTER_NODE_MINOR_FOUND=$KUBERNETES_CURRENT_VERSION_MINOR
    MAX_CLUSTER_NODE_MINOR_FOUND=$KUBERNETES_CURRENT_VERSION_MINOR

    # Check if minor, step, or patch upgrades are needed for the local primary
    if [ $KUBERNETES_CURRENT_VERSION_MINOR -lt $STEP_VERSION_MINOR ]; then
        KUBERNETES_STEP_LOCAL_PRIMARY=1
        KUBERNETES_UPGRADE_LOCAL_PRIMARY_MINOR=1
        KUBERNETES_UPGRADE=1
    elif [ $KUBERNETES_CURRENT_VERSION_MINOR -lt $KUBERNETES_TARGET_VERSION_MINOR ]; then
        KUBERNETES_UPGRADE_LOCAL_PRIMARY_MINOR=1
        KUBERNETES_UPGRADE=1
    elif [ $KUBERNETES_CURRENT_VERSION_PATCH -lt $KUBERNETES_TARGET_VERSION_PATCH ]; then
        KUBERNETES_UPGRADE_LOCAL_PRIMARY_PATCH=1
        KUBERNETES_UPGRADE=1
    fi

    # Check for upgrades required on remote primaries
    for i in ${!KUBERNETES_REMOTE_PRIMARIES[@]}; do
        semverParse ${KUBERNETES_REMOTE_PRIMARY_VERSIONS[$i]}

        # Adjust min and max minor vars for preflights
        if [ $minor -lt $MIN_CLUSTER_NODE_MINOR_FOUND ]; then
            MIN_CLUSTER_NODE_MINOR_FOUND=$minor
        fi
        if [ $minor -gt $MAX_CLUSTER_NODE_MINOR_FOUND ]; then
            MAX_CLUSTER_NODE_MINOR_FOUND=$minor
        fi

        # Check step, minor, and patch for this remote primary
        if [ $minor -lt $STEP_VERSION_MINOR ]; then
            KUBERNETES_STEP_REMOTE_PRIMARIES=1
            KUBERNETES_UPGRADE_REMOTE_PRIMARIES_MINOR=1
            KUBERNETES_UPGRADE=1
        elif [ $minor -lt $KUBERNETES_TARGET_VERSION_MINOR ]; then
            KUBERNETES_UPGRADE_REMOTE_PRIMARIES_MINOR=1
            KUBERNETES_UPGRADE=1
        elif [ $patch -lt $KUBERNETES_TARGET_VERSION_PATCH ]; then
            KUBERNETES_UPGRADE_REMOTE_PRIMARIES_PATCH=1
            KUBERNETES_UPGRADE=1
        fi
    done

    # Check for upgrades required on remote secondaries
    for i in ${!KUBERNETES_SECONDARIES[@]}; do
        semverParse ${KUBERNETES_SECONDARY_VERSIONS[$i]}

        # Adjust min and max minor vars for preflights
        if [ $minor -lt $MIN_CLUSTER_NODE_MINOR_FOUND ]; then
            MIN_CLUSTER_NODE_MINOR_FOUND=$minor
        fi
        if [ $minor -gt $MAX_CLUSTER_NODE_MINOR_FOUND ]; then
            MAX_CLUSTER_NODE_MINOR_FOUND=$minor
        fi

        # Check step, minor, and patch for this secondary
        if [ $minor -lt $STEP_VERSION_MINOR ]; then
            KUBERNETES_STEP_SECONDARIES=1
            KUBERNETES_UPGRADE_SECONDARIES_MINOR=1
            KUBERNETES_UPGRADE=1
        elif [ $minor -lt $KUBERNETES_TARGET_VERSION_MINOR ]; then
            KUBERNETES_UPGRADE_SECONDARIES_MINOR=1
            KUBERNETES_UPGRADE=1
        elif [ $patch -lt $KUBERNETES_TARGET_VERSION_PATCH ]; then
            KUBERNETES_UPGRADE_SECONDARIES_PATCH=1
            KUBERNETES_UPGRADE=1
        fi
    done

    # preflights
    if [ $MAX_CLUSTER_NODE_MINOR_FOUND -gt $KUBERNETES_TARGET_VERSION_MINOR ]; then
        printf "%s %s %s" \
            "The currently installed kubernetes version is 1.${MAX_CLUSTER_NODE_MINOR_FOUND}." \
            "The requested version to upgrade to is ${KUBERNETES_VERSION}." \
            "Since the currently installed version is newer than the requested version, no action will be taken."
        bail
    fi

    if [ $MIN_CLUSTER_NODE_MINOR_FOUND -lt $LOWEST_SUPPORTED_MINOR ]; then
        MAX_UPGRADEABLE_VERSION_MINOR=$(($MIN_CLUSTER_NODE_MINOR_FOUND + 2))
        printf "%s %s %s" \
            "The currently installed kubernetes version is ${CURRENT_KUBERNETES_VERSION}." \
            "The requested version to upgrade to is ${KUBERNETES_VERSION}." \
            "Kurl can only be upgraded two minor versions at time. Please install ${KUBERNETES_TARGET_VERSION_MAJOR}.${MAX_UPGRADEABLE_VERSION_MINOR}.x. first."
        bail
    fi

    if [ "$KUBERNETES_STEP_LOCAL_PRIMARY" == "1" ] || [ "$KUBERNETES_STEP_REMOTE_PRIMARIES" == "1" ] || [ "$KUBERNETES_STEP_SECONDARIES" == 1 ]; then
        # the intermediate version to pass through on a two-minor upgrade
        STEP_VERSION=${STEP_VERSIONS[$STEP_VERSION_MINOR]}
    fi
}

# Print the installed docker version (e.g. "20.10.5"), or nothing when
# docker is not installed.
function get_docker_version() {
    if ! commandExists "docker" ; then
        return
    fi
    docker -v | awk '{gsub(/,/, "", $3); print $3}'
}

# Detect this host's public IP by querying cloud metadata endpoints (GCE
# first, then EC2) and store it in PUBLIC_ADDRESS. No-op in airgap mode.
discover_public_ip() {
    if [ "$AIRGAP" == "1" ]; then
        return
    fi

    # gce
    set +e
    _out=$(curl --noproxy "*" --max-time 5 --connect-timeout 2 -qSfs -H 'Metadata-Flavor: Google' http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip 2>/dev/null)
    _status=$?
    set -e
    if [ "$_status" -eq "0" ] && [ -n "$_out" ]; then
        if isValidIpv4 "$_out" || isValidIpv6 "$_out"; then
            PUBLIC_ADDRESS=$_out
        fi
        return
    fi

    # ec2
    set +e
    _out=$(curl --noproxy "*" --max-time 5 --connect-timeout 2 -qSfs http://169.254.169.254/latest/meta-data/public-ipv4 2>/dev/null)
    _status=$?
set -e
    if [ "$_status" -eq "0" ] && [ -n "$_out" ]; then
        if isValidIpv4 "$_out" || isValidIpv6 "$_out"; then
            PUBLIC_ADDRESS=$_out
        fi
        return
    fi

    # azure
    set +e
    _out=$(curl --noproxy "*" --max-time 5 --connect-timeout 2 -qSfs -H Metadata:true "http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/publicIpAddress?api-version=2017-08-01&format=text" 2>/dev/null)
    _status=$?
    set -e
    if [ "$_status" -eq "0" ] && [ -n "$_out" ]; then
        if isValidIpv4 "$_out" || isValidIpv6 "$_out"; then
            PUBLIC_ADDRESS=$_out
        fi
        return
    fi
}

# Resolve the node's private address via the distro-specific helper unless
# the caller already provided PRIVATE_ADDRESS.
function discover_private_ip() {
    if [ -n "$PRIVATE_ADDRESS" ]; then
        return 0
    fi
    PRIVATE_ADDRESS="$(${K8S_DISTRO}_discover_private_ip)"
}

# Succeeds (exit 0) when resolv.conf lists at least one non-loopback
# nameserver; uses systemd-resolved's upstream file when that service is
# active.
function discover_non_loopback_nameservers() {
    local resolvConf=/etc/resolv.conf
    # https://github.com/kubernetes/kubernetes/blob/v1.19.3/cmd/kubeadm/app/componentconfigs/kubelet.go#L211
    if systemctl is-active -q systemd-resolved; then
        resolvConf=/run/systemd/resolve/resolv.conf
    fi
    cat $resolvConf | grep -E '^nameserver\s+' | grep -Eqv '^nameserver\s+127'
}

function change_cgroup_driver_to_systemd() {
    # Docker uses cgroupfs by default to manage cgroup. On distributions using systemd,
    # i.e. RHEL and Ubuntu, this causes issues because there are now 2 seperate ways
    # to manage resources. For more info see the link below.
    # https://github.com/kubernetes/kubeadm/issues/1394#issuecomment-462878219
    if [ -f /var/lib/kubelet/kubeadm-flags.env ] || [ -f /etc/docker/daemon.json ]; then
        return
    fi
    mkdir -p /etc/docker
    # NOTE(review): the heredoc body written to daemon.json, the close of this
    # function, and the header of the devicemapper-check function that follows
    # (which sets $_driver from `docker info`) were lost during text
    # extraction; the surviving fragment is reproduced unchanged.
    cat > /etc/docker/daemon.json </dev/null | grep 'Storage Driver' | awk '{print $3}' | awk -F- '{print $1}')
    if [ "$_driver" = "devicemapper" ] && docker info 2>/dev/null | grep -Fqs 'Data loop file:' ; then
        printf "${RED}The running Docker daemon is configured to use the 'devicemapper' storage driver \
in loopback mode.\nThis is not recommended for production use. Please see to the following URL for more \
information.\n\nhttps://help.replicated.com/docs/kb/developer-resources/devicemapper-warning/.${NC}\n\n\
"
        # HARD_FAIL_ON_LOOPBACK
        if [ -n "$1" ]; then
            printf "${RED}Please configure a recommended storage driver and try again.${NC}\n\n"
            exit 1
        fi
        printf "Do you want to proceed anyway? "
        if ! confirmN; then
            exit 0
        fi
    fi
}

# Write an HTTP(S) proxy drop-in for the docker systemd unit and restart
# docker, skipping the restart when the configured proxy already matches.
docker_configure_proxy() {
    local previous_proxy=$(docker info 2>/dev/null | grep -i 'Http Proxy:' | awk '{ print $NF }')
    local previous_no_proxy=$(docker info 2>/dev/null | grep -i 'No Proxy:' | awk '{ print $NF }')
    if [ "$PROXY_ADDRESS" = "$previous_proxy" ] && [ "$NO_PROXY_ADDRESSES" = "$previous_no_proxy" ]; then
        return
    fi

    mkdir -p /etc/systemd/system/docker.service.d
    local file=/etc/systemd/system/docker.service.d/http-proxy.conf

    echo "# Generated by kURL" > $file
    echo "[Service]" >> $file

    if echo "$PROXY_ADDRESS" | grep -q "^https"; then
        echo "Environment=\"HTTPS_PROXY=${PROXY_ADDRESS}\" \"NO_PROXY=${NO_PROXY_ADDRESSES}\"" >> $file
    else
        echo "Environment=\"HTTP_PROXY=${PROXY_ADDRESS}\" \"NO_PROXY=${NO_PROXY_ADDRESSES}\"" >> $file
    fi

    restart_docker
}

# Download and unpack the docker host-package bundle for online installs.
function docker_get_host_packages_online() {
    local version="$1"

    if [ "$AIRGAP" != "1" ] && [ -n "$DIST_URL" ]; then
        rm -rf $DIR/packages/docker/${version} # Cleanup broken/incompatible packages from failed runs

        local package="docker-${version}.tar.gz"
        package_download "${package}"
        tar xf "$(package_filepath "${package}")"
        # rm docker-${version}.tar.gz
    fi
}

# Download and unpack the containerd host-package bundle for online installs.
function containerd_get_host_packages_online() {
    local version="$1"

    if [ "$AIRGAP" != "1" ] && [ -n "$DIST_URL" ]; then
        rm -rf $DIR/packages/containerd/${version} # Cleanup broken/incompatible packages from failed runs

        local package="containerd-${version}.tar.gz"
        package_download "${package}"
        tar xf "$(package_filepath "${package}")"
        # rm containerd-${version}.tar.gz
    fi
}

# Normalize an image reference: add "library/" when there is no namespace,
# "docker.io/" when there is no registry, and ":latest" when there is no tag.
# Continues mid-pipeline on the next chunk.
function canonical_image_name() {
    local image="$1"
    if echo "$image" | grep -vq '/' ; then
        image="library/$image"
    fi
    if echo "$image" |
awk -F'/' '{print $1}' | grep -vq '\.' ; then
        image="docker.io/$image"
    fi
    if echo "$image" | grep -vq ':' ; then
        image="$image:latest"
    fi
    echo "$image"
}

# Install the helm and helmfile binaries on primary (MASTER) nodes only.
function install_helm() {
    if [ "$MASTER" = "1" ]; then
        cp -f $DIR/helm/helm /usr/local/bin/
        cp -f $DIR/helm/helmfile /usr/local/bin/
        path_add "/usr/local/bin"
    fi
}

# Materialize HELM_HELMFILE_SPEC to a temp file and run `helmfile sync`
# (plus `deps` for online installs); no-op when no spec is configured.
function helmfile_sync() {
    if [ -z "$HELM_HELMFILE_SPEC" ]; then
        return 0
    fi
    logStep "Installing Helm Charts using the Helmfile Spec"

    # TODO (dan): add reporting for helm
    # report_helm_start

    printf "${HELM_HELMFILE_SPEC}" > helmfile-tmp.yaml

    if [ "$AIRGAP" != "1" ]; then
        helmfile --file helmfile-tmp.yaml deps # || report_helm_failure #TODO (dan): add reporting
    fi

    # TODO (dan): To support air gap case, we might need to modify the helmfile to always run the local chart
    helmfile --file helmfile-tmp.yaml sync # || report_helm_failure #TODO (dan): add reporting

    rm helmfile-tmp.yaml

    # TODO (dan): add reporting for helm
    # report_helm_success
}

# Airgapped helm installs are not implemented yet; bail if requested.
function helm_load() {
    if [ "$AIRGAP" = "1" ] && [ -n "$HELM_HELMFILE_SPEC" ] ; then
        # TODO (dan): Implement airgapped loading after bundler is updated
        bail "Airgap Installation with Helm is currently not supported"
        #load_images $DIR/helm-bundle/images
    fi
}

# Thin wrappers selecting the "/archives" sub-tree or the package root, then
# dispatching to the distro-aware _install_host_packages implementation.
function install_host_archives() {
    local dir="$1"
    local dir_prefix="/archives"
    local packages=("${@:2}")
    _install_host_packages "$dir" "$dir_prefix" "${packages[@]}"
}

function install_host_packages() {
    local dir="$1"
    local dir_prefix=""
    local packages=("${@:2}")
    _install_host_packages "$dir" "$dir_prefix" "${packages[@]}"
}

function rpm_force_install_host_archives() {
    local dir="$1"
    local dir_prefix="/archives"
    local packages=("${@:2}")
    _rpm_force_install_host_packages "$dir" "$dir_prefix" "${packages[@]}"
}

function rpm_force_install_host_packages() {
    local dir="$1"
    local dir_prefix=""
    local packages=("${@:2}")
    _rpm_force_install_host_packages "$dir" "$dir_prefix" "${packages[@]}"
}

# Dispatch host-package installation by distro family; amzn falls back to a
# forced rpm install when the rhel-7-force payload is present.
function _install_host_packages() {
    local dir="$1"
    local dir_prefix="$2"
    local packages=("${@:3}")
    case "$LSB_DIST" in
    ubuntu)
        _dpkg_install_host_packages "$dir" "$dir_prefix" "${packages[@]}"
        ;;
    centos|rhel|ol)
        _yum_install_host_packages "$dir" "$dir_prefix" "${packages[@]}"
        ;;
    amzn)
        local fullpath=
        fullpath="$(realpath "${dir}")/rhel-7-force${dir_prefix}"
        if test -n "$(shopt -s nullglob; echo "${fullpath}"/*.rpm)" ; then
            _rpm_force_install_host_packages "$dir" "$dir_prefix" "${packages[@]}"
        else
            _yum_install_host_packages "$dir" "$dir_prefix" "${packages[@]}"
        fi
        ;;
    *)
        bail "Host package install is not supported on ${LSB_DIST} ${DIST_MAJOR}"
        ;;
    esac
}

# Force-install every rpm in the rhel-7-force payload, ignoring deps and
# signatures; quietly succeeds when no rpms are present.
function _rpm_force_install_host_packages() {
    local dir="$1"
    local dir_prefix="$2"
    local packages=("${@:3}")

    logStep "Installing host packages ${packages[*]}"

    local fullpath=
    fullpath="$(realpath "${dir}")/rhel-7-force${dir_prefix}"
    if ! test -n "$(shopt -s nullglob; echo "${fullpath}"/*.rpm)" ; then
        echo "Will not install host packages ${packages[*]}, no packages found."
        return 0
    fi

    rpm --upgrade --force --nodeps --nosignature "${fullpath}"/*.rpm

    logSuccess "Host packages ${packages[*]} installed"
}

function dpkg_install_host_archives() {
    local dir="$1"
    local dir_prefix="/archives"
    local packages=("${@:2}")
    _dpkg_install_host_packages "$dir" "$dir_prefix" "${packages[@]}"
}

function dpkg_install_host_packages() {
    local dir="$1"
    local dir_prefix=""
    local packages=("${@:2}")
    _dpkg_install_host_packages "$dir" "$dir_prefix" "${packages[@]}"
}

# Install every .deb in the ubuntu-$DIST_VERSION payload directory.
function _dpkg_install_host_packages() {
    local dir="$1"
    local dir_prefix="$2"
    local packages=("${@:3}")

    logStep "Installing host packages ${packages[*]}"

    local fullpath=
    fullpath="${dir}/ubuntu-${DIST_VERSION}${dir_prefix}"
    if ! test -n "$(shopt -s nullglob; echo "${fullpath}"/*.deb)" ; then
        echo "Will not install host packages ${packages[*]}, no packages found."
        return 0
    fi

    DEBIAN_FRONTEND=noninteractive dpkg --install --force-depends-version --force-confold "${fullpath}"/*.deb

    logSuccess "Host packages ${packages[*]} installed"
}

function yum_install_host_archives() {
    local dir="$1"
    local dir_prefix="/archives"
    local packages=("${@:2}")
    _yum_install_host_packages "$dir" "$dir_prefix" "${packages[@]}"
}

function yum_install_host_packages() {
    local dir="$1"
    local dir_prefix=""
    local packages=("${@:2}")
    _yum_install_host_packages "$dir" "$dir_prefix" "${packages[@]}"
}

# Install rpms via a transient local yum repo; quietly succeeds when no rpms
# are present in the payload path.
function _yum_install_host_packages() {
    local dir="$1"
    local dir_prefix="$2"
    local packages=("${@:3}")

    logStep "Installing host packages ${packages[*]}"

    local fullpath=
    fullpath="$(_yum_get_host_packages_path "${dir}" "${dir_prefix}")"
    if ! test -n "$(shopt -s nullglob; echo "${fullpath}"/*.rpm)" ; then
        echo "Will not install host packages ${packages[*]}, no packages found."
        return 0
    fi

    # NOTE(review): the heredoc body written to kurl.local.repo, the remainder
    # of this function, and one or more following function definitions were
    # lost during text extraction — the text jumps abruptly into a
    # commented-out kubeadm-init block. The surviving fragment is reproduced
    # unchanged.
    cat > /etc/yum.repos.d/kurl.local.repo </dev/null || echo); do
    #     patch_basename="$(basename $patch)"
    #     cp $patch $kustomize_kubeadm_init/$patch_basename
    #     insert_patches_strategic_merge \
    #         $kustomize_kubeadm_init/kustomization.yaml \
    #         $patch_basename
    # done
    # mkdir -p "$KUBEADM_CONF_DIR"
    # kubectl kustomize $kustomize_kubeadm_init > $KUBEADM_CONF_DIR/kubeadm-init-raw.yaml
    # render_yaml_file $KUBEADM_CONF_DIR/kubeadm-init-raw.yaml > $KUBEADM_CONF_FILE
    # # kustomize requires assests have a metadata field while kubeadm config will reject yaml containing it
    # # this uses a go binary found in kurl/cmd/yamlutil to strip the metadata field from the yaml
    #
    # cp $KUBEADM_CONF_FILE $KUBEADM_CONF_DIR/kubeadm_conf_copy_in
    # $DIR/bin/yamlutil -r -fp $KUBEADM_CONF_DIR/kubeadm_conf_copy_in -yf metadata
    # mv $KUBEADM_CONF_DIR/kubeadm_conf_copy_in $KUBEADM_CONF_FILE
    # cat << EOF >> $KUBEADM_CONF_FILE
    # apiVersion: kubelet.config.k8s.io/v1beta1
    # kind: KubeletConfiguration
    # cgroupDriver: systemd
    # ---
    # EOF
    # # When no_proxy changes kubeadm init rewrites the static manifests and fails
# because the api is
    # # restarting. Trigger the restart ahead of time and wait for it to be healthy.
    # (NOTE(review): the line above continues a comment cut at the chunk
    # boundary; this whole region is a commented-out kubeadm-init flow.)
    # if [ -f "/etc/kubernetes/manifests/kube-apiserver.yaml" ] && [ -n "$no_proxy" ] && ! cat /etc/kubernetes/manifests/kube-apiserver.yaml | grep -q "$no_proxy"; then
    #     kubeadm init phase control-plane apiserver --config $KUBEADM_CONF_FILE
    #     sleep 2
    #     if ! spinner_until 60 kubernetes_api_is_healthy; then
    #         echo "Failed to wait for kubernetes API restart after no_proxy change"
    #         continue
    #     fi
    # fi

    # if [ "$HA_CLUSTER" = "1" ]; then
    #     UPLOAD_CERTS="--upload-certs"
    # fi

    # # kubeadm init temporarily taints this node which causes rook to move any mons on it and may
    # # lead to a loss of quorum
    # disable_rook_ceph_operator

    # # since K8s 1.19.1 kubeconfigs point to local API server even in HA setup. When upgrading from
    # # earlier versions and using a load balancer, kubeadm init will bail because the kubeconfigs
    # # already exist pointing to the load balancer
    # rm -rf /etc/kubernetes/*.conf

    # # Regenerate api server cert in case load balancer address changed
    # if [ -f /etc/kubernetes/pki/apiserver.crt ]; then
    #     mv -f /etc/kubernetes/pki/apiserver.crt /tmp/
    # fi
    # if [ -f /etc/kubernetes/pki/apiserver.key ]; then
    #     mv -f /etc/kubernetes/pki/apiserver.key /tmp/
    # fi

    # set -o pipefail
    # kubeadm init \
    #     --ignore-preflight-errors=all \
    #     --config $KUBEADM_CONF_FILE \
    #     $UPLOAD_CERTS \
    #     | tee /tmp/kubeadm-init
    # set +o pipefail

    # if [ -n "$LOAD_BALANCER_ADDRESS" ]; then
    #     spinner_until 120 cert_has_san "$PRIVATE_ADDRESS:6443" "$LOAD_BALANCER_ADDRESS"
    # fi

    # spinner_kubernetes_api_stable

    # exportKubeconfig # This was moved to the setup function

    # KUBEADM_TOKEN_CA_HASH=$(cat /tmp/kubeadm-init | grep 'discovery-token-ca-cert-hash' | awk '{ print $2 }' | head -1)

    # Active init tail: wait for node readiness and re-enable rook-ceph.
    wait_for_nodes
    enable_rook_ceph_operator

    DID_INIT_KUBERNETES=1
    # logSuccess "Kubernetes Master Initialized"

    # local currentLoadBalancerAddress=$(kubernetes_load_balancer_address)
    # if [ "$currentLoadBalancerAddress" != "$oldLoadBalancerAddress" ]; then
    #     # restart scheduler and controller-manager on this node so they use the new address
    #     mv /etc/kubernetes/manifests/kube-scheduler.yaml /tmp/ && sleep 1 && mv /tmp/kube-scheduler.yaml /etc/kubernetes/manifests/
    #     mv /etc/kubernetes/manifests/kube-controller-manager.yaml /tmp/ && sleep 1 && mv /tmp/kube-controller-manager.yaml /etc/kubernetes/manifests/
    #     # restart kube-proxies so they use the new address
    #     kubectl -n kube-system delete pods --selector=k8s-app=kube-proxy
    #     if kubernetes_has_remotes; then
    #         local proxyFlag=""
    #         if [ -n "$PROXY_ADDRESS" ]; then
    #             proxyFlag=" -x $PROXY_ADDRESS"
    #         fi
    #         local prefix="curl -sSL${proxyFlag} $KURL_URL/$INSTALLER_ID/"
    #         if [ "$AIRGAP" = "1" ] || [ -z "$KURL_URL" ]; then
    #             prefix="cat "
    #         fi
    #         printf "${YELLOW}\nThe load balancer address has changed. Run the following on all remote nodes to use the new address${NC}\n"
    #         printf "\n"
    #         printf "${GREEN} ${prefix}tasks.sh | sudo bash -s set-kubeconfig-server https://${currentLoadBalancerAddress}${NC}\n"
    #         printf "\n"
    #         printf "Continue? "
    #         confirmN
    #         if commandExists ekco_handle_load_balancer_address_change_post_init; then
    #             ekco_handle_load_balancer_address_change_post_init $oldLoadBalancerAddress $LOAD_BALANCER_ADDRESS
    #         fi
    #     fi
    # fi

    labelNodes
    kubectl cluster-info

    # create kurl namespace if it doesn't exist
    kubectl get ns kurl 2>/dev/null 1>/dev/null || kubectl create ns kurl 1>/dev/null

    logSuccess "Cluster Initialized"

    # TODO(dans): coredns is deployed through helm -> might need to go through values here
    # configure_coredns

    if commandExists registry_init; then
        registry_init
    fi
}

# Install the k3s distribution: fetch packages, configure, install host
# packages, load images, and set up the server systemd service.
# Continues mid-function on the next chunk.
function k3s_install() {
    local k3s_version="$1"

    # TODO(ethan): is this still necessary?
    # kubernetes_sysctl_config

    # Strip the leading "v" and trailing build suffix to get the bare k8s semver.
    local k8s_semver=
    k8s_semver="$(echo "${k3s_version}" | sed 's/^v\(.*\)-.*$/\1/')"

    # For online always download the k3s.tar.gz bundle.
# Regardless if host packages are already installed, we always inspect for newer versions
    # and/or re-install any missing or corrupted packages.
    # TODO(ethan): is this comment correct?
    if [ "$AIRGAP" != "1" ] && [ -n "$DIST_URL" ]; then
        k3s_get_host_packages_online "${k3s_version}"
        kubernetes_get_conformance_packages_online "${k8s_semver}"
    fi

    k3s_configure
    k3s_install_host_packages "${k3s_version}"
    k3s_load_images "${k3s_version}"

    if [ "$MASTER" == "1" ]; then
        k3s_server_setup_systemd_service
    else
        # TOOD (dan): agent nodes not supported.
        bail "Agent nodes for k3s are currently unsupported"
    fi

    k3s_create_symlinks
    k3s_modify_profiled

    spinner_containerd_is_healthy

    get_shared

    logStep "Installing plugins"
    install_plugins
    logSuccess "Plugins installed"

    # TODO(ethan)
    # install_kustomize

    # TODO(dan) do I need this
    # Block until k3s has written its kubeconfig.
    while [ ! -f /etc/rancher/k3s/k3s.yaml ]; do
        sleep 2
    done

    if [ -d "$DIR/packages/kubernetes-conformance/${k8s_semver}/images" ]; then
        load_images "$DIR/packages/kubernetes-conformance/${k8s_semver}/images"
    fi

    logStep "Waiting for Kubernetes"
    # Extending timeout to 5 min based on performance on clean machines.
    if ! spinner_until 300 get_nodes_succeeds ; then
        # this should exit script on non-zero exit code and print error message
        kubectl get nodes 1>/dev/null
    fi

    wait_for_default_namespace
    logSuccess "Kubernetes ready"

    # TODO(dan): Need to figure out how to let users run container tools as non-root
}

# Print a red "DANGER" banner warning that k3s support is experimental.
function k3s_preamble() {
    printf "${RED}"
    # NOTE(review): the line breaks inside this ASCII-art banner were lost
    # during text extraction; the splits below are a best-effort
    # reconstruction of the original figlet layout — verify against upstream.
    cat << "EOF"
 ( ) ( ____
 )\ ) ( ( /( ( )\ ) | /
(()/( )\ )\()) )\ ) ( (()/( | /
 /(_))((((_)( ((_)\ (()/( )\ /(_)) | /
(_))_ )\ _ )\ _((_) /(_))_ ((_) (_)) |/
| \ (_)_\(_)| \| |(_)) __|| __|| _ \ (
| |) | / _ \ | .` | | (_ || _| | / )\
|___/ /_/ \_\ |_|\_| \___||___||_|_\((_)
EOF
    printf "${NC}\n"
    printf "${RED}YOU ARE NOW INSTALLING K3S WITH KURL. THIS FEATURE IS EXPERIMENTAL!${NC}\n"
    printf "${RED}\t- It can be removed at any point in the future.${NC}\n"
    printf "${RED}\t- There are zero guarantees regarding addon compatibility.${NC}\n"
    printf "${RED}\n\nCONTINUING AT YOUR OWN RISK....${NC}\n\n"
}

# Print the post-install summary for k3s. Most of the kubeadm-style join
# instructions are retained as commented-out reference code.
# Continues mid-function on the next chunk.
function k3s_outro() {
    echo

    # if [ -z "$PUBLIC_ADDRESS" ]; then
    #     if [ -z "$PRIVATE_ADDRESS" ]; then
    #         PUBLIC_ADDRESS=""
    #         PRIVATE_ADDRESS=""
    #     else
    #         PUBLIC_ADDRESS="$PRIVATE_ADDRESS"
    #     fi
    # fi

    # local proxyFlag=""
    # if [ -n "$PROXY_ADDRESS" ]; then
    #     proxyFlag=" -x $PROXY_ADDRESS"
    # fi

    # local common_flags
    # common_flags="${common_flags}$(get_docker_registry_ip_flag "${DOCKER_REGISTRY_IP}")"
    # common_flags="${common_flags}$(get_additional_no_proxy_addresses_flag "${PROXY_ADDRESS}" "${SERVICE_CIDR},${POD_CIDR}")"
    # common_flags="${common_flags}$(get_kurl_install_directory_flag "${KURL_INSTALL_DIRECTORY_FLAG}")"

    # TODO(dan): move this somewhere into the k8s distro
    # KUBEADM_TOKEN_CA_HASH=$(cat /tmp/kubeadm-init | grep 'discovery-token-ca-cert-hash' | awk '{ print $2 }' | head -1)

    printf "\n"
    printf "\t\t${GREEN}Installation${NC}\n"
    printf "\t\t${GREEN} Complete ✔${NC}\n"
    addon_outro
    printf "\n"
    # TODO(dan): specific to kubeadm config.
# kubeconfig_setup_outro local prefix="curl -sSL${proxyFlag} $KURL_URL/$INSTALLER_ID/" if [ -z "$KURL_URL" ]; then prefix="cat " fi # if [ "$HA_CLUSTER" = "1" ]; then # printf "Master node join commands expire after two hours, and worker node join commands expire after 24 hours.\n" # printf "\n" # if [ "$AIRGAP" = "1" ]; then # printf "To generate new node join commands, run ${GREEN}cat ./tasks.sh | sudo bash -s join_token ha airgap${NC} on an existing master node.\n" # else # printf "To generate new node join commands, run ${GREEN}${prefix}tasks.sh | sudo bash -s join_token ha${NC} on an existing master node.\n" # fi # else # printf "Node join commands expire after 24 hours.\n" # printf "\n" # if [ "$AIRGAP" = "1" ]; then # printf "To generate new node join commands, run ${GREEN}cat ./tasks.sh | sudo bash -s join_token airgap${NC} on this node.\n" # else # printf "To generate new node join commands, run ${GREEN}${prefix}tasks.sh | sudo bash -s join_token${NC} on this node.\n" # fi # fi # if [ "$AIRGAP" = "1" ]; then # printf "\n" # printf "To add worker nodes to this installation, copy and unpack this bundle on your other nodes, and run the following:" # printf "\n" # printf "\n" # printf "${GREEN} cat ./join.sh | sudo bash -s airgap kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=${KUBEADM_TOKEN_CA_HASH} kubernetes-version=${KUBERNETES_VERSION}${common_flags}\n" # printf "${NC}" # printf "\n" # printf "\n" # if [ "$HA_CLUSTER" = "1" ]; then # printf "\n" # printf "To add ${GREEN}MASTER${NC} nodes to this installation, copy and unpack this bundle on your other nodes, and run the following:" # printf "\n" # printf "\n" # printf "${GREEN} cat ./join.sh | sudo bash -s airgap kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=${KUBEADM_TOKEN_CA_HASH} kubernetes-version=${KUBERNETES_VERSION} cert-key=${CERT_KEY} control-plane${common_flags}\n" # printf "${NC}" # 
printf "\n" # printf "\n" # fi # else # printf "\n" # printf "To add worker nodes to this installation, run the following script on your other nodes:" # printf "\n" # printf "${GREEN} ${prefix}join.sh | sudo bash -s kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=${KUBEADM_TOKEN_CA_HASH} kubernetes-version=${KUBERNETES_VERSION}${common_flags}\n" # printf "${NC}" # printf "\n" # printf "\n" # if [ "$HA_CLUSTER" = "1" ]; then # printf "\n" # printf "To add ${GREEN}MASTER${NC} nodes to this installation, run the following script on your other nodes:" # printf "\n" # printf "${GREEN} ${prefix}join.sh | sudo bash -s kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=$KUBEADM_TOKEN_CA_HASH kubernetes-version=${KUBERNETES_VERSION} cert-key=${CERT_KEY} control-plane${common_flags}\n" # printf "${NC}" # printf "\n" # printf "\n" # fi # fi } function k3s_main() { local k3s_version="$(echo "${K3S_VERSION}" | sed 's/+/-/')" k3s_preamble # K3S Begin # parse_kubernetes_target_version # TODO(dan): Version only makes sense for kuberntees discover full-cluster # TODO(dan): looks for docker and kubernetes, shouldn't hurt # report_install_start # TODO(dan) remove reporting for now. # trap prek8s_ctrl_c SIGINT # trap ctrl+c (SIGINT) and handle it by reporting that the user exited intentionally # TODO(dan) remove reporting for now. 
# preflights # TODO(dan): mostly good, but disable for now

    ${K8S_DISTRO}_addon_for_each addon_fetch

    # if [ -z "$CURRENT_KUBERNETES_VERSION" ]; then # TODO (ethan): support for CURRENT_KUBERNETES_VERSION
    #     host_preflights "1" "0" "0"
    # else
    #     host_preflights "1" "0" "1"
    # fi

    common_prompts # TODO(dan): shouldn't come into play for K3S
    journald_persistent
    configure_proxy
    install_host_dependencies
    get_common
    ${K8S_DISTRO}_addon_for_each addon_pre_init
    discover_pod_subnet
    # discover_service_subnet # TODO(dan): uses kubeadm
    configure_no_proxy

    k3s_install "${k3s_version}"

    # upgrade_kubernetes # TODO(dan): uses kubectl operator
    # kubernetes_host # TODO(dan): installs and sets up kubeadm, kubectl
    # setup_kubeadm_kustomize # TODO(dan): self-explainatory
    # trap k8s_ctrl_c SIGINT # trap ctrl+c (SIGINT) and handle it by asking for a support bundle - only do this after k8s is installed

    ${K8S_DISTRO}_addon_for_each addon_load
    # init # See next line
    k3s_init # TODO(dan): A mix of Kubeadm stuff and general setup.
    apply_installer_crd
    kurl_init_config
    ${K8S_DISTRO}_addon_for_each addon_install
    # post_init # TODO(dan): more kubeadm token setup
    k3s_outro
    package_cleanup
    # report_install_success # TODO(dan) remove reporting for now.
}

# Ensure the k3s config file requests a kubeconfig at a known path with
# world-readable mode; flags K3S_SHOULD_RESTART when anything was added.
function k3s_configure() {
    if ! grep -qs "^write-kubeconfig:" /etc/rancher/k3s/config.yaml ; then
        mkdir -p /etc/rancher/k3s/
        echo "write-kubeconfig: \"/etc/rancher/k3s/k3s.yaml\"" >> /etc/rancher/k3s/config.yaml
        K3S_SHOULD_RESTART=1
    fi

    # prevent permission denied error when running kubectl
    if ! grep -qs "^write-kubeconfig-mode:" /etc/rancher/k3s/config.yaml ; then
        mkdir -p /etc/rancher/k3s/
        echo "write-kubeconfig-mode: 644" >> /etc/rancher/k3s/config.yaml
        K3S_SHOULD_RESTART=1
    fi

    # TODO(ethan): pod cidr
    # TODO(ethan): service cidr
    # TODO(ethan): http proxy
    # TODO(ethan): load balancer
}

# Restart the k3s server systemd unit and wait for it to come back.
function k3s_restart() {
    restart_systemd_and_wait "k3s-server.service" # TODO(ethan): k3s-agent.service?
}

# Install the k3s binary (and selinux policy where enabled), restarting the
# service instead when the right version is already present.
function k3s_install_host_packages() {
    local k3s_version="$1"

    if k3s_host_packages_ok "${k3s_version}"; then
        logSuccess "K3S host packages already installed"

        if [ "${K3S_SHOULD_RESTART}" = "1" ]; then
            k3s_restart
            K3S_SHOULD_RESTART=0
        fi
        return
    fi

    # install the selinux policy
    # TODO (dan): need to integrate this with SELinux settings in install.sh
    if [ -n "$K3S_SELINUX_ENABLED" ]; then
        case "$LSB_DIST" in
        centos|rhel|amzn|ol)
            install_host_packages "${DIR}/packages/k3s/${k3s_version}" k3s-selinux
            ;;
        *)
            bail "K3S install is not supported on ${LSB_DIST} ${DIST_MAJOR}"
            ;;
        esac
    fi

    # installs the k3s binary
    cp $DIR/packages/k-3-s/${k3s_version}/assets/k3s /usr/local/bin/
    chmod 755 /usr/local/bin/k3s

    # TODO(ethan): is this still necessary?
    # if [ "$CLUSTER_DNS" != "$DEFAULT_CLUSTER_DNS" ]; then
    #     sed -i "s/$DEFAULT_CLUSTER_DNS/$CLUSTER_DNS/g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
    # fi
}

# Succeeds when the installed k3s matches the requested version.
function k3s_host_packages_ok() {
    local k3s_version="$1"

    if ! commandExists k3s; then
        echo "k3s command missing - will install host components"
        return 1
    fi

    # NOTE(review): checks `kubelet --version`; confirm this is intended for
    # k3s rather than `k3s --version`.
    kubelet --version | grep -q "$(echo $k3s_version | sed "s/-/+/")"
}

# Download and unpack the k3s package bundle (named "k-3-s" upstream).
function k3s_get_host_packages_online() {
    local k3s_version="$1"

    rm -rf $DIR/packages/k3s/${k3s_version} # Cleanup broken/incompatible packages from failed runs

    local package="k-3-s-${k3s_version}.tar.gz"
    package_download "${package}"
    tar xf "$(package_filepath "${package}")"
}

# Decompress the bundled k3s images archive into the rancher agent image dir,
# where k3s auto-imports it on startup.
function k3s_load_images() {
    local k3s_version="$1"

    logStep "Load K3S images"

    mkdir -p /var/lib/rancher/k3s/agent/images
    gunzip -c $DIR/packages/k-3-s/${k3s_version}/assets/k3s-images.linux-amd64.tar.gz > /var/lib/rancher/k3s/agent/images/k3s-images.linux-amd64.tar

    logSuccess "K3S images loaded"
}

# Create the k3s-server systemd service unless it already exists.
# Continues mid-function on the next chunk.
function k3s_server_setup_systemd_service() {
    if [ -f "/etc/systemd/system/k3s-server.service" ]; then
        logSubstep "Systemd service for the K3S Server already exists. Skipping."
        return
    fi

    logStep "Creating K3S Server Systemd Service"

    k3s_create_env_file

    # Created Systemd unit from https://get.k3s.io/
    # TODO (dan): check if this should be a server or agent
    # NOTE(review): the systemd unit heredoc written below, the remainder of
    # this function, and the beginning of k3s_create_env_file were lost during
    # text extraction; the surviving fragment is reproduced unchanged.
    tee /etc/systemd/system/k3s-server.service > /dev/null </dev/null
    env | egrep -i '^(NO|HTTP|HTTPS)_PROXY' | tee -a ${fileK3sEnv} >/dev/null
    umask $UMASK
}

# Symlink kubectl/crictl/ctr to the multi-call k3s binary, skipping commands
# already on PATH unless INSTALL_K3S_SYMLINK=force.
function k3s_create_symlinks() {
    local binDir=/usr/local/bin

    for cmd in kubectl crictl ctr; do
        if [ ! -e ${binDir}/${cmd} ] || [ "${INSTALL_K3S_SYMLINK}" = force ]; then
            which_cmd=$(which ${cmd} 2>/dev/null || true)
            if [ -z "${which_cmd}" ] || [ "${INSTALL_K3S_SYMLINK}" = force ]; then
                echo "Creating ${binDir}/${cmd} symlink to k3s"
                ln -sf k3s ${binDir}/${cmd}
            else
                echo "Skipping ${binDir}/${cmd} symlink to k3s, command exists in PATH at ${which_cmd}"
            fi
        else
            echo "Skipping ${binDir}/${cmd} symlink to k3s, already exists"
        fi
    done
}

function k3s_modify_profiled() {
    # NOTE: this is still not in the path for sudo
    if [ ! -f "/etc/profile.d/k3s.sh" ]; then
        # NOTE(review): the heredoc body for /etc/profile.d/k3s.sh, the close
        # of this function, and the header of the function that loads the ipvs
        # kernel modules were lost during text extraction; the surviving
        # fragment is reproduced unchanged.
        tee /etc/profile.d/k3s.sh > /dev/null < /etc/modules-load.d/replicated-ipvs.conf
    echo 'ip_vs' >> /etc/modules-load.d/replicated-ipvs.conf
    echo 'ip_vs_rr' >> /etc/modules-load.d/replicated-ipvs.conf
    echo 'ip_vs_wrr' >> /etc/modules-load.d/replicated-ipvs.conf
    echo 'ip_vs_sh' >> /etc/modules-load.d/replicated-ipvs.conf
}

# Enable bridge netfilter and ipv4 forwarding sysctls on RPM-family distros.
function kubernetes_sysctl_config() {
    case "$LSB_DIST" in
    # TODO I've only seen these disabled on centos/rhel but should be safe for ubuntu
    centos|rhel|amzn|ol)
        echo "net.bridge.bridge-nf-call-ip6tables = 1" > /etc/sysctl.d/k8s.conf
        echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.d/k8s.conf
        echo "net.ipv4.conf.all.forwarding = 1" >> /etc/sysctl.d/k8s.conf

        sysctl --system
        ;;
    esac
}

# k8sVersion is an argument because this may be used to install step versions of K8s during an upgrade
# to the target version
function kubernetes_install_host_packages() {
    k8sVersion=$1

    logStep "Install kubelet, kubectl and cni host packages"

    if kubernetes_host_commands_ok "$k8sVersion"; then
        logSuccess "Kubernetes host packages already installed"
        return
    fi

    if [ "$AIRGAP" != "1" ] && [ -n "$DIST_URL" ]; then
        kubernetes_get_host_packages_online "$k8sVersion"
        kubernetes_get_conformance_packages_online "$k8sVersion"
    fi

    # NOTE(review): the heredoc body for tmp-kubeadm.conf, the remainder of
    # this function, and at least one following function (likely
    # kubernetes_masters) were lost during text extraction; the surviving
    # fragment is reproduced unchanged.
    cat > "$DIR/tmp-kubeadm.conf" </dev/null
}

# List worker nodes (no headers); errors from kubectl are suppressed.
function kubernetes_workers() {
    kubectl get node --no-headers --selector='!node-role.kubernetes.io/master' 2>/dev/null
}

# exit 0 if there are any remote workers or masters
function kubernetes_has_remotes() {
    if ! kubernetes_api_is_healthy; then
        # assume this is a new install
        return 1
    fi

    local count=$(kubectl get nodes --no-headers --selector="kubernetes.io/hostname!=$(get_local_node_name)" 2>/dev/null | wc -l)
    if [ "$count" -gt "0" ]; then
        return 0
    fi

    return 1
}

# Print the API server address: the load balancer endpoint when configured,
# otherwise this node's private address on port 6443.
function kubernetes_api_address() {
    if [ -n "$LOAD_BALANCER_ADDRESS" ]; then
        echo "${LOAD_BALANCER_ADDRESS}:${LOAD_BALANCER_PORT}"
        return
    fi
    echo "${PRIVATE_ADDRESS}:6443"
}

# Delegate the API health check to the distro-specific implementation.
function kubernetes_api_is_healthy() {
    ${K8S_DISTRO}_api_is_healthy
}

# Succeeds when containerd answers an image-list request on its socket.
function containerd_is_healthy() {
    ctr -a "$(${K8S_DISTRO}_get_containerd_sock)" images list &> /dev/null
}

# Wait up to 120s for the Kubernetes API to report healthy, else bail.
function spinner_kubernetes_api_healthy() {
    if ! spinner_until 120 kubernetes_api_is_healthy; then
        bail "Kubernetes API failed to report healthy"
    fi
}

function spinner_containerd_is_healthy() {
    if !
spinner_until 120 containerd_is_healthy; then bail "Containerd failed to restart" fi } # With AWS NLB kubectl commands may fail to connect to the Kubernetes API immediately after a single # successful health check function spinner_kubernetes_api_stable() { for i in {1..10}; do sleep 1 spinner_kubernetes_api_healthy done } function kubernetes_drain() { kubectl drain "$1" \ --delete-local-data \ --ignore-daemonsets \ --force \ --grace-period=30 \ --timeout=120s \ --pod-selector 'app notin (rook-ceph-mon,rook-ceph-osd,rook-ceph-osd-prepare,rook-ceph-operator,rook-ceph-agent),k8s-app!=kube-dns' || true } function kubernetes_node_has_version() { local name="$1" local version="$2" local actual_version="$(try_1m kubernetes_node_kubelet_version $name)" [ "$actual_version" = "v${version}" ] } function kubernetes_node_kubelet_version() { local name="$1" kubectl get node "$name" -o=jsonpath='{@.status.nodeInfo.kubeletVersion}' } function kubernetes_any_remote_master_unupgraded() { while read -r master; do local name=$(echo $master | awk '{ print $1 }') if ! kubernetes_node_has_version "$name" "$KUBERNETES_VERSION"; then return 0 fi done < <(kubernetes_remote_masters) return 1 } function kubernetes_any_worker_unupgraded() { while read -r worker; do local name=$(echo $worker | awk '{ print $1 }') if ! kubernetes_node_has_version "$name" "$KUBERNETES_VERSION"; then return 0 fi done < <(kubernetes_workers) return 1 } function kubelet_version() { kubelet --version | cut -d ' ' -f 2 | sed 's/v//' } function kubernetes_scale_down() { local ns="$1" local kind="$2" local name="$3" if ! 
# --- tail of kubernetes_scale_down, begun on the previous chunk ---
kubernetes_resource_exists "$ns" "$kind" "$name"; then
        return 0
    fi
    kubectl -n "$ns" scale "$kind" "$name" --replicas=0
}

# Print the base64-decoded value of one key of a secret; empty output if absent.
function kubernetes_secret_value() {
    local ns="$1"
    local name="$2"
    local key="$3"
    kubectl -n "$ns" get secret "$name" -ojsonpath="{ .data.$key }" 2>/dev/null | base64 --decode
}

# True when this host is (or is becoming) a primary: the installer was invoked
# in master mode, or a static kube-apiserver manifest already exists.
function kubernetes_is_master() {
    if [ "$MASTER" = "1" ]; then
        return 0
    elif [ -f /etc/kubernetes/manifests/kube-apiserver.yaml ]; then
        return 0
    else
        return 1
    fi
}

# Choose POD_CIDR: validate a user-supplied cidr against the existing cluster
# and host routes, or allocate a free range (continues on the next chunk).
function discover_pod_subnet() {
    local excluded=""
    # only exclude the host's /16 when its source route has no explicit prefix
    if ! ip route show src "$PRIVATE_ADDRESS" | awk '{ print $1 }' | grep -q '/'; then
        excluded="--exclude-subnet=${PRIVATE_ADDRESS}/16"
    fi
    if [ -n "$POD_CIDR" ]; then
        local podCidrSize=$(echo $POD_CIDR | awk -F'/' '{ print $2 }')
        # if pod-cidr flag and pod-cidr-range are both set, validate pod-cidr is as large as pod-cidr-range
        if [ -n "$POD_CIDR_RANGE" ]; then
            if [ "$podCidrSize" -gt "$POD_CIDR_RANGE" ]; then
                bail "Pod cidr must be at least /$POD_CIDR_RANGE"
            fi
        fi
        # if pod cidr flag matches existing weave pod cidr don't validate
        if [ "$POD_CIDR" = "$EXISTING_POD_CIDR" ]; then
            return 0
        elif [ -n "$EXISTING_POD_CIDR" ]; then
            bail "Pod cidr cannot be changed to $POD_CIDR because existing cidr is $EXISTING_POD_CIDR"
        fi
        if $DIR/bin/subnet --subnet-alloc-range "$POD_CIDR" --cidr-range "$podCidrSize" "$excluded" 1>/dev/null; then
            return 0
        fi
        printf "${RED}Pod cidr ${POD_CIDR} overlaps with existing route. Continue? ${NC}"
        if !
# --- continuation of discover_pod_subnet from the previous chunk ---
confirmY ; then
            exit 1
        fi
        return 0
    fi
    # detected from weave device
    if [ -n "$EXISTING_POD_CIDR" ]; then
        POD_CIDR="$EXISTING_POD_CIDR"
        return 0
    fi
    local size="$POD_CIDR_RANGE"
    if [ -z "$size" ]; then
        size="20"
    fi
    # find a network for the Pods, preferring start at 10.32.0.0
    if podnet=$($DIR/bin/subnet --subnet-alloc-range "10.32.0.0/16" --cidr-range "$size" "$excluded"); then
        echo "Found pod network: $podnet"
        POD_CIDR="$podnet"
        return 0
    fi
    if podnet=$($DIR/bin/subnet --subnet-alloc-range "10.0.0.0/8" --cidr-range "$size" "$excluded"); then
        echo "Found pod network: $podnet"
        POD_CIDR="$podnet"
        return 0
    fi
    bail "Failed to find available subnet for pod network. Use the pod-cidr flag to set a pod network"
}

# This must run after discover_pod_subnet since it excludes the pod cidr
# Choose SERVICE_CIDR the same way: validate a user-supplied cidr or allocate a
# free range, preferring 10.96.0.0/16 (continues on the next chunk).
function discover_service_subnet() {
    local excluded="--exclude-subnet=$POD_CIDR"
    if ! ip route show src "$PRIVATE_ADDRESS" | awk '{ print $1 }' | grep -q '/'; then
        excluded="$excluded,${PRIVATE_ADDRESS}/16"
    fi
    EXISTING_SERVICE_CIDR=$(maybe kubeadm_cluster_configuration | grep serviceSubnet | awk '{ print $2 }')
    if [ -n "$SERVICE_CIDR" ]; then
        local serviceCidrSize=$(echo $SERVICE_CIDR | awk -F'/' '{ print $2 }')
        # if service-cidr flag and service-cidr-range are both set, validate service-cidr is as large as service-cidr-range
        if [ -n "$SERVICE_CIDR_RANGE" ]; then
            if [ "$serviceCidrSize" -gt "$SERVICE_CIDR_RANGE" ]; then
                bail "Service cidr must be at least /$SERVICE_CIDR_RANGE"
            fi
        fi
        # if service-cidr flag matches existing service cidr don't validate
        if [ "$SERVICE_CIDR" = "$EXISTING_SERVICE_CIDR" ]; then
            return 0
        elif [ -n "$EXISTING_SERVICE_CIDR" ]; then
            bail "Service cidr cannot be changed to $SERVICE_CIDR because existing cidr is $EXISTING_SERVICE_CIDR"
        fi
        if $DIR/bin/subnet --subnet-alloc-range "$SERVICE_CIDR" --cidr-range "$serviceCidrSize" "$excluded" 1>/dev/null; then
            return 0
        fi
        printf "${RED}Service cidr ${SERVICE_CIDR} overlaps with existing route. Continue? ${NC}"
        if !
# --- continuation of discover_service_subnet from the previous chunk ---
confirmY ; then
            exit 1
        fi
        return 0
    fi
    if [ -n "$EXISTING_SERVICE_CIDR" ]; then
        echo "Using existing service cidr ${EXISTING_SERVICE_CIDR}"
        SERVICE_CIDR="$EXISTING_SERVICE_CIDR"
        return 0
    fi
    local size="$SERVICE_CIDR_RANGE"
    if [ -z "$size" ]; then
        size="22"
    fi
    # find a network for the services, preferring start at 10.96.0.0
    if servicenet=$($DIR/bin/subnet --subnet-alloc-range "10.96.0.0/16" --cidr-range "$size" "$excluded"); then
        echo "Found service network: $servicenet"
        SERVICE_CIDR="$servicenet"
        return 0
    fi
    if servicenet=$($DIR/bin/subnet --subnet-alloc-range "10.0.0.0/8" --cidr-range "$size" "$excluded"); then
        echo "Found service network: $servicenet"
        SERVICE_CIDR="$servicenet"
        return 0
    fi
    bail "Failed to find available subnet for service network. Use the service-cidr flag to set a service network"
}

# Print every image name present on the given node, one per line.
function kubernetes_node_images() {
    local nodeName="$1"
    kubectl get node "$nodeName" -ojsonpath="{range .status.images[*]}{ range .names[*] }{ @ }{'\n'}{ end }{ end }"
}

# Print the images listed in every Manifest file under the given directory.
# Manifest lines look like "image <short-name> <registry/image:tag>"; entries
# flagged no_remote_load are excluded. "xargs -r" (GNU) avoids running a bare
# "cat" — which would block reading stdin — when find matches no files.
function list_manifest_images() {
    local dir="$1"
    find "$dir" -type f -name Manifest 2>/dev/null | xargs -r cat | grep -E '^image' | grep -v no_remote_load | awk '{ print $3 }'
}

# Print every image required by the current installer spec, one per line: the
# kurl util image, the kubernetes/docker packages, and each enabled addon.
# Reads the *_VERSION globals; an empty version skips that component.
# (Previously 17 copy-pasted find|xargs|grep|awk pipelines — now one helper.)
function list_all_required_images() {
    echo "$KURL_UTIL_IMAGE"

    list_manifest_images "packages/kubernetes/$KUBERNETES_VERSION"
    if [ -n "$STEP_VERSION" ]; then
        list_manifest_images "packages/kubernetes/$STEP_VERSION"
    fi
    if [ -n "$DOCKER_VERSION" ]; then
        list_manifest_images "packages/docker/$DOCKER_VERSION"
    fi

    # addons, in the same order the original copy-pasted stanzas used
    local addon_spec=
    for addon_spec in \
        "weave/$WEAVE_VERSION" \
        "rook/$ROOK_VERSION" \
        "openebs/$OPENEBS_VERSION" \
        "longhorn/$LONGHORN_VERSION" \
        "minio/$MINIO_VERSION" \
        "contour/$CONTOUR_VERSION" \
        "registry/$REGISTRY_VERSION" \
        "prometheus/$PROMETHEUS_VERSION" \
        "kotsadm/$KOTSADM_VERSION" \
        "fluentd/$FLUENTD_VERSION" \
        "velero/$VELERO_VERSION" \
        "ekco/$EKCO_VERSION" \
        "cert-manager/$CERT_MANAGER_VERSION" \
        "metrics-server/$METRICS_SERVER_VERSION" \
        "sonobuoy/$SONOBUOY_VERSION" ; do
        # skip addons whose *_VERSION global is empty (spec ends with "/")
        if [ -z "${addon_spec##*/}" ]; then
            continue
        fi
        list_manifest_images "addons/$addon_spec"
    done
}

# True when the node already has every required image; prints each missing one.
function kubernetes_node_has_all_images() {
    local nodeName="$1"
    while read -r image; do
        if ! kubernetes_node_has_image "$nodeName" "$image"; then
            printf "\n${YELLOW}Node $nodeName missing image $image${NC}\n"
            return 1
        fi
    done < <(list_all_required_images)
}

# True when the node reports the image (compared by canonicalized name).
function kubernetes_node_has_image() {
    local node_name="$1"
    local image="$2"
    while read -r node_image; do
        if [ "$(canonical_image_name "$node_image")" = "$(canonical_image_name "$image")" ]; then
            return 0
        fi
    done < <(kubernetes_node_images "$node_name")
    return 1
}

# Populated by kubernetes_get_remote_primaries: remote primary node names and
# their kubelet versions, index-aligned.
KUBERNETES_REMOTE_PRIMARIES=()
KUBERNETES_REMOTE_PRIMARY_VERSIONS=()
function kubernetes_get_remote_primaries() {
    while read -r primary; do
        local name=$(echo $primary | awk '{ print $1 }')
        local version="$(try_1m kubernetes_node_kubelet_version $name)"
        KUBERNETES_REMOTE_PRIMARIES+=( $name )
        KUBERNETES_REMOTE_PRIMARY_VERSIONS+=( $version )
    done < <(kubernetes_remote_masters)
}

# Populated by kubernetes_get_secondaries: worker node names and kubelet
# versions, index-aligned.
KUBERNETES_SECONDARIES=()
KUBERNETES_SECONDARY_VERSIONS=()
function kubernetes_get_secondaries() {
    while read -r secondary; do
        local name=$(echo $secondary | awk '{ print $1 }')
        local version="$(try_1m kubernetes_node_kubelet_version $name)"
        KUBERNETES_SECONDARIES+=( $name )
        KUBERNETES_SECONDARY_VERSIONS+=( $version )
    done < <(kubernetes_workers)
}

# Print the cluster's controlPlaneEndpoint from the kubeadm config, if any.
function kubernetes_load_balancer_address() {
    maybe kubeadm_cluster_configuration | grep 'controlPlaneEndpoint:' | sed 's/controlPlaneEndpoint: \|"//g'
}

# True once a pod has left Pending — Running, Failed or Succeeded (continues
# on the next chunk).
function kubernetes_pod_started() {
    local name=$1
    local namespace=$2
    local phase=$(kubectl -n $namespace get pod $name -ojsonpath='{ .status.phase }')
case "$phase" in Running|Failed|Succeeded) return 0 ;; esac return 1 } function kubernetes_pod_completed() { local name=$1 local namespace=$2 local phase=$(kubectl -n $namespace get pod $name -ojsonpath='{ .status.phase }') case "$phase" in Failed|Succeeded) return 0 ;; esac return 1 } function kubernetes_pod_succeeded() { local name="$1" local namespace="$2" local phase=$(kubectl -n $namespace get pod $name -ojsonpath='{ .status.phase }') [ "$phase" = "Succeeded" ] } function kubernetes_is_current_cluster() { local api_service_address="$1" if cat /etc/kubernetes/kubelet.conf 2>/dev/null | grep -q "${api_service_address}"; then return 0 fi if cat /opt/replicated/kubeadm.conf 2>/dev/null | grep -q "${api_service_address}"; then return 0 fi return 1 } function kubernetes_is_join_node() { if cat /opt/replicated/kubeadm.conf 2>/dev/null | grep -q 'kind: JoinConfiguration'; then return 0 fi return 1 } function kubernetes_is_installed() { if kubectl cluster-info >/dev/null 2>&1 ; then return 0 fi if ps aux | grep '[k]ubelet' ; then return 0 fi if commandExists kubelet ; then return 0 fi return 1 } function kubeadm_cluster_configuration() { kubectl get cm -o yaml -n kube-system kubeadm-config -ojsonpath='{ .data.ClusterConfiguration }' } function kubeadm_cluster_status() { kubectl get cm -o yaml -n kube-system kubeadm-config -ojsonpath='{ .data.ClusterStatus }' } function check_network() { logStep "Checking cluster networking" if ! 
kubernetes_any_node_ready; then echo "Waiting for node to report Ready" spinner_until 300 kubernetes_any_node_ready fi kubectl delete pods kurlnet-client kurlnet-server --force --grace-period=0 &>/dev/null || true cat </dev/null } function kubernetes_service_exists() { kubectl -n default get service kubernetes &>/dev/null } function kubernetes_all_nodes_ready() { local node_statuses= node_statuses="$(kubectl get nodes --no-headers 2>/dev/null | awk '{ print $2 }')" # no nodes are not ready and at least one node is ready if echo "${node_statuses}" | grep -q 'NotReady' && \ echo "${node_statuses}" | grep -v 'NotReady' | grep -q 'Ready' ; then return 1 fi return 0 } function kubernetes_any_node_ready() { if kubectl get nodes --no-headers 2>/dev/null | awk '{ print $2 }' | grep -v 'NotReady' | grep -q 'Ready' ; then return 0 fi return 1 } function object_store_exists() { if [ -n "$OBJECT_STORE_ACCESS_KEY" ] && \ [ -n "$OBJECT_STORE_SECRET_KEY" ] && \ [ -n "$OBJECT_STORE_CLUSTER_IP" ]; then return 0 else return 1 fi } function object_store_create_bucket() { if object_store_bucket_exists "$1" ; then echo "object store bucket $1 exists" return 0 fi if ! 
# --- tail of object_store_create_bucket, begun on the previous chunk ---
_object_store_create_bucket "$1" ; then
        if object_store_exists; then
            return 1
        fi
        bail "attempted to create bucket $1 but no object store configured"
    fi
    echo "object store bucket $1 created"
}

# PUT an empty bucket using AWS v2 signature auth against the in-cluster
# object store. Returns curl's status; all output suppressed.
function _object_store_create_bucket() {
    local bucket=$1
    local acl="x-amz-acl:private"
    local d=$(LC_TIME="en_US.UTF-8" TZ="UTC" date +"%a, %d %b %Y %T %z")
    local string="PUT\n\n\n${d}\n${acl}\n/$bucket"
    local sig=$(echo -en "${string}" | openssl sha1 -hmac "${OBJECT_STORE_SECRET_KEY}" -binary | base64)
    curl -fsSL -X PUT \
        --noproxy "*" \
        -H "Host: $OBJECT_STORE_CLUSTER_IP" \
        -H "Date: $d" \
        -H "$acl" \
        -H "Authorization: AWS $OBJECT_STORE_ACCESS_KEY:$sig" \
        "http://$OBJECT_STORE_CLUSTER_IP/$bucket" >/dev/null 2>&1
}

# HEAD the bucket with AWS v2 signature auth; true when it exists.
function object_store_bucket_exists() {
    local bucket=$1
    local acl="x-amz-acl:private"
    local d=$(LC_TIME="en_US.UTF-8" TZ="UTC" date +"%a, %d %b %Y %T %z")
    local string="HEAD\n\n\n${d}\n${acl}\n/$bucket"
    local sig=$(echo -en "${string}" | openssl sha1 -hmac "${OBJECT_STORE_SECRET_KEY}" -binary | base64)
    curl -fsSL -I \
        --noproxy "*" \
        -H "Host: $OBJECT_STORE_CLUSTER_IP" \
        -H "Date: $d" \
        -H "$acl" \
        -H "Authorization: AWS $OBJECT_STORE_ACCESS_KEY:$sig" \
        "http://$OBJECT_STORE_CLUSTER_IP/$bucket" >/dev/null 2>&1
}

# Migrate object-store data and all consumers (kotsadm, registry, velero) from
# the rook-ceph RGW to minio, then restart the affected workloads.
function migrate_rgw_to_minio() {
    RGW_HOST="rook-ceph-rgw-rook-ceph-store.rook-ceph"
    RGW_ACCESS_KEY_ID=$(kubectl -n rook-ceph get secret rook-ceph-object-user-rook-ceph-store-kurl -o yaml | grep AccessKey | head -1 | awk '{print $2}' | base64 --decode)
    RGW_ACCESS_KEY_SECRET=$(kubectl -n rook-ceph get secret rook-ceph-object-user-rook-ceph-store-kurl -o yaml | grep SecretKey | head -1 | awk '{print $2}' | base64 --decode)
    MINIO_HOST="minio.${MINIO_NAMESPACE}"
    MINIO_ACCESS_KEY_ID=$(kubectl -n ${MINIO_NAMESPACE} get secret minio-credentials -ojsonpath='{ .data.MINIO_ACCESS_KEY }' | base64 --decode)
    MINIO_ACCESS_KEY_SECRET=$(kubectl -n ${MINIO_NAMESPACE} get secret minio-credentials -ojsonpath='{ .data.MINIO_SECRET_KEY }' | base64 --decode)
    MINIO_CLUSTER_IP=$(kubectl -n ${MINIO_NAMESPACE} get service minio | tail -n1 | awk '{ print $3}')

    get_shared

    kubectl delete pod sync-object-store --force --grace-period=0 &>/dev/null || true

    # NOTE(review): the sync-object-store pod manifest and the wait on its
    # completion appear to have been lost in extraction (orphaned "cat <",
    # "else", "fi" tokens below) — confirm against the original script.
    cat < /dev/null
    else
        bail "sync-object-store pod failed"
    fi

    # Update kotsadm to use minio
    if kubernetes_resource_exists default secret kotsadm-s3; then
        echo "Updating kotsadm to use minio"
        kubectl patch secret kotsadm-s3 -p "{\"stringData\":{\"access-key-id\":\"${MINIO_ACCESS_KEY_ID}\",\"secret-access-key\":\"${MINIO_ACCESS_KEY_SECRET}\",\"endpoint\":\"http://${MINIO_HOST}\",\"object-store-cluster-ip\":\"${MINIO_CLUSTER_IP}\"}}"
        if kubernetes_resource_exists default deployment kotsadm; then
            kubectl rollout restart deployment kotsadm
        elif kubernetes_resource_exists default statefulset kotsadm; then
            kubectl rollout restart statefulset kotsadm
        fi
    fi

    # Update registry to use minio
    if kubernetes_resource_exists kurl configmap registry-config; then
        echo "Updating registry to use minio"
        kubectl -n kurl get configmap registry-config -ojsonpath='{ .data.config\.yml }' | sed "s/regionendpoint: http.*/regionendpoint: http:\/\/${MINIO_CLUSTER_IP}/" > config.yml
        kubectl -n kurl delete configmap registry-config
        kubectl -n kurl create configmap registry-config --from-file=config.yml=config.yml
        rm config.yml
    fi
    if kubernetes_resource_exists kurl secret registry-s3-secret; then
        kubectl -n kurl patch secret registry-s3-secret -p "{\"stringData\":{\"access-key-id\":\"${MINIO_ACCESS_KEY_ID}\",\"secret-access-key\":\"${MINIO_ACCESS_KEY_SECRET}\",\"object-store-cluster-ip\":\"${MINIO_CLUSTER_IP}\"}}"
    fi
    if kubernetes_resource_exists kurl deployment registry; then
        kubectl -n kurl rollout restart deployment registry
    fi

    # Update velero to use minio only if currently using RGW since velero may have already been
    # updated to use an off-cluster object store.
    if kubernetes_resource_exists velero backupstoragelocation default; then
        echo "Updating velero to use minio"
        s3Url=$(kubectl -n velero get backupstoragelocation default -ojsonpath='{ .spec.config.s3Url }')
        if [ "$s3Url" = "http://${RGW_HOST}" ]; then
            kubectl -n velero patch backupstoragelocation default --type=merge -p "{\"spec\":{\"config\":{\"s3Url\":\"http://${MINIO_HOST}\",\"publicUrl\":\"http://${MINIO_CLUSTER_IP}\"}}}"
            # rewrite each default-location restic repo to point at minio
            while read -r resticrepo; do
                oldResticIdentifier=$(kubectl -n velero get resticrepositories "$resticrepo" -ojsonpath="{ .spec.resticIdentifier }")
                newResticIdentifier=$(echo "$oldResticIdentifier" | sed "s/${RGW_HOST}/${MINIO_HOST}/")
                kubectl -n velero patch resticrepositories "$resticrepo" --type=merge -p "{\"spec\":{\"resticIdentifier\":\"${newResticIdentifier}\"}}"
            done < <(kubectl -n velero get resticrepositories --selector=velero.io/storage-location=default --no-headers | awk '{ print $1 }')
        else
            echo "default backupstoragelocation was not rgw, skipping"
        fi
    fi
    if kubernetes_resource_exists velero secret cloud-credentials; then
        if kubectl -n velero get secret cloud-credentials -ojsonpath='{ .data.cloud }' | base64 -d | grep -q "$RGW_ACCESS_KEY_ID"; then
            kubectl -n velero get secret cloud-credentials -ojsonpath='{ .data.cloud }' | base64 -d > cloud
            sed -i "s/aws_access_key_id=.*/aws_access_key_id=${MINIO_ACCESS_KEY_ID}/" cloud
            sed -i "s/aws_secret_access_key=.*/aws_secret_access_key=${MINIO_ACCESS_KEY_SECRET}/" cloud
            cloud=$(cat cloud | base64 -w 0)
            kubectl -n velero patch secret cloud-credentials -p "{\"data\":{\"cloud\":\"${cloud}\"}}"
            rm cloud
        else
            echo "cloud-credentials secret were not for rgw, skipping"
        fi
    fi
    if kubernetes_resource_exists velero daemonset restic; then
        kubectl -n velero rollout restart daemonset restic
    fi
    if kubernetes_resource_exists velero deployment velero; then
        kubectl -n velero rollout restart deployment velero
    fi

    printf "\n${GREEN}Object store migration completed successfully${NC}\n"
}

# kubectl plugin search path (statement completes on the next chunk)
export
KUBECTL_PLUGINS_PATH=/usr/local/bin function install_plugins() { pushd "$DIR/krew" tar xzf outdated.tar.gz && mv outdated /usr/local/bin/kubectl-outdated tar xzf preflight.tar.gz && mv preflight /usr/local/bin/kubectl-preflight tar xzf support-bundle.tar.gz && mv support-bundle /usr/local/bin/kubectl-support_bundle popd # uninstall system-wide krew from old versions of kurl rm -rf /opt/replicated/krew sed -i '/^export KUBECTL_PLUGINS_PATH.*KREW_ROOT/d' /etc/profile sed -i '/^export KREW_ROOT.*replicated/d' /etc/profile } function install_kustomize() { if ! kubernetes_is_master; then return 0 elif [ ! -d "$DIR/packages/kubernetes/${k8sVersion}/assets" ]; then echo "Kustomize package is missing in your distribution. Skipping." return 0 fi kustomize_dir=/usr/local/bin pushd "$DIR/packages/kubernetes/${k8sVersion}/assets" for file in $(ls kustomize-*);do if [ "${file: -6}" == "tar.gz" ];then tar xf ${file} chmod a+x kustomize mv kustomize /usr/local/bin/${file%%.tar*} else # Earlier versions of kustomize weren't archived/compressed chmod a+x ${file} cp ${file} ${kustomize_dir} fi done popd if ls ${kustomize_dir}/kustomize-* 1>/dev/null 2>&1;then latest_binary=$(basename $(ls ${kustomize_dir}/kustomize-* | sort -V | tail -n 1)) # Link to the latest version ln -s -f ${kustomize_dir}/${latest_binary} ${kustomize_dir}/kustomize fi } function preflights() { require64Bit bailIfUnsupportedOS mustSwapoff prompt_if_docker_unsupported_os check_docker_k8s_version checkFirewalld checkUFW must_disable_selinux apply_iptables_config cri_preflights kotsadm_prerelease host_nameservers_reachable return 0 } function join_preflights() { preflights_require_no_kubernetes_or_current_node return 0 } function require_root_user() { local user="$(id -un 2>/dev/null || true)" if [ "$user" != "root" ]; then bail "Error: this installer needs to be run as root." fi } function require64Bit() { case "$(uname -m)" in *64) ;; *) echo >&2 'Error: you are not using a 64bit platform.' 
# --- tail of require64Bit, begun on the previous chunk ---
echo >&2 'This installer currently only supports 64bit platforms.'
            exit 1
            ;;
    esac
}

# Bail unless the detected OS/version is on the supported list.
function bailIfUnsupportedOS() {
    case "$LSB_DIST$DIST_VERSION" in
        ubuntu16.04|ubuntu18.04|ubuntu20.04)
            ;;
        rhel7.4|rhel7.5|rhel7.6|rhel7.7|rhel7.8|rhel7.9|rhel8.0|rhel8.1|rhel8.2|rhel8.3|rhel8.4)
            ;;
        centos7.4|centos7.5|centos7.6|centos7.7|centos7.8|centos7.9|centos8.0|centos8.1|centos8.2|centos8.3|centos8.4)
            ;;
        amzn2)
            ;;
        ol7.4|ol7.5|ol7.6|ol7.7|ol7.8|ol7.9|ol8.0|ol8.1|ol8.2|ol8.3|ol8.4)
            ;;
        *)
            bail "Kubernetes install is not supported on ${LSB_DIST} ${DIST_VERSION}"
            ;;
    esac
}

# Prompt to disable swap everywhere it can come from (swapon, fstab, the Azure
# temp-disk service, and the Azure Linux Agent); bail if the user declines.
function mustSwapoff() {
    if swap_is_on || swap_is_enabled; then
        printf "\n${YELLOW}This application is incompatible with memory swapping enabled. Disable swap to continue?${NC} "
        if confirmY ; then
            printf "=> Running swapoff --all\n"
            swapoff --all
            if swap_fstab_enabled; then
                swap_fstab_disable
            fi
            if swap_service_enabled; then
                swap_service_disable
            fi
            if swap_azure_linux_agent_enabled; then
                swap_azure_linux_agent_disable
            fi
            logSuccess "Swap disabled.\n"
        else
            bail "\nDisable swap with swapoff --all and remove all swap entries from /etc/fstab before re-running this script"
        fi
    fi
}

# True when any swap device is currently active.
function swap_is_on() {
    swapon --summary | grep --quiet " " # todo this could be more specific, swapon -s returns nothing if its off
}

# True when swap would come back after a reboot via any known mechanism.
function swap_is_enabled() {
    swap_fstab_enabled || swap_service_enabled || swap_azure_linux_agent_enabled
}

# True when /etc/fstab has an uncommented swap entry.
function swap_fstab_enabled() {
    cat /etc/fstab | grep --quiet --ignore-case --extended-regexp '^[^#]+swap'
}

# Comment out swap entries in /etc/fstab, keeping a .bak backup.
function swap_fstab_disable() {
    printf "=> Commenting swap entries in /etc/fstab \n"
    sed --in-place=.bak '/\bswap\b/ s/^/#/' /etc/fstab
    printf "=> A backup of /etc/fstab has been made at /etc/fstab.bak\n\n"
    printf "\n${YELLOW}Changes have been made to /etc/fstab. 
We recommend reviewing them after completing this installation to ensure mounts are correctly configured.${NC}\n\n"
    sleep 5 # for emphasis of the above ^
}

# This is a service on some Azure VMs that just enables swap
function swap_service_enabled() {
    systemctl -q is-enabled temp-disk-swapfile 2>/dev/null
}

# Disable the Azure temp-disk-swapfile service.
function swap_service_disable() {
    printf "=> Disabling temp-disk-swapfile service\n"
    systemctl disable temp-disk-swapfile
}

# True when the Azure Linux Agent is configured to create swap.
function swap_azure_linux_agent_enabled() {
    cat /etc/waagent.conf 2>/dev/null | grep -q 'ResourceDisk.EnableSwap=y'
}

# Turn off swap creation in the Azure Linux Agent config.
function swap_azure_linux_agent_disable() {
    printf "=> Disabling swap in Azure Linux Agent configuration file /etc/waagent.conf\n"
    sed -i 's/ResourceDisk.EnableSwap=y/ResourceDisk.EnableSwap=n/g' /etc/waagent.conf
}

# Enforce the minimum docker version for the target Kubernetes minor.
function check_docker_k8s_version() {
    local version=
    version="$(get_docker_version)"
    if [ -z "$version" ]; then
        return
    fi
    case "$KUBERNETES_TARGET_VERSION_MINOR" in
        14|15)
            compareDockerVersions "$version" 1.13.1
            if [ "$COMPARE_DOCKER_VERSIONS_RESULT" -eq "-1" ]; then
                bail "Minimum Docker version for Kubernetes $KUBERNETES_VERSION is 1.13.1."
            fi
            ;;
    esac
}

# Warn (and prompt unless docker is already present) when the requested docker
# version is not supported on this OS.
function prompt_if_docker_unsupported_os() {
    if is_docker_version_supported ; then
        return
    fi
    logWarn "Docker ${DOCKER_VERSION} is not supported on ${LSB_DIST} ${DIST_VERSION}."
    logWarn "The containerd addon is recommended. https://kurl.sh/docs/add-ons/containerd"
    if commandExists "docker" ; then
        return
    fi
    printf "${YELLOW}Continue? ${NC}" 1>&2
    if ! confirmY ; then
        exit 1
    fi
}

# Handle an active firewalld: hard-fail, auto-disable, or prompt the user
# (continues on the next chunk).
checkFirewalld() {
    if [ -n "$PRESERVE_DOCKER_CONFIG" ]; then
        return
    fi
    apply_firewalld_config
    if [ "$BYPASS_FIREWALLD_WARNING" = "1" ]; then
        return
    fi
    if !
# --- tail of checkFirewalld, begun on the previous chunk ---
systemctl -q is-active firewalld ; then
        return
    fi
    if [ "$HARD_FAIL_ON_FIREWALLD" = "1" ]; then
        printf "${RED}Firewalld is active${NC}\n" 1>&2
        exit 1
    fi
    if [ -n "$DISABLE_FIREWALLD" ]; then
        systemctl stop firewalld
        systemctl disable firewalld
        return
    fi
    printf "${YELLOW}Firewalld is active, please press Y to disable ${NC}"
    if confirmY ; then
        systemctl stop firewalld
        systemctl disable firewalld
        return
    fi
    printf "${YELLOW}Continue with firewalld active? ${NC}"
    if confirmN ; then
        BYPASS_FIREWALLD_WARNING=1
        return
    fi
    exit 1
}

# Handle an active UFW firewall: hard-fail, auto-disable, or prompt the user.
checkUFW() {
    if [ -n "$PRESERVE_DOCKER_CONFIG" ]; then
        return
    fi
    if [ "$BYPASS_UFW_WARNING" = "1" ]; then
        return
    fi
    # check if UFW is enabled and installed in systemctl
    if ! systemctl -q is-active ufw ; then
        return
    fi
    # check if UFW is active/inactive
    UFW_STATUS=$(ufw status | grep 'Status: ' | awk '{ print $2 }')
    if [ "$UFW_STATUS" = "inactive" ]; then
        return
    fi
    if [ "$HARD_FAIL_ON_UFW" = "1" ]; then
        printf "${RED}UFW is active${NC}\n" 1>&2
        exit 1
    fi
    if [ -n "$DISABLE_UFW" ]; then
        ufw disable
        return
    fi
    printf "${YELLOW}UFW is active, please press Y to disable ${NC}"
    if confirmY ; then
        ufw disable
        return
    fi
    printf "${YELLOW}Continue with ufw active? ${NC}"
    if confirmN ; then
        BYPASS_UFW_WARNING=1
        return
    fi
    exit 1
}

must_disable_selinux() {
    # From kubernets kubeadm docs for RHEL:
    #
    # Disabling SELinux by running setenforce 0 is required to allow containers to
    # access the host filesystem, which is required by pod networks for example.
    # You have to do this until SELinux support is improved in the kubelet.

    # Check and apply YAML overrides
    if [ -n "$PRESERVE_SELINUX_CONFIG" ]; then
        return
    fi
    apply_selinux_config
    if [ -n "$BYPASS_SELINUX_PREFLIGHT" ]; then
        return
    fi
    if selinux_enabled && selinux_enforced ; then
        if [ -n "$DISABLE_SELINUX" ]; then
            setenforce 0
            sed -i s/^SELINUX=.*$/SELINUX=permissive/ /etc/selinux/config
            return
        fi
        printf "\n${YELLOW}Kubernetes is incompatible with SELinux. 
Disable SELinux to continue?${NC} "
        if confirmY ; then
            setenforce 0
            sed -i s/^SELINUX=.*$/SELINUX=permissive/ /etc/selinux/config
        else
            bail "\nDisable SELinux with 'setenforce 0' before re-running install script"
        fi
    fi
}

# Fall back to an online docker install when no CRI version was specified.
function force_docker() {
    DOCKER_VERSION="19.03.4"
    echo "NO CRI version was listed in yaml or found on host OS, defaulting to online docker install"
    echo "THIS FEATURE IS NOT SUPPORTED AND WILL BE DEPRECATED IN FUTURE KURL VERSIONS"
}

function cri_preflights() {
    require_cri
}

# Ensure some container runtime will be available: existing docker or
# containerd, or a forced docker default.
function require_cri() {
    if commandExists docker ; then
        SKIP_DOCKER_INSTALL=1
        return 0
    fi
    if commandExists ctr ; then
        return 0
    fi
    if [ "$LSB_DIST" = "rhel" ]; then
        if [ -n "$NO_CE_ON_EE" ]; then
            printf "${RED}Enterprise Linux distributions require Docker Enterprise Edition. Please install Docker before running this installation script.${NC}\n" 1>&2
            return 0
        fi
    fi
    if [ "$SKIP_DOCKER_INSTALL" = "1" ]; then
        bail "Docker is required"
    fi
    if [ -z "$DOCKER_VERSION" ] && [ -z "$CONTAINERD_VERSION" ]; then
        force_docker
    fi
    return 0
}

# True when SELinux is enabled on this host.
selinux_enabled() {
    if commandExists "selinuxenabled"; then
        selinuxenabled
        return
    elif commandExists "sestatus"; then
        ENABLED=$(sestatus | grep 'SELinux status' | awk '{ print $3 }')
        echo "$ENABLED" | grep --quiet --ignore-case enabled
        return
    fi
    return 1
}

# True when SELinux is in enforcing mode.
selinux_enforced() {
    if commandExists "getenforce"; then
        ENFORCED=$(getenforce)
        # fix: test the value just captured instead of invoking getenforce a
        # second time (ENFORCED was previously assigned but never used here),
        # matching the sestatus branch below
        echo "$ENFORCED" | grep --quiet --ignore-case enforcing
        return
    elif commandExists "sestatus"; then
        ENFORCED=$(sestatus | grep 'SELinux mode' | awk '{ print $3 }')
        echo "$ENFORCED" | grep --quiet --ignore-case enforcing
        return
    fi
    return 1
}

# Warn about prerelease kotsadm builds; prompt outside of testgrid (the printf
# string completes on the next chunk).
function kotsadm_prerelease() {
    if [ -n "$TESTGRID_ID" ]; then
        printf "\n${YELLOW}This is a prerelease version of kotsadm and should not be run in production. Continuing because this is testgrid.${NC} "
        return 0
    fi
    if [ "$KOTSADM_VERSION" = "alpha" ] || [ "$KOTSADM_VERSION" = "nightly" ]; then
        printf "\n${YELLOW}This is a prerelease version of kotsadm and should not be run in production. 
# --- tail of kotsadm_prerelease, begun on the previous chunk ---
Press Y to continue.${NC} "
        if ! confirmN; then
            bail "\nWill not install prerelease version of kotsadm."
        fi
    fi
}

# Ensure at least one non-loopback nameserver is reachable (skipped for airgap
# installs or when a nameserver override was provided).
function host_nameservers_reachable() {
    if [ -n "$NAMESERVER" ] || [ "$AIRGAP" = "1" ]; then
        return 0
    fi
    if ! discover_non_loopback_nameservers; then
        bail "\nAt least one nameserver must be accessible on a non-loopback address. Use the \"nameserver\" flag in the installer spec to override the loopback nameservers discovered on the host: https://kurl.sh/docs/add-ons/kurl"
    fi
}

# Joining is allowed only on clean hosts or hosts already joined to this same
# cluster; a different cluster endpoint requires explicit confirmation.
function preflights_require_no_kubernetes_or_current_node() {
    if kubernetes_is_join_node ; then
        if kubernetes_is_current_cluster "${API_SERVICE_ADDRESS}" ; then
            return 0
        fi
        logWarn "Kubernetes is already installed on this Node but the api server endpoint is different."
        printf "${YELLOW}Are you sure you want to proceed? ${NC}" 1>&2
        if ! confirmN; then
            exit 1
        fi
        return 0
    fi
    if kubernetes_is_installed ; then
        bail "Kubernetes is already installed on this Node."
    fi
    return 0
}

# Directory (under $DIR) where host preflight results are written.
HOST_PREFLIGHTS_RESULTS_OUTPUT_DIR="host-preflights"

# Run the kurl host preflight binary with flags derived from the node's role
# and the addons' preflight specs, saving results for support bundles.
function host_preflights() {
    local is_primary="$1"
    local is_join="$2"
    local is_upgrade="$3"

    local opts=
    local out_file=
    out_file="${DIR}/${HOST_PREFLIGHTS_RESULTS_OUTPUT_DIR}/results-$(date +%s).txt"

    mkdir -p "${DIR}/${HOST_PREFLIGHTS_RESULTS_OUTPUT_DIR}"

    if [ "${PREFLIGHT_IGNORE_WARNINGS}" = "1" ] || ! prompts_can_prompt ; then
        opts="${opts} --ignore-warnings"
    fi
    if [ "${is_primary}" != "1" ]; then
        opts="${opts} --is-primary=false"
    fi
    if [ "${is_join}" = "1" ]; then
        opts="${opts} --is-join"
    fi
    if [ "${is_upgrade}" = "1" ]; then
        opts="${opts} --is-upgrade"
    fi
    for spec in $("${K8S_DISTRO}_addon_for_each" addon_preflight); do
        opts="${opts} --spec=${spec}"
    done
    if [ -n "$PRIMARY_HOST" ]; then
        opts="${opts} --primary-host=${PRIMARY_HOST}"
    fi
    if [ -n "$SECONDARY_HOST" ]; then
        opts="${opts} --secondary-host=${SECONDARY_HOST}"
    fi

    logStep "Running host preflights"
    if [ "${PREFLIGHT_IGNORE}" = "1" ]; then
        "${DIR}"/bin/kurl host preflight "${MERGED_YAML_SPEC}" ${opts} | tee "${out_file}"
        host_preflights_mkresults "${out_file}" "${opts}"
        # TODO: report preflight fail
    else
        # interactive terminal
        if prompts_can_prompt; then
            set +e
            # NOTE(review): part of this branch appears to have been lost in
            # extraction — the creation of ${tmp_file}, the non-interactive
            # else-branch, and the closing fi's are missing. Confirm against
            # the original script before relying on this function.
            "${DIR}"/bin/kurl host preflight "${MERGED_YAML_SPEC}" ${opts} "${tmp_file}" && mv "${tmp_file}" "${out_file}"
            chmod -R +r "${DIR}/${HOST_PREFLIGHTS_RESULTS_OUTPUT_DIR}/" # make sure the file is readable by kots support bundle
            rm -f "${tmp_file}"
}

# Gather any additional information required from the user that could not be discovered and was not
# passed with a flag
function prompts_can_prompt() {
    # Need the TTY to accept input and stdout to display
    # Prompts when running the script through the terminal but not as a subshell
    if [ -t 1 ] && [ -c /dev/tty ]; then
        return 0
    fi
    return 1
}

# Read one line from the TTY into PROMPT_RESULT; bails when not interactive.
function prompt() {
    if ! prompts_can_prompt ; then
        bail "Cannot prompt, shell is not interactive"
    fi
    set +e
    read PROMPT_RESULT < /dev/tty
    set -e
}

# Yes-default confirmation: returns 1 only on an explicit n/N answer.
function confirmY() {
    printf "(Y/n) "
    if [ "$ASSUME_YES" = "1" ]; then
        echo "Y"
        return 0
    fi
    if ! prompts_can_prompt ; then
        echo "Y"
        logWarn "Automatically accepting prompt, shell is not interactive"
        return 0
    fi
    prompt
    if [ "$PROMPT_RESULT" = "n" ] || [ "$PROMPT_RESULT" = "N" ]; then
        return 1
    fi
    return 0
}

# No-default confirmation (continues on the next chunk).
function confirmN() {
    printf "(y/N) "
    if [ "$ASSUME_YES" = "1" ]; then
        echo "Y"
        return 0
    fi
    if !
# --- tail of confirmN, begun on the previous chunk ---
prompts_can_prompt ; then
        echo "N"
        logWarn "Automatically declining prompt, shell is not interactive"
        return 1
    fi
    prompt
    if [ "$PROMPT_RESULT" = "y" ] || [ "$PROMPT_RESULT" = "Y" ]; then
        return 0
    fi
    return 1
}

# Resolve the master address/port, load balancer endpoint, and join token
# material, prompting for anything not already supplied.
function join_prompts() {
    if [ -n "$API_SERVICE_ADDRESS" ]; then
        splitHostPort "$API_SERVICE_ADDRESS"
        if [ -z "$PORT" ]; then
            PORT="6443"
        fi
        KUBERNETES_MASTER_ADDR="$HOST"
        KUBERNETES_MASTER_PORT="$PORT"
        LOAD_BALANCER_ADDRESS="$HOST"
        LOAD_BALANCER_PORT="$PORT"
    else
        prompt_for_master_address
        splitHostPort "$KUBERNETES_MASTER_ADDR"
        if [ -n "$PORT" ]; then
            KUBERNETES_MASTER_ADDR="$HOST"
            KUBERNETES_MASTER_PORT="$PORT"
        else
            KUBERNETES_MASTER_PORT="6443"
        fi
        LOAD_BALANCER_ADDRESS="$KUBERNETES_MASTER_ADDR"
        LOAD_BALANCER_PORT="$KUBERNETES_MASTER_PORT"
        API_SERVICE_ADDRESS="${KUBERNETES_MASTER_ADDR}:${KUBERNETES_MASTER_PORT}"
    fi
    prompt_for_token
    prompt_for_token_ca_hash
}

# Prompt until a non-empty kubeadm bootstrap token is provided.
function prompt_for_token() {
    if [ -n "$KUBEADM_TOKEN" ]; then
        return
    fi
    if ! prompts_can_prompt ; then
        bail "kubernetes.kubeadmToken required"
    fi
    printf "Please enter the kubernetes discovery token.\n"
    while true; do
        printf "Kubernetes join token: "
        prompt
        if [ -n "$PROMPT_RESULT" ]; then
            KUBEADM_TOKEN="$PROMPT_RESULT"
            return
        fi
    done
}

# Prompt until a non-empty discovery token CA hash is provided.
function prompt_for_token_ca_hash() {
    if [ -n "$KUBEADM_TOKEN_CA_HASH" ]; then
        return
    fi
    if ! prompts_can_prompt ; then
        bail "kubernetes.kubeadmTokenCAHash required"
    fi
    printf "Please enter the discovery token CA's hash.\n"
    while true; do
        printf "Kubernetes discovery token CA hash: "
        prompt
        if [ -n "$PROMPT_RESULT" ]; then
            KUBEADM_TOKEN_CA_HASH="$PROMPT_RESULT"
            return
        fi
    done
}

# Prompt until a non-empty master address is provided.
function prompt_for_master_address() {
    if [ -n "$KUBERNETES_MASTER_ADDR" ]; then
        return
    fi
    if ! prompts_can_prompt ; then
        bail "kubernetes.masterAddress required"
    fi
    printf "Please enter the Kubernetes master address.\n"
    printf "e.g. 
10.128.0.4\n"
    while true; do
        printf "Kubernetes master address: "
        prompt
        if [ -n "$PROMPT_RESULT" ]; then
            KUBERNETES_MASTER_ADDR="$PROMPT_RESULT"
            return
        fi
    done
}

# Prompts shared by install paths: private IP, airgap preload, load balancer.
function common_prompts() {
    if [ -z "$PRIVATE_ADDRESS" ]; then
        prompt_for_private_ip
    fi
    # TODO public address? only required for adding SAN to K8s API server cert

    prompt_airgap_preload_images

    if [ "$HA_CLUSTER" = "1" ]; then
        prompt_for_load_balancer_address
    fi
}

# Display a license fetched from LICENSE_URL and require acceptance.
function prompt_license() {
    if [ -n "$LICENSE_URL" ]; then
        if [ "$AIRGAP" = "1" ]; then
            bail "License Agreements with Airgap installs are not supported yet.\n"
            return
        fi
        curl --fail $LICENSE_URL || bail "Failed to fetch license at url: $LICENSE_URL"
        printf "\n\nThe license text is reproduced above. To view the license in your browser visit $LICENSE_URL.\n\n"
        printf "Do you accept the license agreement?"
        if confirmN; then
            printf "License Agreement Accepted. Continuing Installation.\n"
        else
            bail "License Agreement Not Accepted. 'y' or 'Y' needed to accept. Exiting installation."
        fi
    fi
}

# Determine LOAD_BALANCER_ADDRESS/PORT: reuse the cluster's current endpoint,
# detect changes, or prompt; defaults to the first primary on :6443.
function prompt_for_load_balancer_address() {
    local lastLoadBalancerAddress=

    if kubeadm_cluster_configuration >/dev/null 2>&1; then
        lastLoadBalancerAddress="$(kubeadm_cluster_configuration | grep 'controlPlaneEndpoint:' | sed 's/controlPlaneEndpoint: \|"//g')"
        if [ -n "$lastLoadBalancerAddress" ]; then
            splitHostPort "$lastLoadBalancerAddress"
            if [ "$HOST" = "$lastLoadBalancerAddress" ]; then
                lastLoadBalancerAddress="$lastLoadBalancerAddress:6443"
            fi
        fi
    fi

    if [ -n "$LOAD_BALANCER_ADDRESS" ] && [ -n "$lastLoadBalancerAddress" ]; then
        splitHostPort "$LOAD_BALANCER_ADDRESS"
        if [ "$HOST" = "$LOAD_BALANCER_ADDRESS" ]; then
            LOAD_BALANCER_ADDRESS="$LOAD_BALANCER_ADDRESS:6443"
        fi
        if [ "$LOAD_BALANCER_ADDRESS" != "$lastLoadBalancerAddress" ]; then
            LOAD_BALANCER_ADDRESS_CHANGED=1
        fi
    fi

    if [ -z "$LOAD_BALANCER_ADDRESS" ] && [ -n "$lastLoadBalancerAddress" ]; then
        LOAD_BALANCER_ADDRESS="$lastLoadBalancerAddress"
    fi

    if [ -z "$LOAD_BALANCER_ADDRESS" ]; then
        if ! prompts_can_prompt ; then
            bail "kubernetes.loadBalancerAddress required"
        fi
        # ekco >= 0.11.0 can run an internal HAProxy when no address is given
        if [ -n "$EKCO_VERSION" ] && semverCompare "$EKCO_VERSION" "0.11.0" && [ "$SEMVER_COMPARE_RESULT" -ge "0" ]; then
            printf "\nIf you would like to bring your own load balancer to route external and internal traffic to the API servers, please enter a load balancer address.\n"
            printf "HAProxy will be used to perform this load balancing internally if you do not provide a load balancer address.\n"
            printf "Load balancer address: "
            prompt
            LOAD_BALANCER_ADDRESS="$PROMPT_RESULT"
            if [ -z "$LOAD_BALANCER_ADDRESS" ]; then
                EKCO_ENABLE_INTERNAL_LOAD_BALANCER=1
            fi
        else
            printf "Please enter a load balancer address to route external and internal traffic to the API servers.\n"
            printf "In the absence of a load balancer address, all traffic will be routed to the first master.\n"
            printf "Load balancer address: "
            prompt
            LOAD_BALANCER_ADDRESS="$PROMPT_RESULT"
            if [ -z "$LOAD_BALANCER_ADDRESS" ]; then
                LOAD_BALANCER_ADDRESS="$PRIVATE_ADDRESS"
                LOAD_BALANCER_PORT=6443
            fi
        fi
    fi

    if [ -z "$LOAD_BALANCER_PORT" ]; then
        splitHostPort "$LOAD_BALANCER_ADDRESS"
        LOAD_BALANCER_ADDRESS="$HOST"
        LOAD_BALANCER_PORT="$PORT"
    fi
    if [ -z "$LOAD_BALANCER_PORT" ]; then
        LOAD_BALANCER_PORT=6443
    fi

    $BIN_BASHTOYAML -c $MERGED_YAML_SPEC -f "load-balancer-address=${LOAD_BALANCER_ADDRESS}:${LOAD_BALANCER_PORT}"
}

# if remote nodes are in the cluster and this is an airgap install, prompt the user to run the
# load-images task on all remotes before proceeding because remaining steps may cause pods to
# be scheduled on those nodes with new images.
function prompt_airgap_preload_images() {
    if [ "$AIRGAP" != "1" ]; then
        return 0
    fi
    if !
kubernetes_has_remotes; then return 0 fi local unattended_nodes_missing_images=0 while read -r node; do local nodeName=$(echo "$node" | awk '{ print $1 }') if [ "$nodeName" = "$(get_local_node_name)" ]; then continue fi if kubernetes_node_has_all_images "$nodeName"; then continue fi local kurl_install_directory_flag="$(get_kurl_install_directory_flag "${KURL_INSTALL_DIRECTORY_FLAG}")" printf "\nRun this script on node ${GREEN}${nodeName}${NC} to load required images before proceeding:\n" printf "\n" printf "${GREEN}\tcat ./tasks.sh | sudo bash -s load-images${kurl_install_directory_flag}${NC}" printf "\n" if [ "${KURL_IGNORE_REMOTE_LOAD_IMAGES_PROMPT}" != "1" ]; then if ! prompts_can_prompt ; then unattended_nodes_missing_images=1 continue fi while true; do echo "" printf "Have images been loaded on node ${nodeName}? " if confirmN ; then break fi done else logWarn "Remote load-images task prompt explicitly ignored" fi done < <(kubectl get nodes --no-headers) if [ "$unattended_nodes_missing_images" = "1" ] ; then bail "Preloading images required" fi } function prompt_for_private_ip() { _count=0 _regex="^[[:digit:]]+: ([^[:space:]]+)[[:space:]]+[[:alnum:]]+ ([[:digit:].]+)" while read -r _line; do [[ $_line =~ $_regex ]] if [ "${BASH_REMATCH[1]}" != "lo" ] && [ "${BASH_REMATCH[1]}" != "kube-ipvs0" ] && [ "${BASH_REMATCH[1]}" != "docker0" ] && [ "${BASH_REMATCH[1]}" != "weave" ]; then _iface_names[$((_count))]=${BASH_REMATCH[1]} _iface_addrs[$((_count))]=${BASH_REMATCH[2]} let "_count += 1" fi done <<< "$(ip -4 -o addr)" if [ "$_count" -eq "0" ]; then echo >&2 "Error: The installer couldn't discover any valid network interfaces on this machine." echo >&2 "Check your network configuration and re-run this script again." echo >&2 "If you want to skip this discovery process, pass the 'private-address' arg to this script, e.g. 
'sudo ./install.sh private-address=1.2.3.4'" exit 1 elif [ "$_count" -eq "1" ]; then PRIVATE_ADDRESS=${_iface_addrs[0]} printf "The installer will use network interface '%s' (with IP address '%s')\n" "${_iface_names[0]}" "${_iface_addrs[0]}" return fi if ! prompts_can_prompt ; then bail "kurl.privateAddress required" fi printf "The installer was unable to automatically detect the private IP address of this machine.\n" printf "Please choose one of the following network interfaces:\n" for i in $(seq 0 $((_count-1))); do printf "[%d] %-5s\t%s\n" "$i" "${_iface_names[$i]}" "${_iface_addrs[$i]}" done while true; do printf "Enter desired number (0-%d): " "$((_count-1))" prompt if [ -z "$PROMPT_RESULT" ]; then continue fi if [ "$PROMPT_RESULT" -ge "0" ] && [ "$PROMPT_RESULT" -lt "$_count" ]; then PRIVATE_ADDRESS=${_iface_addrs[$PROMPT_RESULT]} printf "The installer will use network interface '%s' (with IP address '%s').\n" "${_iface_names[$PROMPT_RESULT]}" "$PRIVATE_ADDRESS" return fi done } function proxy_bootstrap() { if [ -n "$HTTP_PROXY" ]; then ENV_PROXY_ADDRESS="$HTTP_PROXY" export https_proxy="$HTTP_PROXY" printf "The installer will use the proxy at '%s' (imported from env var 'HTTP_PROXY')\n" "$ENV_PROXY_ADDRESS" elif [ -n "$http_proxy" ]; then ENV_PROXY_ADDRESS="$http_proxy" export https_proxy="$http_proxy" printf "The installer will use the proxy at '%s' (imported from env var 'http_proxy')\n" "$ENV_PROXY_ADDRESS" elif [ -n "$HTTPS_PROXY" ]; then ENV_PROXY_ADDRESS="$HTTPS_PROXY" printf "The installer will use the proxy at '%s' (imported from env var 'HTTPS_PROXY')\n" "$ENV_PROXY_ADDRESS" elif [ -n "$https_proxy" ]; then ENV_PROXY_ADDRESS="$https_proxy" printf "The installer will use the proxy at '%s' (imported from env var 'https_proxy')\n" "$ENV_PROXY_ADDRESS" fi if [ -n "$NO_PROXY" ]; then ENV_NO_PROXY="$NO_PROXY" elif [ -n "$no_proxy" ]; then ENV_NO_PROXY="$no_proxy" fi # Need to peek at the yaml spec to find if a proxy is needed to download the util binaries 
if [ -n "$INSTALLER_SPEC_FILE" ]; then
        # a proxyAddress in the override spec file wins over everything else
        local overrideProxy=$(grep "proxyAddress:" "$INSTALLER_SPEC_FILE" | grep -o "http[^'\" ]*")
        if [ -n "$overrideProxy" ]; then
            export https_proxy="$overrideProxy"
            kubectl_no_proxy
            echo "Bootstrapped proxy address from installer spec file: $https_proxy"
            return
        fi
    fi
    # next preference: proxyAddress embedded in the generated INSTALLER_YAML
    local proxy=$(echo "$INSTALLER_YAML" | grep "proxyAddress:" | grep -o "http[^'\" ]*")
    if [ -n "$proxy" ]; then
        export https_proxy="$proxy"
        kubectl_no_proxy
        echo "Bootstrapped proxy address from installer yaml: $https_proxy"
        return
    fi
    # fall back to the proxy discovered from the environment earlier
    if [ -n "$ENV_PROXY_ADDRESS" ]; then
        export https_proxy="$ENV_PROXY_ADDRESS"
        kubectl_no_proxy
        return
    fi
}

# Adds the API server host from /etc/kubernetes/admin.conf to no_proxy so
# kubectl traffic bypasses the proxy. No-op before the cluster exists.
# NOTE(review): relies on splitHostPort setting the HOST global — confirm.
function kubectl_no_proxy() {
    if [ ! -f /etc/kubernetes/admin.conf ]; then
        return
    fi
    kubectlEndpoint=$(cat /etc/kubernetes/admin.conf | grep 'server:' | awk '{ print $NF }' | sed -E 's/https?:\/\///g')
    splitHostPort "$kubectlEndpoint"
    if [ -n "$no_proxy" ]; then
        export no_proxy="$no_proxy,$HOST"
    else
        export no_proxy="$HOST"
    fi
}

# Applies the final proxy decision: NO_PROXY=1 clears every proxy variable;
# otherwise PROXY_ADDRESS (or the env-derived ENV_PROXY_ADDRESS) is exported
# as https_proxy.
function configure_proxy() {
    if [ "$NO_PROXY" = "1" ]; then
        echo "Not using http proxy"
        unset PROXY_ADDRESS
        unset http_proxy
        unset HTTP_PROXY
        unset https_proxy
        unset HTTPS_PROXY
        return
    fi
    if [ -z "$PROXY_ADDRESS" ] && [ -z "$ENV_PROXY_ADDRESS" ]; then
        return
    fi
    if [ -z "$PROXY_ADDRESS" ]; then
        PROXY_ADDRESS="$ENV_PROXY_ADDRESS"
    fi
    export https_proxy="$PROXY_ADDRESS"
    echo "Using proxy address $PROXY_ADDRESS"
}

# Builds and exports the pre-install no_proxy list (local addresses, cluster
# suffixes, user-supplied extras), de-duplicated. Also sets NO_PROXY_ADDRESSES.
function configure_no_proxy_preinstall() {
    if [ -z "$PROXY_ADDRESS" ]; then
        return
    fi
    local addresses="localhost,127.0.0.1,.svc,.local,.default,kubernetes"
    if [ -n "$ENV_NO_PROXY" ]; then
        addresses="${addresses},${ENV_NO_PROXY}"
    fi
    if [ -n "$PRIVATE_ADDRESS" ]; then
        addresses="${addresses},${PRIVATE_ADDRESS}"
    fi
    if [ -n "$LOAD_BALANCER_ADDRESS" ]; then
        addresses="${addresses},${LOAD_BALANCER_ADDRESS}"
    fi
    if [ -n "$ADDITIONAL_NO_PROXY_ADDRESSES" ]; then
        addresses="${addresses},${ADDITIONAL_NO_PROXY_ADDRESSES}"
    fi
    # filter duplicates
    addresses=$(echo "$addresses" | sed 's/,/\n/g' | sort | uniq | paste -s --delimiters=",")
    # kubeadm requires this in the environment to reach K8s masters
    export no_proxy="$addresses"
    NO_PROXY_ADDRESSES="$addresses"
    echo "Exported no_proxy: $no_proxy"
}

# Builds and exports the full post-spec no_proxy list, adding the namespaces
# and addresses of every enabled addon (kotsadm, rook, fluentd, registry,
# prometheus, velero, minio) plus cluster CIDRs. Also sets NO_PROXY_ADDRESSES.
function configure_no_proxy() {
    if [ -z "$PROXY_ADDRESS" ]; then
        return
    fi
    local addresses="localhost,127.0.0.1,.svc,.local,.default,kubernetes"
    if [ -n "$ENV_NO_PROXY" ]; then
        addresses="${addresses},${ENV_NO_PROXY}"
    fi
    if [ -n "$KOTSADM_VERSION" ]; then
        addresses="${addresses},kotsadm-api-node"
    fi
    if [ -n "$ROOK_VERSION" ]; then
        addresses="${addresses},.rook-ceph"
    fi
    if [ -n "$FLUENTD_VERSION" ]; then
        addresses="${addresses},.logging"
    fi
    if [ -n "$REGISTRY_VERSION" ]; then
        addresses="${addresses},.kurl"
    fi
    if [ -n "$PROMETHEUS_VERSION" ]; then
        addresses="${addresses},.monitoring"
    fi
    if [ -n "$VELERO_VERSION" ] && [ -n "$VELERO_NAMESPACE" ]; then
        addresses="${addresses},.${VELERO_NAMESPACE}"
    fi
    if [ -n "$MINIO_VERSION" ] && [ -n "$MINIO_NAMESPACE" ]; then
        addresses="${addresses},.${MINIO_NAMESPACE}"
    fi
    if [ -n "$PRIVATE_ADDRESS" ]; then
        addresses="${addresses},${PRIVATE_ADDRESS}"
    fi
    if [ -n "$LOAD_BALANCER_ADDRESS" ]; then
        addresses="${addresses},${LOAD_BALANCER_ADDRESS}"
    fi
    if [ -n "$KUBERNETES_MASTER_ADDR" ]; then
        addresses="${addresses},${KUBERNETES_MASTER_ADDR}"
    fi
    if [ -n "$POD_CIDR" ]; then
        addresses="${addresses},${POD_CIDR}"
    fi
    if [ -n "$SERVICE_CIDR" ]; then
        addresses="${addresses},${SERVICE_CIDR}"
    fi
    if [ -n "$ADDITIONAL_NO_PROXY_ADDRESSES" ]; then
        addresses="${addresses},${ADDITIONAL_NO_PROXY_ADDRESSES}"
    fi
    # filter duplicates
    addresses=$(echo "$addresses" | sed 's/,/\n/g' | sort | uniq | paste -s --delimiters=",")
    # kubeadm requires this in the environment to reach K8s masters
    export no_proxy="$addresses"
    NO_PROXY_ADDRESSES="$addresses"
    echo "Exported no_proxy: $no_proxy"
}

# Telemetry state: context string for error reports, the per-run install ID
# (empty disables all reporting), and the optional testgrid run ID.
REPORTING_CONTEXT_INFO=""
INSTALLATION_ID=
TESTGRID_ID=

function report_install_start() {
    # report that the install started
    # this includes the install ID, time, kurl URL, and linux distribution name + version.
    # TODO: HA status, server CPU count and memory size.

    # if airgapped, don't create an installation ID and return early
    if [ "$AIRGAP" == "1" ]; then
        return 0
    fi

    # if DISABLE_REPORTING is set, don't create an installation ID (which thus disables all the other reporting calls) and return early
    if [ "${DISABLE_REPORTING}" = "1" ]; then
        return 0
    fi

    INSTALLATION_ID=$(< /dev/urandom tr -dc a-z0-9 | head -c16)
    local started=$(date -u +"%Y-%m-%dT%H:%M:%SZ") # rfc3339

    if [ -f "/tmp/testgrid-id" ]; then
        TESTGRID_ID=$(cat /tmp/testgrid-id)
    fi

    # Determine if it is the first kurl install: an existing kurl-config
    # configmap means this run is an upgrade.
    if kubernetes_resource_exists kube-system configmap kurl-config; then
        curl -s --output /dev/null -H 'Content-Type: application/json' --max-time 5 \
            -d "{\"started\": \"$started\", \"os\": \"$LSB_DIST $DIST_VERSION\", \"kernel_version\": \"$KERNEL_MAJOR.$KERNEL_MINOR\", \"kurl_url\": \"$KURL_URL\", \"installer_id\": \"$INSTALLER_ID\", \"testgrid_id\": \"$TESTGRID_ID\", \"machine_id\": \"$MACHINE_ID\", \"is_upgrade\": true}" \
            $REPLICATED_APP_URL/kurl_metrics/start_install/$INSTALLATION_ID || true
    else
        curl -s --output /dev/null -H 'Content-Type: application/json' --max-time 5 \
            -d "{\"started\": \"$started\", \"os\": \"$LSB_DIST $DIST_VERSION\", \"kernel_version\": \"$KERNEL_MAJOR.$KERNEL_MINOR\", \"kurl_url\": \"$KURL_URL\", \"installer_id\": \"$INSTALLER_ID\", \"testgrid_id\": \"$TESTGRID_ID\", \"machine_id\": \"$MACHINE_ID\", \"is_upgrade\": false}" \
            $REPLICATED_APP_URL/kurl_metrics/start_install/$INSTALLATION_ID || true
    fi
}

function report_install_success() {
    # report that the install finished successfully

    # if INSTALLATION_ID is empty reporting is disabled
    if [ -z "$INSTALLATION_ID" ]; then
        return 0
    fi

    local completed=$(date -u +"%Y-%m-%dT%H:%M:%SZ") # rfc3339

    curl -s --output /dev/null -H 'Content-Type: application/json' --max-time 5 \
        -d "{\"finished\": \"$completed\", \"machine_id\": \"$MACHINE_ID\"}" \
        $REPLICATED_APP_URL/kurl_metrics/finish_install/$INSTALLATION_ID || true
}
function report_install_fail() {
    # report that the install failed
    local cause=$1

    # if INSTALLATION_ID is empty reporting is disabled
    if [ -z "$INSTALLATION_ID" ]; then
        return 0
    fi

    local completed=$(date -u +"%Y-%m-%dT%H:%M:%SZ") # rfc3339

    curl -s --output /dev/null -H 'Content-Type: application/json' --max-time 5 \
        -d "{\"finished\": \"$completed\", \"cause\": \"$cause\", \"machine_id\": \"$MACHINE_ID\"}" \
        $REPLICATED_APP_URL/kurl_metrics/fail_install/$INSTALLATION_ID || true
}

function report_addon_start() {
    # report that an addon started installation
    local name=$1
    local version=$2

    # if INSTALLATION_ID is empty reporting is disabled
    if [ -z "$INSTALLATION_ID" ]; then
        return 0
    fi

    local started=$(date -u +"%Y-%m-%dT%H:%M:%SZ") # rfc3339

    curl -s --output /dev/null -H 'Content-Type: application/json' --max-time 5 \
        -d "{\"started\": \"$started\", \"addon_version\": \"$version\", \"testgrid_id\": \"$TESTGRID_ID\", \"machine_id\": \"$MACHINE_ID\"}" \
        $REPLICATED_APP_URL/kurl_metrics/start_addon/$INSTALLATION_ID/$name || true
}

function report_addon_success() {
    # report that an addon installed successfully
    local name=$1
    local version=$2

    # if INSTALLATION_ID is empty reporting is disabled
    if [ -z "$INSTALLATION_ID" ]; then
        return 0
    fi

    local completed=$(date -u +"%Y-%m-%dT%H:%M:%SZ") # rfc3339

    curl -s --output /dev/null -H 'Content-Type: application/json' --max-time 5 \
        -d "{\"finished\": \"$completed\", \"machine_id\": \"$MACHINE_ID\"}" \
        $REPLICATED_APP_URL/kurl_metrics/finish_addon/$INSTALLATION_ID/$name || true
}

# SIGINT handler: reports the interrupted install (with the stack at the trap
# site) and, if a support bundle is possible, offers to collect one.
function ctrl_c() {
    trap - SIGINT # reset SIGINT handler to default - someone should be able to ctrl+c the support bundle collector
    read line file <<<$(caller)
    printf "${YELLOW}Trapped ctrl+c on line $line${NC}\n"
    local totalStack
    totalStack=$(stacktrace)
    local infoString="with stack $totalStack - bin utils $KURL_BIN_UTILS_FILE - context $REPORTING_CONTEXT_INFO"
    if [ -z "$SUPPORT_BUNDLE_READY" ]; then
        report_install_fail "trapped ctrl+c before completing k8s install $infoString"
        exit 1
    fi
    report_install_fail "trapped ctrl+c $infoString"
    collect_support_bundle
    exit 1 # exit with error
}

# unused
function addon_install_fail() {
    # report that an addon failed to install successfully
    local name=$1
    local version=$2

    # if INSTALLATION_ID is empty reporting is disabled
    if [ -z "$INSTALLATION_ID" ]; then
        return 1 # return error because the addon in question did too
    fi

    local completed=$(date -u +"%Y-%m-%dT%H:%M:%SZ") # rfc3339

    curl -s --output /dev/null -H 'Content-Type: application/json' --max-time 5 \
        -d "{\"finished\": \"$completed\", \"machine_id\": \"$MACHINE_ID\"}" \
        $REPLICATED_APP_URL/kurl_metrics/fail_addon/$INSTALLATION_ID/$name || true

    # provide an option for a user to provide a support bundle
    printf "${YELLOW}Addon ${name} ${version} failed to install${NC}\n"
    collect_support_bundle

    return 1 # return error because the addon in question did too
}

# unused
function addon_install_fail_nobundle() {
    # report that an addon failed to install successfully
    local name=$1
    local version=$2

    # if INSTALLATION_ID is empty reporting is disabled
    if [ -z "$INSTALLATION_ID" ]; then
        return 1 # return error because the addon in question did too
    fi

    local completed=$(date -u +"%Y-%m-%dT%H:%M:%SZ") # rfc3339

    curl -s --output /dev/null -H 'Content-Type: application/json' --max-time 5 \
        -d "{\"finished\": \"$completed\", \"machine_id\": \"$MACHINE_ID\"}" \
        $REPLICATED_APP_URL/kurl_metrics/fail_addon/$INSTALLATION_ID/$name || true

    return 1 # return error because the addon in question did too
}

# Interactively collects a troubleshoot support bundle and uploads it to the
# Replicated secure-upload endpoint, tagging it with a user-supplied email and
# issue description. Skipped under ASSUME_YES or a non-interactive shell.
function collect_support_bundle() {
    trap - SIGINT # reset SIGINT handler to default - someone should be able to ctrl+c the support bundle collector

    # if someone has set ASSUME_YES, we shouldn't automatically upload a support bundle
    if [ "$ASSUME_YES" = "1" ]; then
        return 0
    fi
    if ! prompts_can_prompt ; then
        return 0
    fi

    printf "${YELLOW}Would you like to provide a support bundle to aid us in avoiding similar errors in the future?${NC}\n"
    if ! confirmN; then
        return 0
    fi

    printf "${YELLOW}Please provide your work email address for our records (this is not a support ticket):${NC}\n"
    prompt
    local email_address=""
    if [ -n "$PROMPT_RESULT" ]; then
        email_address="$PROMPT_RESULT"
    fi

    printf "${YELLOW}Could you provide a quick description of the issue you encountered?${NC}\n"
    prompt
    local issue_description=""
    if [ -n "$PROMPT_RESULT" ]; then
        issue_description="$PROMPT_RESULT"
    fi

    path_add "/usr/local/bin" #ensure /usr/local/bin/kubectl-support_bundle is in the path

    # collect support bundle
    printf "Collecting support bundle now:"
    kubectl support-bundle https://kots.io

    # find the support bundle filename
    local support_bundle_filename=$(find . -type f -name "support-bundle-*.tar.gz" | sort -r | head -n 1)
    curl 'https://support-bundle-secure-upload.replicated.com/v1/upload' \
        -H 'accept: application/json, text/plain, */*' \
        -X POST \
        -H "Content-Type: multipart/form-data" \
        -F "data={\"first_name\":\"kurl.sh\",\"last_name\":\"installer\",\"email_address\":\"${email_address}\",\"company\":\"\",\"description\":\"${issue_description}\"}" \
        -F "file=@${support_bundle_filename}" \
        --compressed
    printf "\nSupport bundle uploaded!\n"
}

# ERR trap handler (requires set -E to fire inside functions): reports the
# failure with a stacktrace and optionally collects a support bundle.
function trap_report_error {
    if [[ ! $- =~ e ]]; then
        # if errexit is not set (set -e), don't report an error here
        return 0
    fi
    trap - ERR # reset the error handler to default in case there are errors within this function
    read line file <<<$(caller)
    printf "${YELLOW}An error occurred on line $line${NC}\n"
    local totalStack
    totalStack=$(stacktrace)
    report_install_fail "An error occurred with stack $totalStack - bin utils $KURL_BIN_UTILS_FILE - context $REPORTING_CONTEXT_INFO"
    if [ -n "$SUPPORT_BUNDLE_READY" ]; then
        collect_support_bundle
    fi
    exit 1
}

# Walks the bash call stack via `caller` and echoes it as a single
# "(file: ... func: ... line: ...)" string for error reports.
function stacktrace {
    local i=1
    local totalStack
    while caller $i > /dev/null; do
        read line func file <<<$(caller $i)
        totalStack="$totalStack (file: $file func: $func line: $line)"
        ((i++))
    done
    echo "$totalStack"
}

# Host path used for rook persistent volumes.
PV_BASE_PATH=/opt/replicated/rook

# Scales the rook-ceph operator to 0 replicas (no-op unless rook 1.x detected).
function disable_rook_ceph_operator() {
    if ! is_rook_1; then
        return 0
    fi
    kubectl -n rook-ceph scale deployment rook-ceph-operator --replicas=0
}

# Scales the rook-ceph operator back to 1 replica (no-op unless rook 1.x detected).
function enable_rook_ceph_operator() {
    if ! is_rook_1; then
        return 0
    fi
    kubectl -n rook-ceph scale deployment rook-ceph-operator --replicas=1
}

# Detects a rook 1.x install by the presence of the replicapool cephblockpool.
function is_rook_1() {
    kubectl -n rook-ceph get cephblockpools replicapool &>/dev/null
}

# Returns 0 once no rook-ceph-osd pods remain; used as a spinner predicate.
function rook_ceph_osd_pods_gone() {
    if kubectl -n rook-ceph get pods -l app=rook-ceph-osd 2>&1 | grep 'rook-ceph-osd' &>/dev/null ; then
        return 1
    fi
    return 0
}

# Completely removes rook-ceph from the cluster: refuses to run while any PV
# still uses a rook CSI driver, pauses ekco so it doesn't fight the teardown,
# deletes CRs, CRDs, the namespace and StorageClasses, then resumes ekco.
# On-disk data under /var/lib/rook and /opt/replicated/rook is NOT freed.
function remove_rook_ceph() {
    # make sure there aren't any PVs using rook before deleting it
    all_pv_drivers="$(kubectl get pv -o=jsonpath='{.items[*].spec.csi.driver}')"
    if echo "$all_pv_drivers" | grep "rook" &>/dev/null ; then
        # do stuff
        printf "${RED}"
        printf "ERROR: \n"
        printf "There are still PVs using rook-ceph.\n"
        printf "Remove these PVs before continuing.\n"
        printf "${NC}"
        exit 1
    fi

    # scale ekco to 0 replicas if it exists
    if kubernetes_resource_exists kurl deployment ekc-operator; then
        kubectl -n kurl scale deploy ekc-operator --replicas=0
    fi

    # remove all rook-ceph CR objects
    printf "Removing rook-ceph custom resource objects - this may take some time:\n"
    kubectl delete cephcluster -n rook-ceph rook-ceph # deleting this first frees up resources
    kubectl get crd | grep 'ceph.rook.io' | awk '{ print $1 }' | xargs -I'{}' kubectl -n rook-ceph delete '{}' --all
    kubectl delete volumes.rook.io --all

    # wait for rook-ceph-osd pods to disappear
    echo "Waiting for rook-ceph OSD pods to be removed"
    spinner_until 120 rook_ceph_osd_pods_gone

    # delete rook-ceph CRDs
    printf "Removing rook-ceph custom resources:\n"
    kubectl get crd | grep 'ceph.rook.io' | awk '{ print $1 }' | xargs -I'{}' kubectl delete crd '{}'
    kubectl delete crd volumes.rook.io

    # delete rook-ceph ns
    kubectl delete ns rook-ceph

    # delete rook-ceph storageclass(es)
    printf "Removing rook-ceph StorageClasses"
    kubectl get storageclass | grep rook | awk '{ print $1 }' | xargs -I'{}' kubectl delete storageclass '{}'

    # scale ekco back to 1 replicas if it exists
    if kubernetes_resource_exists kurl deployment ekc-operator; then
        kubectl -n kurl scale deploy ekc-operator --replicas=1
    fi

    # print success message
    printf "${GREEN}Removed rook-ceph successfully!\n${NC}"
    printf "Data within /var/lib/rook, /opt/replicated/rook and any bound disks has not been freed.\n"
}

# scale down prometheus, move all 'rook-ceph' PVCs to 'longhorn', scale up prometheus
function rook_ceph_to_longhorn() {
    # set prometheus scale if it exists
    if kubectl get namespace monitoring &>/dev/null; then
        kubectl patch prometheus -n monitoring k8s --type='json' --patch '[{"op": "replace", "path": "/spec/replicas", value: 0}]'
    fi

    # get the list of StorageClasses that use rook-ceph
    rook_scs=$(kubectl get storageclass | grep rook | grep -v '(default)' | awk '{ print $1}') # any non-default rook StorageClasses
    rook_default_sc=$(kubectl get storageclass | grep rook | grep '(default)' | awk '{ print $1}') # any default rook StorageClasses

    for rook_sc in $rook_scs
    do
        # run the migration (without setting defaults)
        $BIN_PVMIGRATE --source-sc "$rook_sc" --dest-sc longhorn --rsync-image "$KURL_UTIL_IMAGE"
    done

    for rook_sc in $rook_default_sc
    do
        # run the migration (setting defaults)
        $BIN_PVMIGRATE --source-sc "$rook_sc" --dest-sc longhorn --rsync-image "$KURL_UTIL_IMAGE" --set-defaults
    done

    # reset prometheus scale
    if kubectl get namespace monitoring &>/dev/null; then
        kubectl patch prometheus -n monitoring k8s --type='json' --patch '[{"op": "replace", "path": "/spec/replicas", value: 2}]'
    fi

    # print success message
    printf "${GREEN}Migration from rook-ceph to longhorn completed successfully!\n${NC}"
}

# if PVCs and object store data have both been migrated from rook-ceph and rook-ceph is no longer specified in the kURL spec, remove rook-ceph
function maybe_cleanup_rook() {
    if [ -z "$ROOK_VERSION" ]; then
        if [ "$DID_MIGRATE_ROOK_PVCS" == "1" ] && [ "$DID_MIGRATE_ROOK_OBJECT_STORE" == "1" ]; then
            report_addon_start "rook-ceph-removal" "v1"
            remove_rook_ceph
            report_addon_success "rook-ceph-removal" "v1"
        fi
    fi
}

# Kurl Specific RKE Install

RKE2_SHOULD_RESTART=

# RKE2 cluster init. Most of the kubeadm-era logic is retained below as
# commented-out reference; only the post-init steps at the bottom are active.
function rke2_init() {
    # logStep "Initialize Kubernetes"

    # kubernetes_maybe_generate_bootstrap_token

    # API_SERVICE_ADDRESS="$PRIVATE_ADDRESS:6443"
    # if [ "$HA_CLUSTER" = "1" ]; then
    #     API_SERVICE_ADDRESS="$LOAD_BALANCER_ADDRESS:$LOAD_BALANCER_PORT"
    # fi

    # local oldLoadBalancerAddress=$(kubernetes_load_balancer_address)
    # if commandExists ekco_handle_load_balancer_address_change_pre_init; then
    #     ekco_handle_load_balancer_address_change_pre_init $oldLoadBalancerAddress $LOAD_BALANCER_ADDRESS
    # fi

    # kustomize_kubeadm_init=./kustomize/kubeadm/init
    # CERT_KEY=
    # CERT_KEY_EXPIRY=
    # if [ "$HA_CLUSTER" = "1" ]; then
    #     CERT_KEY=$(< /dev/urandom tr -dc a-f0-9 | head -c64)
    #     CERT_KEY_EXPIRY=$(TZ="UTC" date -d "+2 hour" --rfc-3339=second | sed 's/ /T/')
    #     insert_patches_strategic_merge \
    #         $kustomize_kubeadm_init/kustomization.yaml \
    #         patch-certificate-key.yaml
    # fi

    # # kustomize can merge multiple list patches in some cases but it is not working for me on the
    # # ClusterConfiguration.apiServer.certSANs list
    # if [ -n "$PUBLIC_ADDRESS" ] && [ -n "$LOAD_BALANCER_ADDRESS" ]; then
    #     insert_patches_strategic_merge \
    #         $kustomize_kubeadm_init/kustomization.yaml \
    #         patch-public-and-load-balancer-address.yaml
    # elif [ -n "$PUBLIC_ADDRESS" ]; then
    #     insert_patches_strategic_merge \
    #         $kustomize_kubeadm_init/kustomization.yaml \
    #         patch-public-address.yaml
    # elif [ -n "$LOAD_BALANCER_ADDRESS" ]; then
    #     insert_patches_strategic_merge \
    #         $kustomize_kubeadm_init/kustomization.yaml \
    #         patch-load-balancer-address.yaml
    # fi

    # # Add kubeadm init patches from addons.
    # for patch in $(ls -1 ${kustomize_kubeadm_init}-patches/* 2>/dev/null || echo); do
    #     patch_basename="$(basename $patch)"
    #     cp $patch $kustomize_kubeadm_init/$patch_basename
    #     insert_patches_strategic_merge \
    #         $kustomize_kubeadm_init/kustomization.yaml \
    #         $patch_basename
    # done
    # mkdir -p "$KUBEADM_CONF_DIR"
    # kubectl kustomize $kustomize_kubeadm_init > $KUBEADM_CONF_DIR/kubeadm-init-raw.yaml
    # render_yaml_file $KUBEADM_CONF_DIR/kubeadm-init-raw.yaml > $KUBEADM_CONF_FILE

    # # kustomize requires assests have a metadata field while kubeadm config will reject yaml containing it
    # # this uses a go binary found in kurl/cmd/yamlutil to strip the metadata field from the yaml
    # #
    # cp $KUBEADM_CONF_FILE $KUBEADM_CONF_DIR/kubeadm_conf_copy_in
    # $DIR/bin/yamlutil -r -fp $KUBEADM_CONF_DIR/kubeadm_conf_copy_in -yf metadata
    # mv $KUBEADM_CONF_DIR/kubeadm_conf_copy_in $KUBEADM_CONF_FILE

    # cat << EOF >> $KUBEADM_CONF_FILE
    # apiVersion: kubelet.config.k8s.io/v1beta1
    # kind: KubeletConfiguration
    # cgroupDriver: systemd
    # ---
    # EOF

    # # When no_proxy changes kubeadm init rewrites the static manifests and fails because the api is
    # # restarting. Trigger the restart ahead of time and wait for it to be healthy.
    # if [ -f "/etc/kubernetes/manifests/kube-apiserver.yaml" ] && [ -n "$no_proxy" ] && ! cat /etc/kubernetes/manifests/kube-apiserver.yaml | grep -q "$no_proxy"; then
    #     kubeadm init phase control-plane apiserver --config $KUBEADM_CONF_FILE
    #     sleep 2
    #     if ! spinner_until 60 kubernetes_api_is_healthy; then
    #         echo "Failed to wait for kubernetes API restart after no_proxy change" # continue
    #     fi
    # fi

    # if [ "$HA_CLUSTER" = "1" ]; then
    #     UPLOAD_CERTS="--upload-certs"
    # fi

    # # kubeadm init temporarily taints this node which causes rook to move any mons on it and may
    # # lead to a loss of quorum
    # disable_rook_ceph_operator

    # # since K8s 1.19.1 kubeconfigs point to local API server even in HA setup. When upgrading from
    # # earlier versions and using a load balancer, kubeadm init will bail because the kubeconfigs
    # # already exist pointing to the load balancer
    # rm -rf /etc/kubernetes/*.conf

    # # Regenerate api server cert in case load balancer address changed
    # if [ -f /etc/kubernetes/pki/apiserver.crt ]; then
    #     mv -f /etc/kubernetes/pki/apiserver.crt /tmp/
    # fi
    # if [ -f /etc/kubernetes/pki/apiserver.key ]; then
    #     mv -f /etc/kubernetes/pki/apiserver.key /tmp/
    # fi

    # set -o pipefail
    # kubeadm init \
    #     --ignore-preflight-errors=all \
    #     --config $KUBEADM_CONF_FILE \
    #     $UPLOAD_CERTS \
    #     | tee /tmp/kubeadm-init
    # set +o pipefail

    # if [ -n "$LOAD_BALANCER_ADDRESS" ]; then
    #     spinner_until 120 cert_has_san "$PRIVATE_ADDRESS:6443" "$LOAD_BALANCER_ADDRESS"
    # fi

    # spinner_kubernetes_api_stable

    # exportKubeconfig # This was moved to the setup function

    # KUBEADM_TOKEN_CA_HASH=$(cat /tmp/kubeadm-init | grep 'discovery-token-ca-cert-hash' | awk '{ print $2 }' | head -1)

    wait_for_nodes
    enable_rook_ceph_operator

    DID_INIT_KUBERNETES=1
    # logSuccess "Kubernetes Master Initialized"

    # local currentLoadBalancerAddress=$(kubernetes_load_balancer_address)
    # if [ "$currentLoadBalancerAddress" != "$oldLoadBalancerAddress" ]; then
    #     # restart scheduler and controller-manager on this node so they use the new address
    #     mv /etc/kubernetes/manifests/kube-scheduler.yaml /tmp/ && sleep 1 && mv /tmp/kube-scheduler.yaml /etc/kubernetes/manifests/
    #     mv /etc/kubernetes/manifests/kube-controller-manager.yaml /tmp/ && sleep 1 && mv /tmp/kube-controller-manager.yaml /etc/kubernetes/manifests/
    #     # restart kube-proxies so they use the new address
    #     kubectl -n kube-system delete pods --selector=k8s-app=kube-proxy

    #     if kubernetes_has_remotes; then
    #         local proxyFlag=""
    #         if [ -n "$PROXY_ADDRESS" ]; then
    #             proxyFlag=" -x $PROXY_ADDRESS"
    #         fi
    #         local prefix="curl -sSL${proxyFlag} $KURL_URL/$INSTALLER_ID/"
    #         if [ "$AIRGAP" = "1" ] || [ -z "$KURL_URL" ]; then
    #             prefix="cat "
    #         fi

    #         printf "${YELLOW}\nThe load balancer address has changed. Run the following on all remote nodes to use the new address${NC}\n"
    #         printf "\n"
    #         printf "${GREEN}    ${prefix}tasks.sh | sudo bash -s set-kubeconfig-server https://${currentLoadBalancerAddress}${NC}\n"
    #         printf "\n"
    #         printf "Continue? "
    #         confirmN

    #         if commandExists ekco_handle_load_balancer_address_change_post_init; then
    #             ekco_handle_load_balancer_address_change_post_init $oldLoadBalancerAddress $LOAD_BALANCER_ADDRESS
    #         fi
    #     fi
    # fi

    labelNodes
    kubectl cluster-info

    # create kurl namespace if it doesn't exist
    kubectl get ns kurl 2>/dev/null 1>/dev/null || kubectl create ns kurl 1>/dev/null

    logSuccess "Cluster Initialized"

    # TODO(dans): coredns is deployed through helm -> might need to go through values here
    # configure_coredns

    if commandExists registry_init; then
        registry_init
    fi
}

# Installs the RKE2 distribution at the given version: downloads host packages
# when online, configures and starts rke2-server, loads images, installs
# plugins, and waits for the API to come up. Exports PATH/KUBECONFIG/
# CRI_CONFIG_FILE for the rke2 binaries.
function rke2_install() {
    local rke2_version="$1"

    export PATH=$PATH:/var/lib/rancher/rke2/bin
    export KUBECONFIG=/etc/rancher/rke2/rke2.yaml
    export CRI_CONFIG_FILE=/var/lib/rancher/rke2/agent/etc/crictl.yaml

    # TODO(ethan): is this still necessary?
    # kubernetes_load_ipvs_modules
    # TODO(ethan): is this still necessary?
    # kubernetes_sysctl_config

    # strip the leading "v" and trailing "-rke2…" suffix to get the bare k8s semver
    local k8s_semver=
    k8s_semver="$(echo "${rke2_version}" | sed 's/^v\(.*\)-.*$/\1/')"

    # For online always download the rke2.tar.gz bundle.
    # Regardless if host packages are already installed, we always inspect for newer versions
    # and/or re-install any missing or corrupted packages.
    # TODO(ethan): is this comment correct?
if [ "$AIRGAP" != "1" ] && [ -n "$DIST_URL" ]; then
        rke2_get_host_packages_online "${rke2_version}"
        kubernetes_get_conformance_packages_online "${k8s_semver}"
    fi

    rke2_configure
    rke2_install_host_packages "${rke2_version}"
    rke2_load_images "${rke2_version}"

    systemctl enable rke2-server.service
    systemctl start rke2-server.service

    spinner_containerd_is_healthy

    get_shared

    logStep "Installing plugins"
    install_plugins
    logSuccess "Plugins installed"

    # TODO(ethan)
    # install_kustomize

    # rke2 writes its admin kubeconfig asynchronously after the service starts
    while [ ! -f /etc/rancher/rke2/rke2.yaml ]; do
        sleep 2
    done

    if [ -d "$DIR/packages/kubernetes-conformance/${k8s_semver}/images" ]; then
        load_images "$DIR/packages/kubernetes-conformance/${k8s_semver}/images"
    fi

    # For Kubectl and Rke2 binaries
    # NOTE: this is still not in root's path
    if ! grep -q "/var/lib/rancher/rke2/bin" /etc/profile ; then
        echo "export PATH=\$PATH:/var/lib/rancher/rke2/bin" >> /etc/profile
    fi
    if ! grep -q "/var/lib/rancher/rke2/agent/etc/crictl.yaml" /etc/profile ; then
        echo "export CRI_CONFIG_FILE=/var/lib/rancher/rke2/agent/etc/crictl.yaml" >> /etc/profile
    fi

    exportKubeconfig

    logStep "Waiting for Kubernetes"
    # Extending timeout to 5 min based on performance on clean machines.
    if ! spinner_until 300 get_nodes_succeeds ; then
        # this should exit script on non-zero exit code and print error message
        kubectl get nodes 1>/dev/null
    fi

    wait_for_default_namespace
    logSuccess "Kubernetes ready"

    # TODO(dan): Need to figure out how to let users run container tools as non-root
}

# Prints the red experimental-feature warning banner shown before an RKE2
# install.
# NOTE(review): the heredoc ASCII-art line breaks below were reconstructed
# from a whitespace-mangled source — exact columns may differ from upstream;
# content is cosmetic only (quoted heredoc, no expansion).
function rke2_preamble() {
    printf "${RED}"
    cat << "EOF"
 ( )     (      ____
 )\ )    ( (        /( ( )\ )  | /
(()/( )\ )\()) )\ ) ( (()/(    | /
 /(_))((((_)( ((_)\ (()/( )\ /(_)) | /
 (_))_ )\ _ )\ _((_) /(_))_ ((_) (_)) |/
 |  \  (_)_\(_)| \| |(_)) __|| __|| _ \ (
 | |) | / _ \  | .` | | (_ || _|  |   / )\
 |___/ /_/ \_\ |_|\_|  \___||___||_|_\((_)
EOF
    printf "${NC}\n"
    printf "${RED}YOU ARE NOW INSTALLING RKE2 WITH KURL. THIS FEATURE IS EXPERIMENTAL!${NC}\n"
    printf "${RED}\t- It can be removed at any point in the future.${NC}\n"
    printf "${RED}\t- There are zero guarantees regarding addon compatibility.${NC}\n"
    printf "${RED}\n\nCONTINUING AT YOUR OWN RISK....${NC}\n\n"
}

# Post-install summary for RKE2. Much of the kubeadm-era join-command output
# is retained below as commented-out reference; the active portion prints the
# completion banner and runs the addon outros.
function rke2_outro() {
    echo

    # if [ -z "$PUBLIC_ADDRESS" ]; then
    #     if [ -z "$PRIVATE_ADDRESS" ]; then
    #         PUBLIC_ADDRESS=""
    #         PRIVATE_ADDRESS=""
    #     else
    #         PUBLIC_ADDRESS="$PRIVATE_ADDRESS"
    #     fi
    # fi

    # local proxyFlag=""
    # if [ -n "$PROXY_ADDRESS" ]; then
    #     proxyFlag=" -x $PROXY_ADDRESS"
    # fi

    # local common_flags
    # common_flags="${common_flags}$(get_docker_registry_ip_flag "${DOCKER_REGISTRY_IP}")"
    # common_flags="${common_flags}$(get_additional_no_proxy_addresses_flag "${PROXY_ADDRESS}" "${SERVICE_CIDR},${POD_CIDR}")"
    # common_flags="${common_flags}$(get_kurl_install_directory_flag "${KURL_INSTALL_DIRECTORY_FLAG}")"

    # TODO(dan): move this somewhere into the k8s distro
    # KUBEADM_TOKEN_CA_HASH=$(cat /tmp/kubeadm-init | grep 'discovery-token-ca-cert-hash' | awk '{ print $2 }' | head -1)

    printf "\n"
    printf "\t\t${GREEN}Installation${NC}\n"
    printf "\t\t${GREEN}  Complete ✔${NC}\n"
    addon_outro
    printf "\n"

    # TODO(dan): specific to kubeadm config.
# kubeconfig_setup_outro local prefix="curl -sSL${proxyFlag} $KURL_URL/$INSTALLER_ID/" if [ -z "$KURL_URL" ]; then prefix="cat " fi # if [ "$HA_CLUSTER" = "1" ]; then # printf "Master node join commands expire after two hours, and worker node join commands expire after 24 hours.\n" # printf "\n" # if [ "$AIRGAP" = "1" ]; then # printf "To generate new node join commands, run ${GREEN}cat ./tasks.sh | sudo bash -s join_token ha airgap${NC} on an existing master node.\n" # else # printf "To generate new node join commands, run ${GREEN}${prefix}tasks.sh | sudo bash -s join_token ha${NC} on an existing master node.\n" # fi # else # printf "Node join commands expire after 24 hours.\n" # printf "\n" # if [ "$AIRGAP" = "1" ]; then # printf "To generate new node join commands, run ${GREEN}cat ./tasks.sh | sudo bash -s join_token airgap${NC} on this node.\n" # else # printf "To generate new node join commands, run ${GREEN}${prefix}tasks.sh | sudo bash -s join_token${NC} on this node.\n" # fi # fi # if [ "$AIRGAP" = "1" ]; then # printf "\n" # printf "To add worker nodes to this installation, copy and unpack this bundle on your other nodes, and run the following:" # printf "\n" # printf "\n" # printf "${GREEN} cat ./join.sh | sudo bash -s airgap kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=${KUBEADM_TOKEN_CA_HASH} kubernetes-version=${KUBERNETES_VERSION}${common_flags}\n" # printf "${NC}" # printf "\n" # printf "\n" # if [ "$HA_CLUSTER" = "1" ]; then # printf "\n" # printf "To add ${GREEN}MASTER${NC} nodes to this installation, copy and unpack this bundle on your other nodes, and run the following:" # printf "\n" # printf "\n" # printf "${GREEN} cat ./join.sh | sudo bash -s airgap kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=${KUBEADM_TOKEN_CA_HASH} kubernetes-version=${KUBERNETES_VERSION} cert-key=${CERT_KEY} control-plane${common_flags}\n" # printf "${NC}" # 
printf "\n" # printf "\n" # fi # else # printf "\n" # printf "To add worker nodes to this installation, run the following script on your other nodes:" # printf "\n" # printf "${GREEN} ${prefix}join.sh | sudo bash -s kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=${KUBEADM_TOKEN_CA_HASH} kubernetes-version=${KUBERNETES_VERSION}${common_flags}\n" # printf "${NC}" # printf "\n" # printf "\n" # if [ "$HA_CLUSTER" = "1" ]; then # printf "\n" # printf "To add ${GREEN}MASTER${NC} nodes to this installation, run the following script on your other nodes:" # printf "\n" # printf "${GREEN} ${prefix}join.sh | sudo bash -s kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=$KUBEADM_TOKEN_CA_HASH kubernetes-version=${KUBERNETES_VERSION} cert-key=${CERT_KEY} control-plane${common_flags}\n" # printf "${NC}" # printf "\n" # printf "\n" # fi # fi } function rke2_main() { local rke2_version="$(echo "${RKE2_VERSION}" | sed 's/+/-/')" rke2_preamble # RKE Begin # parse_kubernetes_target_version # TODO(dan): Version only makes sense for kuberntees discover full-cluster # TODO(dan): looks for docker and kubernetes, shouldn't hurt # report_install_start # TODO(dan) remove reporting for now. # trap prek8s_ctrl_c SIGINT # trap ctrl+c (SIGINT) and handle it by reporting that the user exited intentionally # TODO(dan) remove reporting for now. 
# preflights # TODO(dan): mostly good, but disable for now ${K8S_DISTRO}_addon_for_each addon_fetch # if [ -z "$CURRENT_KUBERNETES_VERSION" ]; then # TODO (ethan): support for CURRENT_KUBERNETES_VERSION # host_preflights "1" "0" "0" # else # host_preflights "1" "0" "1" # fi common_prompts # TODO(dan): shouldn't come into play for RKE2 journald_persistent configure_proxy install_host_dependencies get_common ${K8S_DISTRO}_addon_for_each addon_pre_init discover_pod_subnet # discover_service_subnet # TODO(dan): uses kubeadm configure_no_proxy rke2_install "${rke2_version}" # upgrade_kubernetes # TODO(dan): uses kubectl operator # kubernetes_host # TODO(dan): installs and sets up kubeadm, kubectl # setup_kubeadm_kustomize # TODO(dan): self-explainatory # trap k8s_ctrl_c SIGINT # trap ctrl+c (SIGINT) and handle it by asking for a support bundle - only do this after k8s is installed ${K8S_DISTRO}_addon_for_each addon_load # init # See next line rke2_init # TODO(dan): A mix of Kubeadm stuff and general setup. apply_installer_crd kurl_init_config ${K8S_DISTRO}_addon_for_each addon_install # post_init # TODO(dan): more kubeadm token setup rke2_outro package_cleanup # report_install_success # TODO(dan) remove reporting for now. } function rke2_configure() { # prevent permission denied error when running kubectl if ! grep -qs "^write-kubeconfig-mode:" /etc/rancher/rke2/config.yaml ; then mkdir -p /etc/rancher/rke2/ echo "write-kubeconfig-mode: 644" >> /etc/rancher/rke2/config.yaml RKE2_SHOULD_RESTART=1 fi # TODO(ethan): pod cidr # TODO(ethan): service cidr # TODO(ethan): http proxy # TODO(ethan): load balancer } function rke2_restart() { restart_systemd_and_wait "rke2-server.service" # TODO(ethan): rke2-agent.service? 
} function rke2_install_host_packages() { local rke2_version="$1" if rke2_host_packages_ok "${rke2_version}"; then logSuccess "RKE2 host packages already installed" if [ "${RKE2_SHOULD_RESTART}" = "1" ]; then rke2_restart RKE2_SHOULD_RESTART=0 fi return fi case "$LSB_DIST" in centos|rhel|amzn|ol) install_host_packages "${DIR}/packages/rke-2/${rke2_version}" rke2-server rke2-agent ;; *) bail "RKE2 install is not supported on ${LSB_DIST} ${DIST_MAJOR}" ;; esac # TODO(ethan): is this still necessary? # if [ "$CLUSTER_DNS" != "$DEFAULT_CLUSTER_DNS" ]; then # sed -i "s/$DEFAULT_CLUSTER_DNS/$CLUSTER_DNS/g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf # fi } function rke2_host_packages_ok() { local rke2_version="$1" if ! commandExists kubelet; then echo "kubelet command missing - will install host components" return 1 fi if ! commandExists kubectl; then echo "kubectl command missing - will install host components" return 1 fi kubelet --version | grep -q "$(echo $rke2_version | sed "s/-/+/")" } function rke2_get_host_packages_online() { local rke2_version="$1" rm -rf $DIR/packages/rke-2/${rke2_version} # Cleanup broken/incompatible packages from failed runs local package="rke-2-${rke2_version}.tar.gz" package_download "${package}" tar xf "$(package_filepath "${package}")" } function rke2_load_images() { local rke2_version="$1" logStep "Load RKE2 images" mkdir -p /var/lib/rancher/rke2/agent/images gunzip -c $DIR/packages/rke-2/${rke2_version}/assets/rke2-images.linux-amd64.tar.gz > /var/lib/rancher/rke2/agent/images/rke2-images.linux-amd64.tar logSuccess "RKE2 images loaded" } function upgrade_kubernetes() { if [ "$KUBERNETES_UPGRADE" != "1" ]; then enable_rook_ceph_operator return fi disable_rook_ceph_operator upgrade_kubernetes_step upgrade_kubernetes_minor upgrade_kubernetes_patch enable_rook_ceph_operator } function report_upgrade_kubernetes() { report_addon_start "kubernetes_upgrade" "$KUBERNETES_VERSION" export REPORTING_CONTEXT_INFO="kubernetes_upgrade 
$KUBERNETES_VERSION" upgrade_kubernetes export REPORTING_CONTEXT_INFO="" report_addon_success "kubernetes_upgrade" "$KUBERNETES_VERSION" } function upgrade_kubernetes_step() { if [ "$KUBERNETES_STEP_LOCAL_PRIMARY" == "1" ]; then upgrade_kubernetes_local_master_minor "$STEP_VERSION" fi if [ "$KUBERNETES_STEP_REMOTE_PRIMARIES" == "1" ]; then upgrade_kubernetes_remote_masters_minor "$STEP_VERSION" fi if [ "$KUBERNETES_STEP_SECONDARIES" == "1" ]; then upgrade_kubernetes_workers_minor "$STEP_VERSION" fi } function upgrade_kubernetes_minor() { if [ "$KUBERNETES_UPGRADE_LOCAL_PRIMARY_MINOR" == "1" ]; then upgrade_kubernetes_local_master_minor "$KUBERNETES_VERSION" fi if [ "$KUBERNETES_UPGRADE_REMOTE_PRIMARIES_MINOR" == "1" ]; then upgrade_kubernetes_remote_masters_minor "$KUBERNETES_VERSION" fi if [ "$KUBERNETES_UPGRADE_SECONDARIES_MINOR" == "1" ]; then upgrade_kubernetes_workers_minor "$KUBERNETES_VERSION" fi } function upgrade_kubernetes_patch() { if [ "$KUBERNETES_UPGRADE_LOCAL_PRIMARY_PATCH" == "1" ]; then upgrade_kubernetes_local_master_patch "$KUBERNETES_VERSION" fi if [ "$KUBERNETES_UPGRADE_REMOTE_PRIMARIES_PATCH" == "1" ]; then upgrade_kubernetes_remote_masters_patch "$KUBERNETES_VERSION" fi if [ "$KUBERNETES_UPGRADE_SECONDARIES_PATCH" == "1" ]; then upgrade_kubernetes_workers_patch "$KUBERNETES_VERSION" fi } function upgrade_kubernetes_local_master_patch() { local k8sVersion="$1" local node="$(get_local_node_name)" if [ "$AIRGAP" != "1" ] && [ -n "$DIST_URL" ]; then kubernetes_get_host_packages_online "$k8sVersion" kubernetes_get_conformance_packages_online "$k8sVersion" fi load_images "$DIR/packages/kubernetes/$k8sVersion/images" if [ -d "$DIR/packages/kubernetes-conformance/$k8sVersion/images" ]; then load_images "$DIR/packages/kubernetes-conformance/$k8sVersion/images" fi upgrade_kubeadm "$k8sVersion" kubeadm upgrade plan "v${k8sVersion}" printf "${YELLOW}Drain local node and apply upgrade? 
${NC}"
    confirmY
    kubernetes_drain "$node"
    spinner_kubernetes_api_stable
    kubeadm upgrade apply "v$k8sVersion" --yes --force
    kubernetes_install_host_packages "$k8sVersion"
    systemctl daemon-reload
    systemctl restart kubelet
    spinner_kubernetes_api_stable
    kubectl uncordon "$node"
    spinner_until 120 kubernetes_node_has_version "$node" "$k8sVersion"
    spinner_until 120 kubernetes_all_nodes_ready
}

# Copy the packaged kubeadm binary for the target version into /usr/bin and
# make it executable.
# Arguments: $1 - kubernetes version (e.g. 1.19.15)
function upgrade_kubeadm() {
    local k8sVersion="$1"
    cp -f "$DIR/packages/kubernetes/${k8sVersion}/assets/kubeadm" /usr/bin/
    chmod a+rx /usr/bin/kubeadm
}

# Patch-upgrade every remote primary, one at a time, then wait for all nodes
# to report Ready.
function upgrade_kubernetes_remote_masters_patch() {
    while read -r master; do
        upgrade_kubernetes_remote_node_patch "$master"
    done < <(try_1m kubernetes_remote_masters)
    spinner_until 120 kubernetes_all_nodes_ready
}

# Patch-upgrade every worker node, one at a time.
function upgrade_kubernetes_workers_patch() {
    while read -r worker; do
        upgrade_kubernetes_remote_node_patch "$worker"
    done < <(try_1m kubernetes_workers)
}

# Drain a single remote node and prompt the operator to run the upgrade script
# on it, then wait for the node to report the target version.
# Arguments: $1 - one line of output from `kubectl get nodes`
# Globals read: KUBERNETES_TARGET_VERSION_MINOR/PATCH, AIRGAP, KUBERNETES_VERSION,
#               INSTALLER_ID, KURL_VERSION, KURL_URL, PROXY_ADDRESS
function upgrade_kubernetes_remote_node_patch() {
    local node="$1"
    nodeName=$(echo "$node" | awk '{ print $1 }')
    nodeVersion="$(echo "$node" | awk '{ print $5 }' | sed 's/v//' )"
    semverParse "$nodeVersion"
    nodeMinor="$minor"
    nodePatch="$patch"
    # BUGFIX: these branches used `continue`, but this function is invoked from
    # the caller's `while read` loop; `continue` is only meaningful inside a
    # loop in the same scope, so bash would print a warning and fall through,
    # draining nodes that are already up to date. `return 0` skips the node,
    # matching upgrade_kubernetes_remote_node_minor.
    if [ "$nodeMinor" -gt "$KUBERNETES_TARGET_VERSION_MINOR" ]; then
        return 0
    fi
    if [ "$nodeMinor" -eq "$KUBERNETES_TARGET_VERSION_MINOR" ] && [ "$nodePatch" -ge "$KUBERNETES_TARGET_VERSION_PATCH" ]; then
        return 0
    fi
    DOCKER_REGISTRY_IP=$(kubectl -n kurl get service registry -o=jsonpath='{@.spec.clusterIP}' 2>/dev/null || echo "")
    printf "${YELLOW}Drain node $nodeName to prepare for upgrade? 
${NC}" confirmY kubernetes_drain "$nodeName" local common_flags common_flags="${common_flags}$(get_docker_registry_ip_flag "${DOCKER_REGISTRY_IP}")" common_flags="${common_flags}$(get_additional_no_proxy_addresses_flag "${NO_PROXY_ADDRESSES}" "${NO_PROXY_ADDRESSES}")" common_flags="${common_flags}$(get_kurl_install_directory_flag "${KURL_INSTALL_DIRECTORY_FLAG}")" printf "\n\n\tRun the upgrade script on remote node to proceed: ${GREEN}$nodeName${NC}\n\n" if [ "$AIRGAP" = "1" ]; then printf "\t${GREEN}cat ./upgrade.sh | sudo bash -s airgap kubernetes-version=${KUBERNETES_VERSION}${common_flags}${NC}\n\n" else local prefix= prefix="$(build_installer_prefix "${INSTALLER_ID}" "${KURL_VERSION}" "${KURL_URL}" "${PROXY_ADDRESS}")" printf "\t${GREEN} ${prefix}upgrade.sh | sudo bash -s kubernetes-version=${KUBERNETES_VERSION}${common_flags}${NC}\n\n" fi spinner_until -1 kubernetes_node_has_version "$nodeName" "$KUBERNETES_VERSION" logSuccess "Kubernetes $KUBERNETES_VERSION detected on $nodeName" kubectl uncordon "$nodeName" } function upgrade_kubernetes_local_master_minor() { local k8sVersion="$1" local node="$(get_local_node_name)" if [ "$AIRGAP" != "1" ] && [ -n "$DIST_URL" ]; then kubernetes_get_host_packages_online "$k8sVersion" kubernetes_get_conformance_packages_online "$k8sVersion" fi load_images "$DIR/packages/kubernetes/$k8sVersion/images" if [ -d "$DIR/packages/kubernetes-conformance/$k8sVersion/images" ]; then load_images "$DIR/packages/kubernetes-conformance/$k8sVersion/images" fi upgrade_kubeadm "$k8sVersion" kubeadm upgrade plan "v${k8sVersion}" printf "${YELLOW}Drain local node and apply upgrade? 
${NC}" confirmY kubernetes_drain "$node" spinner_kubernetes_api_stable kubeadm upgrade apply "v$k8sVersion" --yes --force upgrade_etcd_image_18 "$k8sVersion" kubernetes_install_host_packages "$k8sVersion" systemctl daemon-reload systemctl restart kubelet spinner_kubernetes_api_stable kubectl uncordon "$node" # force deleting the cache because the api server will use the stale API versions after kubeadm upgrade rm -rf $HOME/.kube spinner_until 120 kubernetes_node_has_version "$node" "$k8sVersion" spinner_until 120 kubernetes_all_nodes_ready } function upgrade_kubernetes_remote_masters_minor() { local k8sVersion="$1" while read -r master; do upgrade_kubernetes_remote_node_minor "$master" "$k8sVersion" done < <(try_1m kubernetes_remote_masters) spinner_until 120 kubernetes_all_nodes_ready } function upgrade_kubernetes_workers_minor() { local k8sVersion="$1" while read -r worker; do upgrade_kubernetes_remote_node_minor "$worker" "$k8sVersion" done < <(try_1m kubernetes_workers) } function upgrade_kubernetes_remote_node_minor() { # one line of output from `kubectl get nodes` local node="$1" local targetK8sVersion="$2" nodeName=$(echo "$node" | awk '{ print $1 }') nodeVersion="$(echo "$node" | awk '{ print $5 }' | sed 's/v//' )" semverParse "$nodeVersion" nodeMinor="$minor" nodePatch="$patch" semverParse "$targetK8sVersion" local targetMinor="$minor" local targetPatch="$patch" if [ "$nodeMinor" -ge "$targetMinor" ]; then return 0 fi DOCKER_REGISTRY_IP=$(kubectl -n kurl get service registry -o=jsonpath='{@.spec.clusterIP}' 2>/dev/null || echo "") printf "${YELLOW}Drain node $nodeName to prepare for upgrade? 
${NC}" confirmY kubernetes_drain "$nodeName" local common_flags common_flags="${common_flags}$(get_docker_registry_ip_flag "${DOCKER_REGISTRY_IP}")" common_flags="${common_flags}$(get_additional_no_proxy_addresses_flag "${NO_PROXY_ADDRESSES}" "${NO_PROXY_ADDRESSES}")" common_flags="${common_flags}$(get_kurl_install_directory_flag "${KURL_INSTALL_DIRECTORY_FLAG}")" printf "\n\n\tRun the upgrade script on remote node to proceed: ${GREEN}$nodeName${NC}\n\n" if [ "$AIRGAP" = "1" ]; then printf "\t${GREEN}cat ./upgrade.sh | sudo bash -s airgap kubernetes-version=${targetK8sVersion}${common_flags}${NC}\n\n" else local prefix= prefix="$(build_installer_prefix "${INSTALLER_ID}" "${KURL_VERSION}" "${KURL_URL}" "${PROXY_ADDRESS}")" printf "\t${GREEN} ${prefix}upgrade.sh | sudo bash -s kubernetes-version=${targetK8sVersion}${common_flags}${NC}\n\n" fi rm -rf $HOME/.kube spinner_until -1 kubernetes_node_has_version "$nodeName" "$targetK8sVersion" logSuccess "Kubernetes $targetK8sVersion detected on $nodeName" kubectl uncordon "$nodeName" spinner_until 120 kubernetes_all_nodes_ready } # In k8s 1.18 the etcd image tag changed from 3.4.3 to 3.4.3-0 but kubeadm does not rewrite the # etcd manifest to use the new tag. When kubeadm init is run after the upgrade it switches to the # tag and etcd takes a few minutes to restart, which often results in kubeadm init failing. 
This # forces use of the updated tag so that the restart of etcd happens during upgrade when the node is # already drained function upgrade_etcd_image_18() { semverParse "$1" if [ "$minor" != "18" ]; then return 0 fi local etcd_tag=$(kubeadm config images list 2>/dev/null | grep etcd | awk -F':' '{ print $NF }') sed -i "s/image: k8s.gcr.io\/etcd:.*/image: k8s.gcr.io\/etcd:$etcd_tag/" /etc/kubernetes/manifests/etcd.yaml } function download_util_binaries() { if [ -z "$AIRGAP" ] && [ -n "$DIST_URL" ]; then package_download "${KURL_BIN_UTILS_FILE}" tar xzf "$(package_filepath "${KURL_BIN_UTILS_FILE}")" fi BIN_SYSTEM_CONFIG=./bin/config BIN_YAMLUTIL=./bin/yamlutil BIN_DOCKER_CONFIG=./bin/docker-config BIN_SUBNET=./bin/subnet BIN_INSTALLERMERGE=./bin/installermerge BIN_YAMLTOBASH=./bin/yamltobash BIN_BASHTOYAML=./bin/bashmerge BIN_PVMIGRATE=./bin/pvmigrate mkdir -p /tmp/kurl-bin-utils/scripts CONFIGURE_SELINUX_SCRIPT=/tmp/kurl-bin-utils/scripts/configure_selinux.sh CONFIGURE_FIREWALLD_SCRIPT=/tmp/kurl-bin-utils/scripts/configure_firewalld.sh CONFIGURE_IPTABLES_SCRIPT=/tmp/kurl-bin-utils/scripts/configure_iptables.sh mkdir -p /tmp/kurl-bin-utils/specs MERGED_YAML_SPEC=/tmp/kurl-bin-utils/specs/merged.yaml PARSED_YAML_SPEC=/tmp/kurl-bin-utils/scripts/variables.sh } function apply_bash_flag_overrides() { if [ -n "$1" ]; then $BIN_BASHTOYAML -c $MERGED_YAML_SPEC -f "$*" fi } function parse_yaml_into_bash_variables() { $BIN_YAMLTOBASH -i $MERGED_YAML_SPEC -b $PARSED_YAML_SPEC source $PARSED_YAML_SPEC rm $PARSED_YAML_SPEC } parse_kubernetes_target_version() { semverParse "$KUBERNETES_VERSION" KUBERNETES_TARGET_VERSION_MAJOR="$major" KUBERNETES_TARGET_VERSION_MINOR="$minor" KUBERNETES_TARGET_VERSION_PATCH="$patch" } function yaml_airgap() { # this is needed because the parsing for yaml comes after the first occasion where the $AIRGAP flag is used # we also account for if $INSTALLER_YAML spec has "$AIRGAP and "INSTALLER_SPEC_FILE spec turns it off" if [[ "$INSTALLER_YAML" =~ 
"airgap: true" ]]; then AIRGAP="1" fi if [ -n "$INSTALLER_SPEC_FILE" ]; then if grep -q "airgap: true" $INSTALLER_SPEC_FILE; then AIRGAP="1" fi if grep -q "airgap: false" $INSTALLER_SPEC_FILE; then AIRGAP="" fi fi } function get_patch_yaml() { while [ "$1" != "" ]; do _param="$(echo "$1" | cut -d= -f1)" _value="$(echo "$1" | grep '=' | cut -d= -f2-)" case $_param in installer-spec-file) if [ -n "$_value" ]; then INSTALLER_SPEC_FILE="$(readlink -f "$_value")" # resolve relative paths before we pushd fi ;; additional-no-proxy-addresses) ;; airgap) AIRGAP="1" ;; kurl-registry-ip) KURL_REGISTRY_IP="$_value" ;; cert-key) ;; control-plane) ;; docker-registry-ip) ;; ha) ;; ignore-remote-load-images-prompt) ;; ignore-remote-upgrade-prompt) ;; kubeadm-token) ;; kubeadm-token-ca-hash) ;; kubernetes-master-address) ;; kubernetes-version) ;; kurl-install-directory) if [ -n "$_value" ]; then KURL_INSTALL_DIRECTORY_FLAG="${_value}" KURL_INSTALL_DIRECTORY="$(realpath ${_value})/kurl" fi ;; load-balancer-address) ;; preflight-ignore) ;; preflight-ignore-warnings) ;; preserve-docker-config) ;; preserve-firewalld-config) ;; preserve-iptables-config) ;; preserve-selinux-config) ;; public-address) ;; private-address) ;; yes) ASSUME_YES=1 ;; auto-upgrades-enabled) AUTO_UPGRADES_ENABLED=1 ;; primary-host) if [ -z "$PRIMARY_HOST" ]; then PRIMARY_HOST="$_value" else PRIMARY_HOST="$PRIMARY_HOST,$_value" fi ;; secondary-host) if [ -z "$SECONDARY_HOST" ]; then SECONDARY_HOST="$_value" else SECONDARY_HOST="$SECONDARY_HOST,$_value" fi ;; force-reapply-addons) FORCE_REAPPLY_ADDONS=1 ;; *) echo >&2 "Error: unknown parameter \"$_param\"" exit 1 ;; esac shift done } function merge_yaml_specs() { if [ -z "$INSTALLER_SPEC_FILE" ] && [ -z "$INSTALLER_YAML" ]; then echo "no yaml spec found" bail fi if [ -z "$INSTALLER_YAML" ]; then cp -f $INSTALLER_SPEC_FILE $MERGED_YAML_SPEC ONLY_APPLY_MERGED=1 return fi if [ -z "$INSTALLER_SPEC_FILE" ]; then cat > $MERGED_YAML_SPEC < 
/tmp/vendor_kurl_installer_spec_docker.yaml </dev/null | grep 'master' | wc -l) #get nodes with the 'master' role, and then search for 'master' to remove the column labels row if [ "$master_count" -gt 1 ]; then HA_CLUSTER=1 fi } function get_addon_config() { local addon_name=$1 addon_name=$(kebab_to_camel "$addon_name") $BIN_YAMLUTIL -j -fp $MERGED_YAML_SPEC -jf "spec.$addon_name" } function render_yaml() { eval "echo \"$(cat $DIR/yaml/$1)\"" } function render_yaml_file() { eval "echo \"$(cat $1)\"" } function render_yaml_file_2() { local file="$1" local data=$(< "$file") local delimiter="__apply_shell_expansion_delimiter__" local command="cat <<$delimiter"$'\n'"$data"$'\n'"$delimiter" eval "$command" } function render_file() { eval "echo \"$(cat $1)\"" } function insert_patches_strategic_merge() { local kustomization_file="$1" local patch_file="$2" if ! grep -q "patchesStrategicMerge" "$kustomization_file"; then echo "patchesStrategicMerge:" >> "$kustomization_file" fi sed -i "/patchesStrategicMerge.*/a - $patch_file" "$kustomization_file" } function insert_resources() { local kustomization_file="$1" local resource_file="$2" if ! grep -q "resources" "$kustomization_file"; then echo "resources:" >> "$kustomization_file" fi sed -i "/resources:.*/a - $resource_file" "$kustomization_file" } function insert_patches_json_6902() { local kustomization_file="$1" local patch_file="$2" local group="$3" local version="$4" local kind="$5" local name="$6" local namespace="$7" if ! 
grep -q "patchesJson6902" "$kustomization_file"; then echo "patchesJson6902:" >> "$kustomization_file" fi # 'fourspace_' and 'twospace_' are used because spaces at the beginning of each line are stripped sed -i "/patchesJson6902.*/a- target:\n\ fourspace_ group: $group\n\ fourspace_ version: $version\n\ fourspace_ kind: $kind\n\ fourspace_ name: $name\n\ fourspace_ namespace: $namespace\n\ twospace_ path: $patch_file" "$kustomization_file" sed -i "s/fourspace_ / /" "$kustomization_file" sed -i "s/twospace_ / /" "$kustomization_file" } function setup_kubeadm_kustomize() { # Clean up the source directories for the kubeadm kustomize resources and # patches. rm -rf $DIR/kustomize/kubeadm/init cp -rf $DIR/kustomize/kubeadm/init-orig $DIR/kustomize/kubeadm/init rm -rf $DIR/kustomize/kubeadm/join cp -rf $DIR/kustomize/kubeadm/join-orig $DIR/kustomize/kubeadm/join rm -rf $DIR/kustomize/kubeadm/init-patches mkdir -p $DIR/kustomize/kubeadm/init-patches rm -rf $DIR/kustomize/kubeadm/join-patches mkdir -p $DIR/kustomize/kubeadm/join-patches if [ -n "$USE_STANDARD_PORT_RANGE" ]; then sed -i 's/80-60000/30000-32767/g' $DIR/kustomize/kubeadm/init/kubeadm-cluster-config-v1beta2.yml fi } function apply_installer_crd() { INSTALLER_CRD_DEFINITION="$DIR/kurlkinds/cluster.kurl.sh_installers.yaml" kubectl apply -f "$INSTALLER_CRD_DEFINITION" if [ -z "$ONLY_APPLY_MERGED" ] && [ -n "$INSTALLER_YAML" ]; then ORIGINAL_INSTALLER_SPEC=/tmp/kurl-bin-utils/specs/original.yaml cat > $ORIGINAL_INSTALLER_SPEC </dev/null | grep initial-cluster | grep -o "${HOSTNAME}-[a-z0-9]*=https*://[^\",]*" | sed -n -e 's/.*https*:\/\/\(.*\):.*/\1/p')" } function k3s_get_kubeconfig() { echo "/etc/rancher/k3s/k3s.yaml" } function k3s_get_containerd_sock() { echo "/run/k3s/containerd/containerd.sock" } function k3s_get_client_kube_apiserver_crt() { echo "/var/lib/rancher/k3s/server/tls/client-kube-apiserver.crt" } function k3s_get_client_kube_apiserver_key() { echo 
"/var/lib/rancher/k3s/server/tls/client-kube-apiserver.key" } function k3s_get_server_ca() { echo "/var/lib/rancher/k3s/server/tls/server-ca.crt" } function k3s_get_server_ca_key() { echo "/var/lib/rancher/k3s/server/tls/server-ca.key" } function k3s_addon_for_each() { local cmd="$1" if [ -n "$METRICS_SERVER_VERSION" ] && [ -z "$METRICS_SERVER_IGNORE" ]; then logWarn "⚠️ Metrics Server is distributed as part of K3S; the version specified in the installer will be ignored." METRICS_SERVER_IGNORE=true fi $cmd aws "$AWS_VERSION" $cmd nodeless "$NODELESS_VERSION" $cmd calico "$CALICO_VERSION" "$CALICO_S3_OVERRIDE" $cmd weave "$WEAVE_VERSION" "$WEAVE_S3_OVERRIDE" $cmd rook "$ROOK_VERSION" "$ROOK_S3_OVERRIDE" $cmd openebs "$OPENEBS_VERSION" "$OPENEBS_S3_OVERRIDE" $cmd longhorn "$LONGHORN_VERSION" "$LONGHORN_S3_OVERRIDE" $cmd minio "$MINIO_VERSION" "$MINIO_S3_OVERRIDE" $cmd contour "$CONTOUR_VERSION" "$CONTOUR_S3_OVERRIDE" $cmd registry "$REGISTRY_VERSION" "$REGISTRY_S3_OVERRIDE" $cmd prometheus "$PROMETHEUS_VERSION" "$PROMETHEUS_S3_OVERRIDE" $cmd kotsadm "$KOTSADM_VERSION" "$KOTSADM_S3_OVERRIDE" $cmd velero "$VELERO_VERSION" "$VELERO_S3_OVERRIDE" $cmd fluentd "$FLUENTD_VERSION" "$FLUENTD_S3_OVERRIDE" $cmd ekco "$EKCO_VERSION" "$EKCO_S3_OVERRIDE" $cmd collectd "$COLLECTD_VERSION" "$COLLECTD_S3_OVERRIDE" $cmd cert-manager "$CERT_MANAGER_VERSION" "$CERT_MANAGER_S3_OVERRIDE" $cmd sonobuoy "$SONOBUOY_VERSION" "$SONOBUOY_S3_OVERRIDE" $cmd goldpinger "$GOLDPINGER_VERSION" "$GOLDPINGER_S3_OVERRIDE" } function k3s_reset() { . /usr/local/bin/k3s-uninstall.sh } function k3s_containerd_restart() { k3s_restart } function k3s_registry_containerd_configure() { local registry_ip="$1" if grep -qs ' "${registry_ip}":' /etc/rancher/k3s/registries.yaml; then echo "Registry ${registry_ip} TLS already configured for containerd" return 0 fi mkdir -p /etc/rancher/k3s/ if [ ! -f /etc/rancher/k3s/registries.yaml ] || ! 
grep -qs '^configs:' /etc/rancher/k3s/registries.yaml ; then echo "configs:" >> /etc/rancher/k3s/registries.yaml fi cat >> /etc/rancher/k3s/registries.yaml < /dev/null } function kubeadm_discover_private_ip() { local private_address private_address="$(cat /etc/kubernetes/manifests/kube-apiserver.yaml 2>/dev/null | grep advertise-address | awk -F'=' '{ print $2 }')" # This is needed on k8s 1.18.x as $PRIVATE_ADDRESS is found to have a newline echo "${private_address}" | tr -d '\n' } function kubeadm_get_kubeconfig() { echo "/etc/kubernetes/admin.conf" } function kubeadm_get_containerd_sock() { echo "/run/containerd/containerd.sock" } function kubeadm_get_client_kube_apiserver_crt() { echo "/etc/kubernetes/pki/apiserver-kubelet-client.crt" } function kubeadm_get_client_kube_apiserver_key() { echo "/etc/kubernetes/pki/apiserver-kubelet-client.key" } function kubeadm_get_server_ca() { echo "/etc/kubernetes/pki/ca.crt" } function kubeadm_get_server_ca_key() { echo "/etc/kubernetes/pki/ca.key" } function kubeadm_addon_for_each() { local cmd="$1" $cmd aws "$AWS_VERSION" $cmd nodeless "$NODELESS_VERSION" $cmd calico "$CALICO_VERSION" "$CALICO_S3_OVERRIDE" $cmd weave "$WEAVE_VERSION" "$WEAVE_S3_OVERRIDE" $cmd antrea "$ANTREA_VERSION" "$ANTREA_S3_OVERRIDE" $cmd rook "$ROOK_VERSION" "$ROOK_S3_OVERRIDE" $cmd ekco "$EKCO_VERSION" "$EKCO_S3_OVERRIDE" $cmd openebs "$OPENEBS_VERSION" "$OPENEBS_S3_OVERRIDE" $cmd longhorn "$LONGHORN_VERSION" "$LONGHORN_S3_OVERRIDE" $cmd minio "$MINIO_VERSION" "$MINIO_S3_OVERRIDE" $cmd contour "$CONTOUR_VERSION" "$CONTOUR_S3_OVERRIDE" $cmd registry "$REGISTRY_VERSION" "$REGISTRY_S3_OVERRIDE" $cmd prometheus "$PROMETHEUS_VERSION" "$PROMETHEUS_S3_OVERRIDE" $cmd kotsadm "$KOTSADM_VERSION" "$KOTSADM_S3_OVERRIDE" $cmd velero "$VELERO_VERSION" "$VELERO_S3_OVERRIDE" $cmd fluentd "$FLUENTD_VERSION" "$FLUENTD_S3_OVERRIDE" $cmd collectd "$COLLECTD_VERSION" "$COLLECTD_S3_OVERRIDE" $cmd cert-manager "$CERT_MANAGER_VERSION" "$CERT_MANAGER_S3_OVERRIDE" $cmd 
metrics-server "$METRICS_SERVER_VERSION" "$METRICS_SERVER_S3_OVERRIDE" $cmd sonobuoy "$SONOBUOY_VERSION" "$SONOBUOY_S3_OVERRIDE" $cmd goldpinger "$GOLDPINGER_VERSION" "$GOLDPINGER_S3_OVERRIDE" } function kubeadm_reset() { if [ -z "$WEAVE_TAG" ]; then WEAVE_TAG="$(get_weave_version)" fi if [ -n "$DOCKER_VERSION" ]; then kubeadm reset --force else kubeadm reset --force --cri-socket /var/run/containerd/containerd.sock fi printf "kubeadm reset completed\n" if [ -f /etc/cni/net.d/10-weave.conflist ]; then kubeadm_weave_reset fi printf "weave reset completed\n" } function kubeadm_weave_reset() { BRIDGE=weave DATAPATH=datapath CONTAINER_IFNAME=ethwe DOCKER_BRIDGE=docker0 # https://github.com/weaveworks/weave/blob/v2.8.1/weave#L461 for NETDEV in $BRIDGE $DATAPATH ; do if [ -d /sys/class/net/$NETDEV ] ; then if [ -d /sys/class/net/$NETDEV/bridge ] ; then ip link del $NETDEV else if [ -n "$DOCKER_VERSION" ]; then docker run --rm --pid host --net host --privileged --entrypoint=/usr/bin/weaveutil weaveworks/weaveexec:$WEAVE_TAG delete-datapath $NETDEV else # --pid host local guid=$(< /dev/urandom tr -dc A-Za-z0-9 | head -c16) # TODO(ethan): rke2 containerd.sock path is incorrect ctr -n=k8s.io run --rm --net-host --privileged docker.io/weaveworks/weaveexec:$WEAVE_TAG $guid /usr/bin/weaveutil delete-datapath $NETDEV fi fi fi done # Remove any lingering bridged fastdp, pcap and attach-bridge veths for VETH in $(ip -o link show | grep -o v${CONTAINER_IFNAME}[^:@]*) ; do ip link del $VETH >/dev/null 2>&1 || true done if [ "$DOCKER_BRIDGE" != "$BRIDGE" ] ; then kubeadm_run_iptables -t filter -D FORWARD -i $DOCKER_BRIDGE -o $BRIDGE -j DROP 2>/dev/null || true fi kubeadm_run_iptables -t filter -D INPUT -d 127.0.0.1/32 -p tcp --dport 6784 -m addrtype ! --src-type LOCAL -m conntrack ! 
--ctstate RELATED,ESTABLISHED -m comment --comment "Block non-local access to Weave Net control port" -j DROP >/dev/null 2>&1 || true
# NOTE(review): the rules above and below are the tail of a Weave Net iptables
# teardown routine whose definition begins before this chunk. Every deletion is
# best-effort (redirected + "|| true") so cleanup proceeds even when a given
# rule was never installed.
kubeadm_run_iptables -t filter -D INPUT -i $DOCKER_BRIDGE -p udp --dport 53 -j ACCEPT >/dev/null 2>&1 || true
kubeadm_run_iptables -t filter -D INPUT -i $DOCKER_BRIDGE -p tcp --dport 53 -j ACCEPT >/dev/null 2>&1 || true
if [ -n "$DOCKER_VERSION" ]; then
    # Resolve the docker bridge IP via weaveutil so the bridge-specific DROP rules can be removed
    DOCKER_BRIDGE_IP=$(docker run --rm --pid host --net host --privileged -v /var/run/docker.sock:/var/run/docker.sock --entrypoint=/usr/bin/weaveutil weaveworks/weaveexec:$WEAVE_TAG bridge-ip $DOCKER_BRIDGE)
    kubeadm_run_iptables -t filter -D INPUT -i $DOCKER_BRIDGE -p tcp --dst $DOCKER_BRIDGE_IP --dport $PORT -j DROP >/dev/null 2>&1 || true
    kubeadm_run_iptables -t filter -D INPUT -i $DOCKER_BRIDGE -p udp --dst $DOCKER_BRIDGE_IP --dport $PORT -j DROP >/dev/null 2>&1 || true
    kubeadm_run_iptables -t filter -D INPUT -i $DOCKER_BRIDGE -p udp --dst $DOCKER_BRIDGE_IP --dport $(($PORT + 1)) -j DROP >/dev/null 2>&1 || true
fi
kubeadm_run_iptables -t filter -D FORWARD -i $BRIDGE ! -o $BRIDGE -j ACCEPT 2>/dev/null || true
kubeadm_run_iptables -t filter -D FORWARD -o $BRIDGE -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT 2>/dev/null || true
kubeadm_run_iptables -t filter -D FORWARD -i $BRIDGE -o $BRIDGE -j ACCEPT 2>/dev/null || true
# Flush, unhook, and delete the chains installed by Weave's network policy controller
kubeadm_run_iptables -F WEAVE-NPC >/dev/null 2>&1 || true
kubeadm_run_iptables -t filter -D FORWARD -o $BRIDGE -j WEAVE-NPC 2>/dev/null || true
kubeadm_run_iptables -t filter -D FORWARD -o $BRIDGE -m state --state NEW -j NFLOG --nflog-group 86 2>/dev/null || true
kubeadm_run_iptables -t filter -D FORWARD -o $BRIDGE -j DROP 2>/dev/null || true
kubeadm_run_iptables -X WEAVE-NPC >/dev/null 2>&1 || true
kubeadm_run_iptables -F WEAVE-EXPOSE >/dev/null 2>&1 || true
kubeadm_run_iptables -t filter -D FORWARD -o $BRIDGE -j WEAVE-EXPOSE 2>/dev/null || true
kubeadm_run_iptables -X WEAVE-EXPOSE >/dev/null 2>&1 || true
# Remove Weave's NAT chain and its POSTROUTING hooks
kubeadm_run_iptables -t nat -F WEAVE >/dev/null 2>&1 || true
kubeadm_run_iptables -t nat -D POSTROUTING -j WEAVE >/dev/null 2>&1 || true
kubeadm_run_iptables -t nat -D POSTROUTING -o $BRIDGE -j ACCEPT >/dev/null 2>&1 || true
kubeadm_run_iptables -t nat -X WEAVE >/dev/null 2>&1 || true
# Delete leftover Weave veth interfaces (names contain v${CONTAINER_IFNAME}pl);
# ${LOCAL_IFNAME%@*} strips the "@ifN" suffix that "ip link show" appends.
for LOCAL_IFNAME in $(ip link show | grep v${CONTAINER_IFNAME}pl | cut -d ' ' -f 2 | tr -d ':') ; do
    ip link del ${LOCAL_IFNAME%@*} >/dev/null 2>&1 || true
done
}

# Run iptables, adding -w (wait for the xtables lock) when the installed
# iptables supports it. The capability probe runs once per script execution
# and its result is cached in the CHECKED_IPTABLES_W / IPTABLES_W globals.
function kubeadm_run_iptables() {
    # -w is recent addition to iptables
    if [ -z "$CHECKED_IPTABLES_W" ] ; then
        iptables -S -w >/dev/null 2>&1 && IPTABLES_W=-w
        CHECKED_IPTABLES_W=1
    fi
    # $IPTABLES_W is intentionally left unquoted: when empty it must expand to
    # no argument at all, not to an empty-string argument.
    iptables $IPTABLES_W "$@"
}

# Restart containerd (kubeadm-distro implementation of the
# ${K8S_DISTRO}_containerd_restart dispatch point).
function kubeadm_containerd_restart() {
    systemctl restart containerd
}

# Configure containerd to trust the in-cluster registry's TLS certificate.
# Arguments: $1 - registry service IP.
# Idempotent: returns early if an entry for this registry already exists in
# /etc/containerd/config.toml.
function kubeadm_registry_containerd_configure() {
    local registry_ip="$1"
    if grep -q "plugins.\"io.containerd.grpc.v1.cri\".registry.configs.\"${registry_ip}\".tls" /etc/containerd/config.toml; then
        echo "Registry ${registry_ip} TLS already configured for containerd"
        return 0
    fi
    # NOTE(review): this append looks truncated — a heredoc carrying the registry
    # TLS config section was presumably stripped when this file was generated; as
    # written it appends nothing. Verify against the original template.
    cat >> /etc/containerd/config.toml </dev/null
}

# TODO (dan): consolidate this with the k3s distro
# Echo this node's private IP as recorded in the rke2 etcd static pod manifest
# (parsed out of the initial-cluster URL for this hostname).
function rke2_discover_private_ip() {
    echo "$(cat /var/lib/rancher/rke2/agent/pod-manifests/etcd.yaml 2>/dev/null | grep initial-cluster | grep -o "${HOSTNAME}-[a-z0-9]*=https*://[^\",]*" | sed -n -e 's/.*https*:\/\/\(.*\):.*/\1/p')"
}

# Path accessors for rke2 artifacts; callers dispatch on ${K8S_DISTRO} to pick
# the right path per distro.
function rke2_get_kubeconfig() {
    echo "/etc/rancher/rke2/rke2.yaml"
}

function rke2_get_containerd_sock() {
    echo "/run/k3s/containerd/containerd.sock"
}

function rke2_get_client_kube_apiserver_crt() {
    echo "/var/lib/rancher/rke2/server/tls/client-kube-apiserver.crt"
}

function rke2_get_client_kube_apiserver_key() {
    echo "/var/lib/rancher/rke2/server/tls/client-kube-apiserver.key"
}

function rke2_get_server_ca() {
    echo "/var/lib/rancher/rke2/server/tls/server-ca.crt"
}

function rke2_get_server_ca_key() {
    echo "/var/lib/rancher/rke2/server/tls/server-ca.key"
}

# Invoke "$cmd <addon> <version> [s3-override]" for every addon supported on
# the rke2 distro. Metrics Server is warned about and skipped because rke2
# ships its own copy.
function rke2_addon_for_each() {
    local cmd="$1"
    if [ -n "$METRICS_SERVER_VERSION" ] && [ -z "$METRICS_SERVER_IGNORE" ]; then
        logWarn "⚠️ Metrics Server is distributed as part of RKE2; the version specified in the installer will be ignored."
METRICS_SERVER_IGNORE=true
    fi
    $cmd aws "$AWS_VERSION"
    $cmd nodeless "$NODELESS_VERSION"
    $cmd calico "$CALICO_VERSION" "$CALICO_S3_OVERRIDE"
    $cmd weave "$WEAVE_VERSION" "$WEAVE_S3_OVERRIDE"
    $cmd rook "$ROOK_VERSION" "$ROOK_S3_OVERRIDE"
    $cmd openebs "$OPENEBS_VERSION" "$OPENEBS_S3_OVERRIDE"
    $cmd longhorn "$LONGHORN_VERSION" "$LONGHORN_S3_OVERRIDE"
    $cmd minio "$MINIO_VERSION" "$MINIO_S3_OVERRIDE"
    $cmd contour "$CONTOUR_VERSION" "$CONTOUR_S3_OVERRIDE"
    $cmd registry "$REGISTRY_VERSION" "$REGISTRY_S3_OVERRIDE"
    $cmd prometheus "$PROMETHEUS_VERSION" "$PROMETHEUS_S3_OVERRIDE"
    $cmd kotsadm "$KOTSADM_VERSION" "$KOTSADM_S3_OVERRIDE"
    $cmd velero "$VELERO_VERSION" "$VELERO_S3_OVERRIDE"
    $cmd fluentd "$FLUENTD_VERSION" "$FLUENTD_S3_OVERRIDE"
    $cmd ekco "$EKCO_VERSION" "$EKCO_S3_OVERRIDE"
    $cmd collectd "$COLLECTD_VERSION" "$COLLECTD_S3_OVERRIDE"
    $cmd cert-manager "$CERT_MANAGER_VERSION" "$CERT_MANAGER_S3_OVERRIDE"
    $cmd sonobuoy "$SONOBUOY_VERSION" "$SONOBUOY_S3_OVERRIDE"
    $cmd goldpinger "$GOLDPINGER_VERSION" "$GOLDPINGER_S3_OVERRIDE"
}

# Remove rke2 from this host using the vendor-provided uninstall script.
# Sourced (not executed) so it runs in the current shell environment.
function rke2_reset() {
    . /usr/bin/rke2-uninstall.sh
}

# rke2 runs containerd embedded in the rke2 service, so "restart containerd"
# means restarting rke2 itself.
function rke2_containerd_restart() {
    rke2_restart
}

# Configure rke2's embedded containerd to trust the in-cluster registry.
# Arguments: $1 - registry service IP.
# Idempotent via the grep guard on /etc/rancher/rke2/registries.yaml.
function rke2_registry_containerd_configure() {
    local registry_ip="$1"
    # NOTE(review): the single-quoted pattern means ${registry_ip} is NOT
    # expanded here — grep searches for the literal text '"${registry_ip}":'.
    # Verify intent; a double-quoted pattern may have been intended.
    if grep -qs ' "${registry_ip}":' /etc/rancher/rke2/registries.yaml; then
        echo "Registry ${registry_ip} TLS already configured for containerd"
        return 0
    fi
    mkdir -p /etc/rancher/rke2/
    # Ensure the top-level "configs:" key exists before appending an entry under it
    if [ ! -f /etc/rancher/rke2/registries.yaml ] || ! grep -qs '^configs:' /etc/rancher/rke2/registries.yaml ; then
        echo "configs:" >> /etc/rancher/rke2/registries.yaml
    fi
    # NOTE(review): this append looks truncated — a heredoc carrying the registry
    # entry was presumably stripped when this file was generated; as written it
    # appends nothing. Verify against the original template.
    cat >> /etc/rancher/rke2/registries.yaml < /dev/null
}

# Rewrite the CoreDNS Corefile to forward to $NAMESERVER instead of the
# host's /etc/resolv.conf, then restart the coredns deployment.
function configure_coredns() {
    # Runs after kubeadm init which always resets the coredns configmap - no need to check for
    # and revert a previously set nameserver
    if [ -z "$NAMESERVER" ]; then
        return 0
    fi
    kubectl -n kube-system get configmap coredns -oyaml > /tmp/Corefile
    # Example lines to replace from k8s 1.17 and 1.19
    # "forward . /etc/resolv.conf" => "forward . 8.8.8.8"
    # "forward . /etc/resolv.conf {" => "forward . 8.8.8.8 {"
    sed -i "s/forward \. \/etc\/resolv\.conf/forward \. ${NAMESERVER}/" /tmp/Corefile
    kubectl -n kube-system replace configmap coredns -f /tmp/Corefile
    kubectl -n kube-system rollout restart deployment/coredns
}

# Initialize the Kubernetes control plane on this node: render the kubeadm
# config via kustomize, run kubeadm init, and perform post-init fixups
# (load balancer address changes, kurl namespace, registry wiring).
# Reads many globals set by the installer (HA_CLUSTER, PRIVATE_ADDRESS,
# LOAD_BALANCER_ADDRESS, PUBLIC_ADDRESS, ...); sets API_SERVICE_ADDRESS,
# KUBEADM_TOKEN_CA_HASH, DID_INIT_KUBERNETES.
function init() {
    logStep "Initialize Kubernetes"
    kubernetes_maybe_generate_bootstrap_token
    API_SERVICE_ADDRESS="$PRIVATE_ADDRESS:6443"
    if [ "$HA_CLUSTER" = "1" ]; then
        API_SERVICE_ADDRESS="$LOAD_BALANCER_ADDRESS:$LOAD_BALANCER_PORT"
    fi
    local oldLoadBalancerAddress=$(kubernetes_load_balancer_address)
    if commandExists ekco_handle_load_balancer_address_change_pre_init; then
        ekco_handle_load_balancer_address_change_pre_init $oldLoadBalancerAddress $LOAD_BALANCER_ADDRESS
    fi
    if [ "$EKCO_ENABLE_INTERNAL_LOAD_BALANCER" = "1" ] && commandExists ekco_bootstrap_internal_lb; then
        ekco_bootstrap_internal_lb
    fi
    kustomize_kubeadm_init=./kustomize/kubeadm/init
    CERT_KEY=
    CERT_KEY_EXPIRY=
    if [ "$HA_CLUSTER" = "1" ]; then
        # Random 64-hex-char certificate key; kubeadm expires it after 2 hours
        CERT_KEY=$(< /dev/urandom tr -dc a-f0-9 | head -c64)
        CERT_KEY_EXPIRY=$(TZ="UTC" date -d "+2 hour" --rfc-3339=second | sed 's/ /T/')
        insert_patches_strategic_merge \
            $kustomize_kubeadm_init/kustomization.yaml \
            patch-certificate-key.yaml
    fi
    # kustomize can merge multiple list patches in some cases but it is not working for me on the
    # ClusterConfiguration.apiServer.certSANs list
    if [ -n "$PUBLIC_ADDRESS" ] && [ -n "$LOAD_BALANCER_ADDRESS" ]; then
        insert_patches_strategic_merge \
            $kustomize_kubeadm_init/kustomization.yaml \
            patch-public-and-load-balancer-address.yaml
    elif [ -n "$PUBLIC_ADDRESS" ]; then
        insert_patches_strategic_merge \
            $kustomize_kubeadm_init/kustomization.yaml \
            patch-public-address.yaml
    elif [ -n "$LOAD_BALANCER_ADDRESS" ]; then
        insert_patches_strategic_merge \
            $kustomize_kubeadm_init/kustomization.yaml \
            patch-load-balancer-address.yaml
    fi
    # Add kubeadm init patches from addons.
for patch in $(ls -1 ${kustomize_kubeadm_init}-patches/* 2>/dev/null || echo); do
        patch_basename="$(basename $patch)"
        cp $patch $kustomize_kubeadm_init/$patch_basename
        insert_patches_strategic_merge \
            $kustomize_kubeadm_init/kustomization.yaml \
            $patch_basename
    done
    mkdir -p "$KUBEADM_CONF_DIR"
    kubectl kustomize $kustomize_kubeadm_init > $KUBEADM_CONF_DIR/kubeadm-init-raw.yaml
    render_yaml_file $KUBEADM_CONF_DIR/kubeadm-init-raw.yaml > $KUBEADM_CONF_FILE
    # kustomize requires assets have a metadata field while kubeadm config will reject yaml containing it
    # this uses a go binary found in kurl/cmd/yamlutil to strip the metadata field from the yaml
    #
    cp $KUBEADM_CONF_FILE $KUBEADM_CONF_DIR/kubeadm_conf_copy_in
    $DIR/bin/yamlutil -r -fp $KUBEADM_CONF_DIR/kubeadm_conf_copy_in -yf metadata
    mv $KUBEADM_CONF_DIR/kubeadm_conf_copy_in $KUBEADM_CONF_FILE
    # Append a KubeletConfiguration document: graceful shutdown on 1.21+,
    # systemd cgroup driver on older versions.
    if [ "$KUBERNETES_TARGET_VERSION_MINOR" -ge "21" ]; then
        cat << EOF >> $KUBEADM_CONF_FILE
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
shutdownGracePeriod: 30s
shutdownGracePeriodCriticalPods: 10s
---
EOF
    else
        cat << EOF >> $KUBEADM_CONF_FILE
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
---
EOF
    fi
    # When no_proxy changes kubeadm init rewrites the static manifests and fails because the api is
    # restarting. Trigger the restart ahead of time and wait for it to be healthy.
    if [ -f "/etc/kubernetes/manifests/kube-apiserver.yaml" ] && [ -n "$no_proxy" ] && ! cat /etc/kubernetes/manifests/kube-apiserver.yaml | grep -q "$no_proxy"; then
        kubeadm init phase control-plane apiserver --config $KUBEADM_CONF_FILE
        sleep 2
        if ! spinner_until 60 kubernetes_api_is_healthy; then
            echo "Failed to wait for kubernetes API restart after no_proxy change" # continue
        fi
    fi
    if [ "$HA_CLUSTER" = "1" ]; then
        UPLOAD_CERTS="--upload-certs"
    fi
    # kubeadm init temporarily taints this node which causes rook to move any mons on it and may
    # lead to a loss of quorum
    disable_rook_ceph_operator
    # since K8s 1.19.1 kubeconfigs point to local API server even in HA setup. When upgrading from
    # earlier versions and using a load balancer, kubeadm init will bail because the kubeconfigs
    # already exist pointing to the load balancer
    rm -rf /etc/kubernetes/*.conf
    # Regenerate api server cert in case load balancer address changed
    if [ -f /etc/kubernetes/pki/apiserver.crt ]; then
        mv -f /etc/kubernetes/pki/apiserver.crt /tmp/
    fi
    if [ -f /etc/kubernetes/pki/apiserver.key ]; then
        mv -f /etc/kubernetes/pki/apiserver.key /tmp/
    fi
    # pipefail so a kubeadm failure is not masked by the tee
    set -o pipefail
    kubeadm init \
        --ignore-preflight-errors=all \
        --config $KUBEADM_CONF_FILE \
        $UPLOAD_CERTS \
        | tee /tmp/kubeadm-init
    set +o pipefail
    # Node would be cordoned if migrated from docker to containerd
    local node=$(hostname | tr '[:upper:]' '[:lower:]')
    kubectl uncordon "$node"
    if [ -n "$LOAD_BALANCER_ADDRESS" ]; then
        spinner_until 120 cert_has_san "$PRIVATE_ADDRESS:6443" "$LOAD_BALANCER_ADDRESS"
    fi
    if commandExists ekco_cleanup_bootstrap_internal_lb; then
        ekco_cleanup_bootstrap_internal_lb
    fi
    spinner_kubernetes_api_stable
    exportKubeconfig
    # Extract the discovery token CA cert hash from the kubeadm init output
    KUBEADM_TOKEN_CA_HASH=$(cat /tmp/kubeadm-init | grep 'discovery-token-ca-cert-hash' | awk '{ print $2 }' | head -1)
    wait_for_nodes
    enable_rook_ceph_operator
    DID_INIT_KUBERNETES=1
    logSuccess "Kubernetes Master Initialized"
    local currentLoadBalancerAddress=$(kubernetes_load_balancer_address)
    if [ "$currentLoadBalancerAddress" != "$oldLoadBalancerAddress" ]; then
        # restart scheduler and controller-manager on this node so they use the new address
        mv /etc/kubernetes/manifests/kube-scheduler.yaml /tmp/ && sleep 1 && mv /tmp/kube-scheduler.yaml /etc/kubernetes/manifests/
        mv /etc/kubernetes/manifests/kube-controller-manager.yaml /tmp/ && sleep 1 && mv /tmp/kube-controller-manager.yaml /etc/kubernetes/manifests/
        if kubernetes_has_remotes; then
            if commandExists ekco_handle_load_balancer_address_change_kubeconfigs; then
                ekco_handle_load_balancer_address_change_kubeconfigs
            else
                # Manual steps for ekco < 0.11.0
                printf "${YELLOW}\nThe load balancer address has changed. Run the following on all remote nodes to use the new address${NC}\n"
                printf "\n"
                if [ "$AIRGAP" = "1" ]; then
                    printf "${GREEN} cat ./tasks.sh | sudo bash -s set-kubeconfig-server https://${currentLoadBalancerAddress}${NC}\n"
                else
                    local prefix=
                    prefix="$(build_installer_prefix "${INSTALLER_ID}" "${KURL_VERSION}" "${KURL_URL}" "${PROXY_ADDRESS}")"
                    printf "${GREEN} ${prefix}tasks.sh | sudo bash -s set-kubeconfig-server https://${currentLoadBalancerAddress}${NC}\n"
                fi
                printf "\n"
                printf "Continue? "
                confirmN
            fi
            if commandExists ekco_handle_load_balancer_address_change_post_init; then
                ekco_handle_load_balancer_address_change_post_init $oldLoadBalancerAddress $LOAD_BALANCER_ADDRESS
            fi
        fi
        # restart kube-proxies so they use the new address
        kubectl -n kube-system delete pods --selector=k8s-app=kube-proxy
    fi
    labelNodes
    kubectl cluster-info
    # create kurl namespace if it doesn't exist
    kubectl get ns kurl 2>/dev/null 1>/dev/null || kubectl create ns kurl 1>/dev/null
    spinner_until 120 kubernetes_default_service_account_exists
    spinner_until 120 kubernetes_service_exists
    logSuccess "Cluster Initialized"
    configure_coredns
    if commandExists registry_init; then
        registry_init
        # containerd must be told to trust the in-cluster registry, then restarted
        if [ -n "$CONTAINERD_VERSION" ]; then
            ${K8S_DISTRO}_registry_containerd_configure "${DOCKER_REGISTRY_IP}"
            ${K8S_DISTRO}_containerd_restart
            spinner_kubernetes_api_healthy
        fi
    fi
}

# Post-install bookkeeping: record the bootstrap token expiry, write the
# kurl-config configmap, and remove docker if it was replaced.
function post_init() {
    BOOTSTRAP_TOKEN_EXPIRY=$(kubeadm token list | grep $BOOTSTRAP_TOKEN | awk '{print $3}')
    kurl_config
    uninstall_docker
}

# Generate a kubeadm bootstrap token unless BOOTSTRAP_TOKEN is already set,
# then announce it. Sets the BOOTSTRAP_TOKEN global.
function kubernetes_maybe_generate_bootstrap_token() {
    if [ -z "$BOOTSTRAP_TOKEN" ]; then
        logStep "generate kubernetes bootstrap token"
        BOOTSTRAP_TOKEN=$(kubeadm token generate)
    fi
    echo "Kubernetes bootstrap token: ${BOOTSTRAP_TOKEN}"
    echo "This token will expire in 24 hours"
}

# (Re)create the kube-system/kurl-config configmap recording the parameters
# of this installation for later joins and upgrades.
function kurl_config() {
    if kubernetes_resource_exists kube-system configmap kurl-config; then
        kubectl -n kube-system delete configmap kurl-config
    fi
    kubectl -n kube-system create configmap kurl-config \
        --from-literal=kurl_url="$KURL_URL" \
        --from-literal=installer_id="$INSTALLER_ID" \
        --from-literal=ha="$HA_CLUSTER" \
        --from-literal=airgap="$AIRGAP" \
        --from-literal=ca_hash="$KUBEADM_TOKEN_CA_HASH" \
        --from-literal=docker_registry_ip="$DOCKER_REGISTRY_IP" \
        --from-literal=kubernetes_api_address="$API_SERVICE_ADDRESS" \
        --from-literal=bootstrap_token="$BOOTSTRAP_TOKEN" \
        --from-literal=bootstrap_token_expiration="$BOOTSTRAP_TOKEN_EXPIRY" \
        --from-literal=cert_key="$CERT_KEY" \
        --from-literal=upload_certs_expiration="$CERT_KEY_EXPIRY" \
        --from-literal=service_cidr="$SERVICE_CIDR" \
        --from-literal=pod_cidr="$POD_CIDR" \
        --from-literal=kurl_install_directory="$KURL_INSTALL_DIRECTORY_FLAG"
}

# Print the end-of-install summary: success banner, addon notes, kubeconfig
# instructions, and the join commands for adding worker/master nodes.
function outro() {
    echo
    if [ -z "$PUBLIC_ADDRESS" ]; then
        if [ -z "$PRIVATE_ADDRESS" ]; then
            PUBLIC_ADDRESS=""
            PRIVATE_ADDRESS=""
        else
            PUBLIC_ADDRESS="$PRIVATE_ADDRESS"
        fi
    fi
    # Flags appended to every printed join command
    local common_flags
    common_flags="${common_flags}$(get_docker_registry_ip_flag "${DOCKER_REGISTRY_IP}")"
    common_flags="${common_flags}$(get_additional_no_proxy_addresses_flag "${PROXY_ADDRESS}" "${SERVICE_CIDR},${POD_CIDR}")"
    common_flags="${common_flags}$(get_kurl_install_directory_flag "${KURL_INSTALL_DIRECTORY_FLAG}")"
    common_flags="${common_flags}$(get_remotes_flags)"
    KUBEADM_TOKEN_CA_HASH=$(cat /tmp/kubeadm-init | grep 'discovery-token-ca-cert-hash' | awk '{ print $2 }' | head -1)
    printf "\n"
    printf "\t\t${GREEN}Installation${NC}\n"
    printf "\t\t${GREEN} Complete ✔${NC}\n"
    addon_outro
    printf "\n"
    kubeconfig_setup_outro
    printf "\n"
    if [ "$OUTRO_NOTIFIY_TO_RESTART_DOCKER" = "1" ]; then
        printf "\n"
        printf "\n"
        printf "The local /etc/docker/daemon.json has been merged with the spec from the installer, but has not been applied. To apply restart docker."
        printf "\n"
        printf "\n"
        printf "${GREEN} systemctl daemon-reload${NC}\n"
        printf "${GREEN} systemctl restart docker${NC}\n"
        printf "\n"
        printf "These settings will automatically be applied on the next restart."
        printf "\n"
    fi
    printf "\n"
    printf "\n"
    local prefix=
    prefix="$(build_installer_prefix "${INSTALLER_ID}" "${KURL_VERSION}" "${KURL_URL}" "${PROXY_ADDRESS}")"
    if [ "$HA_CLUSTER" = "1" ]; then
        printf "Master node join commands expire after two hours, and worker node join commands expire after 24 hours.\n"
        printf "\n"
        if [ "$AIRGAP" = "1" ]; then
            printf "To generate new node join commands, run ${GREEN}cat ./tasks.sh | sudo bash -s join_token ha airgap${NC} on an existing master node.\n"
        else
            printf "To generate new node join commands, run ${GREEN}${prefix}tasks.sh | sudo bash -s join_token ha${NC} on an existing master node.\n"
        fi
    else
        printf "Node join commands expire after 24 hours.\n"
        printf "\n"
        if [ "$AIRGAP" = "1" ]; then
            printf "To generate new node join commands, run ${GREEN}cat ./tasks.sh | sudo bash -s join_token airgap${NC} on this node.\n"
        else
            printf "To generate new node join commands, run ${GREEN}${prefix}tasks.sh | sudo bash -s join_token${NC} on this node.\n"
        fi
    fi
    if [ "$AIRGAP" = "1" ]; then
        printf "\n"
        printf "To add worker nodes to this installation, copy and unpack this bundle on your other nodes, and run the following:"
        printf "\n"
        printf "\n"
        printf "${GREEN} cat ./join.sh | sudo bash -s airgap kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=${KUBEADM_TOKEN_CA_HASH} kubernetes-version=${KUBERNETES_VERSION}${common_flags}\n"
        printf "${NC}"
        printf "\n"
        printf "\n"
        if [ "$HA_CLUSTER" = "1" ]; then
            printf "\n"
            printf "To add ${GREEN}MASTER${NC} nodes to this installation, copy and unpack this bundle on your other nodes, and run the following:"
            printf "\n"
            printf "\n"
            printf "${GREEN} cat ./join.sh | sudo bash -s airgap kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=${KUBEADM_TOKEN_CA_HASH} kubernetes-version=${KUBERNETES_VERSION} cert-key=${CERT_KEY} control-plane${common_flags}\n"
            printf "${NC}"
            printf "\n"
            printf "\n"
        fi
    else
        printf "\n"
        printf "To add worker nodes to this installation, run the following script on your other nodes:"
        printf "\n"
        printf "${GREEN} ${prefix}join.sh | sudo bash -s kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=${KUBEADM_TOKEN_CA_HASH} kubernetes-version=${KUBERNETES_VERSION}${common_flags}\n"
        printf "${NC}"
        printf "\n"
        printf "\n"
        if [ "$HA_CLUSTER" = "1" ]; then
            printf "\n"
            printf "To add ${GREEN}MASTER${NC} nodes to this installation, run the following script on your other nodes:"
            printf "\n"
            printf "${GREEN} ${prefix}join.sh | sudo bash -s kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=$KUBEADM_TOKEN_CA_HASH kubernetes-version=${KUBERNETES_VERSION} cert-key=${CERT_KEY} control-plane${common_flags}\n"
            printf "${NC}"
            printf "\n"
            printf "\n"
        fi
    fi
}

# Install the host packages, load addon/helm assets, and initialize the
# control plane, then register the Installer CRD.
function all_kubernetes_install() {
    kubernetes_host
    install_helm
    ${K8S_DISTRO}_addon_for_each addon_load
    helm_load
    init
    apply_installer_crd
}

# Wrap all_kubernetes_install with start/success reporting.
function report_kubernetes_install() {
    report_addon_start "kubernetes" "$KUBERNETES_VERSION"
    export REPORTING_CONTEXT_INFO="kubernetes $KUBERNETES_VERSION"
    all_kubernetes_install
    export REPORTING_CONTEXT_INFO=""
    report_addon_success "kubernetes" "$KUBERNETES_VERSION"
}

# Default distro; overridden below when an rke2 or k3s version is requested.
K8S_DISTRO=kubeadm

# Installer entry point: parse flags and YAML spec, dispatch to the rke2/k3s
# flows when requested, otherwise run the full kubeadm install sequence.
function main() {
    require_root_user
    get_patch_yaml "$@"
    maybe_read_kurl_config_from_cluster
    if [ "$AIRGAP" = "1" ]; then
        move_airgap_assets
    fi
    pushd_install_directory
    yaml_airgap
    proxy_bootstrap
    download_util_binaries
    get_machine_id
    merge_yaml_specs
    apply_bash_flag_overrides "$@"
    parse_yaml_into_bash_variables
    MASTER=1 # parse_yaml_into_bash_variables will unset master
    prompt_license
    # ALPHA FLAGS
    if [ -n "$RKE2_VERSION" ]; then
        K8S_DISTRO=rke2
        rke2_main "$@"
        exit 0
    elif [ -n "$K3S_VERSION" ]; then
        K8S_DISTRO=k3s
        k3s_main "$@"
        exit 0
    fi
    export KUBECONFIG=/etc/kubernetes/admin.conf
    is_ha
    parse_kubernetes_target_version
    discover full-cluster
    report_install_start
    trap ctrl_c SIGINT # trap ctrl+c (SIGINT) and handle it by reporting that the user exited intentionally (along with the line/version/etc)
    trap trap_report_error ERR # trap errors and handle it by reporting the error line and parent function
    preflights
    common_prompts
    journald_persistent
    configure_proxy
    configure_no_proxy_preinstall
    ${K8S_DISTRO}_addon_for_each addon_fetch
    if [ -z "$CURRENT_KUBERNETES_VERSION" ]; then
        host_preflights "1" "0" "0"
    else
        host_preflights "1" "0" "1"
    fi
    install_host_dependencies
    get_common
    setup_kubeadm_kustomize
    ${K8S_DISTRO}_addon_for_each addon_pre_init
    discover_pod_subnet
    discover_service_subnet
    configure_no_proxy
    install_cri
    get_shared
    report_upgrade_kubernetes
    report_kubernetes_install
    export SUPPORT_BUNDLE_READY=1 # allow ctrl+c and ERR traps to collect support bundles now that k8s is installed
    kurl_init_config
    ${K8S_DISTRO}_addon_for_each addon_install
    maybe_cleanup_rook
    helmfile_sync
    post_init
    outro
    package_cleanup
    popd_install_directory
    report_install_success
}

main "$@"