#!/bin/bash set -e MASTER=1 DIR=. KURL_URL="https://kurl.sh" DIST_URL="https://kurl-sh.s3.amazonaws.com/dist" INSTALLER_ID="waydevonprem" REPLICATED_APP_URL="https://replicated.app" KURL_UTIL_IMAGE="replicated/kurl-util:v2021.01.22-0" KURL_BIN_UTILS_FILE="kurl-bin-utils-v2021.01.22-0.tar.gz" # STEP_VERSIONS array is generated by the server and injected at runtime based on supported k8s versions STEP_VERSIONS=(0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 0.0.0 1.15.3 1.16.4 1.17.13 1.18.10 1.19.7) INSTALLER_YAML="apiVersion: cluster.kurl.sh/v1beta1 kind: Installer metadata: name: waydevonprem spec: kubernetes: version: 1.19.7 docker: version: 19.03.10 weave: version: 2.6.5 rook: version: 1.0.4 contour: version: 1.11.0 registry: version: 2.7.1 prometheus: version: 0.33.0 kotsadm: version: 1.29.1 " function addon_for_each() { local cmd="$1" $cmd aws "$AWS_VERSION" $cmd nodeless "$NODELESS_VERSION" $cmd calico "$CALICO_VERSION" "$CALICO_S3_OVERRIDE" $cmd weave "$WEAVE_VERSION" "$WEAVE_S3_OVERRIDE" $cmd rook "$ROOK_VERSION" "$ROOK_S3_OVERRIDE" $cmd openebs "$OPENEBS_VERSION" "$OPENEBS_S3_OVERRIDE" $cmd minio "$MINIO_VERSION" "$MINIO_S3_OVERRIDE" $cmd contour "$CONTOUR_VERSION" "$CONTOUR_S3_OVERRIDE" $cmd registry "$REGISTRY_VERSION" "$REGISTRY_S3_OVERRIDE" $cmd prometheus "$PROMETHEUS_VERSION" "$PROMETHEUS_S3_OVERRIDE" $cmd kotsadm "$KOTSADM_VERSION" "$KOTSADM_S3_OVERRIDE" $cmd velero "$VELERO_VERSION" "$VELERO_S3_OVERRIDE" $cmd fluentd "$FLUENTD_VERSION" "$FLUENTD_S3_OVERRIDE" $cmd ekco "$EKCO_VERSION" "$EKCO_S3_OVERRIDE" $cmd collectd "$COLLECTD_VERSION" "$COLLECTD_S3_OVERRIDE" $cmd cert-manager "$CERT_MANAGER_VERSION" "$CERT_MANAGER_S3_OVERRIDE" $cmd metrics-server "$METRICS_SERVER_VERSION" "$METRICS_SERVER_S3_OVERRIDE" } ADDONS_HAVE_HOST_COMPONENTS=0 function addon_install() { local name=$1 local version=$2 if [ -z "$version" ]; then return 0 fi logStep "Addon $name $version" report_addon_start "$name" "$version" rm -rf 
# Run before Kubernetes is initialized: fetch the addon bundle (online mode
# only, preferring the per-addon S3 override URL), source its installer and
# call <name>_pre_init if the addon defines one.
# $1 - addon name, $2 - version (no-op when empty), $3 - optional S3 override.
function addon_pre_init() {
    local name=$1
    local version=$2
    local s3Override=$3
    if [ -z "$version" ]; then
        return 0
    fi
    if [ "$AIRGAP" != "1" ]; then
        if [ -n "$s3Override" ]; then
            addon_fetch "$s3Override"
        elif [ -n "$DIST_URL" ]; then
            addon_fetch "$DIST_URL/$name-$version.tar.gz"
        fi
    fi
    . $DIR/addons/$name/$version/install.sh
    if commandExists ${name}_pre_init; then
        ${name}_pre_init
    fi
}

# Run on a joining node: fetch the addon bundle, load its images into the
# container runtime, then call <name>_join if the addon defines one.
# $1 - addon name, $2 - version (no-op when empty), $3 - optional S3 override.
function addon_join() {
    local name=$1
    local version=$2
    local s3Override=$3
    if [ -z "$version" ]; then
        return 0
    fi
    if [ "$AIRGAP" != "1" ]; then
        if [ -n "$s3Override" ]; then
            addon_fetch "$s3Override"
        elif [ -n "$DIST_URL" ]; then
            addon_fetch "$DIST_URL/$name-$version.tar.gz"
        fi
    fi
    addon_load "$name" "$version"
    . $DIR/addons/$name/$version/install.sh
    if commandExists ${name}_join; then
        logStep "Addon $name $version"
        ${name}_join
    fi
}

# Load an addon's container images into the local runtime (docker or ctr).
function addon_load() {
    local name=$1
    local version=$2
    if [ -z "$version" ]; then
        return 0
    fi
    load_images $DIR/addons/$name/$version/images
}

# Download archive "$1" and unpack it into the current directory, removing
# the archive afterwards.
function addon_fetch() {
    local url=$1
    local archiveName=$(basename $url)
    echo "Fetching $archiveName"
    curl -sSLO "$url"
    tar xf $archiveName
    rm $archiveName
}

# Post-install wrap-up: when any addon (or a proxy configuration) requires
# host-level changes and the cluster has remote nodes, print the upgrade
# command the operator must run on each remote node and wait for confirmation;
# then give every addon present on disk a chance to run its <name>_outro hook.
function addon_outro() {
    if [ -n "$PROXY_ADDRESS" ]; then
        ADDONS_HAVE_HOST_COMPONENTS=1
    fi
    if [ "$ADDONS_HAVE_HOST_COMPONENTS" = "1" ] && kubernetes_has_remotes; then
        local dockerRegistryIP=""
        if [ -n "$DOCKER_REGISTRY_IP" ]; then
            dockerRegistryIP=" docker-registry-ip=$DOCKER_REGISTRY_IP"
        fi
        local proxyFlag=""
        local noProxyAddrs=""
        if [ -n "$PROXY_ADDRESS" ]; then
            proxyFlag=" -x $PROXY_ADDRESS"
            noProxyAddrs=" additional-no-proxy-addresses=${SERVICE_CIDR},${POD_CIDR}"
        fi
        local prefix="curl -sSL${proxyFlag} $KURL_URL/$INSTALLER_ID/"
        # in airgap mode the script is already on disk, so just cat it
        if [ "$AIRGAP" = "1" ] || [ -z "$KURL_URL" ]; then
            prefix="cat "
        fi
        printf "\n${YELLOW}Run this script on all remote nodes to apply changes${NC}\n"
        if [ "$AIRGAP" = "1" ]; then
            printf "\n\t${GREEN}${prefix}upgrade.sh | sudo bash -s airgap ${dockerRegistryIP}${noProxyAddrs}${NC}\n\n"
        else
            printf "\n\t${GREEN}${prefix}upgrade.sh | sudo bash -s${dockerRegistryIP}${noProxyAddrs}${NC}\n\n"
        fi
        printf "Press enter to proceed\n"
        prompt
    fi
    # invoke <addon>_outro for every addon directory that exists on disk
    while read -r name; do
        if commandExists ${name}_outro; then
            ${name}_outro
        fi
    done < <(find addons/ -mindepth 1 -maxdepth 1 -type d -printf '%f\n')
}
# Default storage provisioner for this installer spec.
STORAGE_PROVISIONER=rook

# Terminal colors used by the log helpers below.
GREEN='\033[0;32m'
BLUE='\033[0;94m'
LIGHT_BLUE='\033[0;34m'
YELLOW='\033[0;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

KUBEADM_CONF_DIR=/opt/replicated
KUBEADM_CONF_FILE="$KUBEADM_CONF_DIR/kubeadm.conf"

# True (0) when the given command exists on PATH.
commandExists() {
    command -v "$@" > /dev/null 2>&1
}

# Set key "$2" to string value "$3" in flat JSON settings file "$1", creating
# the file if needed. The file is edited textually with grep/sed/awk, not a
# JSON parser, so it only supports simple one-level {"key": "value"} files.
insertOrReplaceJsonParam() {
    if ! [ -f "$1" ]; then
        # If settings file does not exist
        mkdir -p "$(dirname "$1")"
        echo "{\"$2\": \"$3\"}" > "$1"
    else
        # Settings file exists
        if grep -q -E "\"$2\" *: *\"[^\"]*\"" "$1"; then
            # If settings file contains named setting, replace it
            sed -i -e "s/\"$2\" *: *\"[^\"]*\"/\"$2\": \"$3\"/g" "$1"
        else
            # Insert into settings file (with proper commas)
            if [ $(wc -c <"$1") -ge 5 ]; then
                # File long enough to actually have an entry, insert "name": "value",\n after first {
                _commonJsonReplaceTmp="$(awk "NR==1,/^{/{sub(/^{/, \"{\\\"$2\\\": \\\"$3\\\", \")} 1" "$1")"
                echo "$_commonJsonReplaceTmp" > "$1"
            else
                # file not long enough to actually have contents, replace wholesale
                echo "{\"$2\": \"$3\"}" > "$1"
            fi
        fi
    fi
}

# Split semver "$1" into the globals major, minor and patch. The patch
# component is truncated at the first '-' or '.' (pre-release/build tags).
semverParse() {
    major="${1%%.*}"
    minor="${1#$major.}"
    minor="${minor%%.*}"
    patch="${1#$major.$minor.}"
    patch="${patch%%[-.]*}"
}

# Log helpers. All write to stderr so command substitution output stays clean.
logSuccess() {
    printf "${GREEN}✔ $1${NC}\n" 1>&2
}

logStep() {
    printf "${BLUE}⚙ $1${NC}\n" 1>&2
}

logSubstep() {
    printf "\t${LIGHT_BLUE}- $1${NC}\n" 1>&2
}

logFail() {
    printf "${RED}$1${NC}\n" 1>&2
}

logWarn() {
    printf "${YELLOW}$1${NC}\n" 1>&2
}

# Log the message(s) as a failure and abort the script.
bail() {
    logFail "$@"
    exit 1
}

# Block until the Kubernetes API lists nodes, polling every 2s for up to
# ~4 minutes. The final bare kubectl call prints the error and, under the
# script-wide `set -e`, terminates the script on failure.
waitForNodes() {
    n=0
    while ! kubectl get nodes >/dev/null 2>&1; do
        n="$(( $n + 1 ))"
        if [ "$n" -ge "120" ]; then
            # this should exit script on non-zero exit code and print error message
            kubectl get nodes 1>/dev/null
        fi
        sleep 2
    done
}
# Label nodes as provisioned by kurl installation
# (these labels should have been added by kurl installation.
# See kubeadm-init and kubeadm-join yamk files.
# This bit will ensure the labels are added for pre-existing cluster
# during a kurl upgrade.)
labelNodes() {
    # Quote "$NODE" (SC2086) -- node names are safe today but quoting costs nothing.
    for NODE in $(kubectl get nodes --no-headers | awk '{print $1}'); do
        kurl_label=$(kubectl describe nodes "$NODE" | grep "kurl.sh\/cluster=true") || true
        if [[ -z $kurl_label ]]; then
            kubectl label node --overwrite "$NODE" kurl.sh/cluster=true
        fi
    done
}

# Show an ASCII spinner until a pod whose name starts with $2 in namespace $1
# reaches the Running phase.
spinnerPodRunning() {
    namespace=$1
    podPrefix=$2
    local delay=0.75
    local spinstr='|/-\'
    while ! kubectl -n "$namespace" get pods 2>/dev/null | grep "^$podPrefix" | awk '{ print $3}' | grep '^Running$' > /dev/null ; do
        local temp=${spinstr#?}
        printf " [%c] " "$spinstr"
        local spinstr=$temp${spinstr%"$temp"}
        sleep $delay
        printf "\b\b\b\b\b\b"
    done
    printf " \b\b\b\b"
}

# Compare docker versions "$1" and "$2" including the patch component.
# Sets COMPARE_DOCKER_VERSIONS_RESULT to -1 ($1 older), 0 (equal) or 1 ($1 newer).
COMPARE_DOCKER_VERSIONS_RESULT=
compareDockerVersions() {
    # reset
    COMPARE_DOCKER_VERSIONS_RESULT=
    compareDockerVersionsIgnorePatch "$1" "$2"
    if [ "$COMPARE_DOCKER_VERSIONS_RESULT" -ne "0" ]; then
        return
    fi
    parseDockerVersion "$1"
    _a_patch="$DOCKER_VERSION_PATCH"
    parseDockerVersion "$2"
    _b_patch="$DOCKER_VERSION_PATCH"
    if [ "$_a_patch" -lt "$_b_patch" ]; then
        COMPARE_DOCKER_VERSIONS_RESULT=-1
        return
    fi
    if [ "$_a_patch" -gt "$_b_patch" ]; then
        COMPARE_DOCKER_VERSIONS_RESULT=1
        return
    fi
    COMPARE_DOCKER_VERSIONS_RESULT=0
}

# Compare docker versions "$1" and "$2" on major/minor only.
# Sets COMPARE_DOCKER_VERSIONS_RESULT to -1, 0 or 1 (see compareDockerVersions).
compareDockerVersionsIgnorePatch() {
    # reset
    COMPARE_DOCKER_VERSIONS_RESULT=
    parseDockerVersion "$1"
    _a_major="$DOCKER_VERSION_MAJOR"
    _a_minor="$DOCKER_VERSION_MINOR"
    parseDockerVersion "$2"
    _b_major="$DOCKER_VERSION_MAJOR"
    _b_minor="$DOCKER_VERSION_MINOR"
    if [ "$_a_major" -lt "$_b_major" ]; then
        COMPARE_DOCKER_VERSIONS_RESULT=-1
        return
    fi
    if [ "$_a_major" -gt "$_b_major" ]; then
        COMPARE_DOCKER_VERSIONS_RESULT=1
        return
    fi
    if [ "$_a_minor" -lt "$_b_minor" ]; then
        COMPARE_DOCKER_VERSIONS_RESULT=-1
        return
    fi
    if [ "$_a_minor" -gt "$_b_minor" ]; then
        COMPARE_DOCKER_VERSIONS_RESULT=1
        return
    fi
    COMPARE_DOCKER_VERSIONS_RESULT=0
}

# Split a docker version string like "18.09.7-ce" into the four globals below.
DOCKER_VERSION_MAJOR=
DOCKER_VERSION_MINOR=
DOCKER_VERSION_PATCH=
DOCKER_VERSION_RELEASE=
parseDockerVersion() {
    # reset
    DOCKER_VERSION_MAJOR=
    DOCKER_VERSION_MINOR=
    DOCKER_VERSION_PATCH=
    DOCKER_VERSION_RELEASE=
    if [ -z "$1" ]; then
        return
    fi
    # intentional unquoted word-splitting on '.' then '-' to carve up the version
    OLD_IFS="$IFS" && IFS=. && set -- $1 && IFS="$OLD_IFS"
    DOCKER_VERSION_MAJOR=$1
    DOCKER_VERSION_MINOR=$2
    OLD_IFS="$IFS" && IFS=- && set -- $3 && IFS="$OLD_IFS"
    DOCKER_VERSION_PATCH=$1
    DOCKER_VERSION_RELEASE=$2
}

# Copy the admin kubeconfig to the invoking user's home directory, make
# /etc/kubernetes/admin.conf group-readable by the detected sudo group, and
# wire KUBECONFIG plus kubectl completion into /etc/profile exactly once.
# Quoting added throughout (SC2086): $HOME/$SUDO_USER could contain spaces.
exportKubeconfig() {
    cp /etc/kubernetes/admin.conf "$HOME/admin.conf"
    chown "$SUDO_USER:$SUDO_GID" "$HOME/admin.conf"
    current_user_sudo_group
    if [ -n "$FOUND_SUDO_GROUP" ]; then
        chown "root:$FOUND_SUDO_GROUP" /etc/kubernetes/admin.conf
    fi
    chmod 440 /etc/kubernetes/admin.conf
    if ! grep -q "kubectl completion bash" /etc/profile; then
        echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >> /etc/profile
        echo "source <(kubectl completion bash)" >> /etc/profile
    fi
}

# True (0) when the named resource exists in the namespace.
function kubernetes_resource_exists() {
    local namespace=$1
    local kind=$2
    local name=$3
    kubectl -n "$namespace" get "$kind" "$name" &>/dev/null
}

# Install the configured container runtime: Docker when DOCKER_VERSION is set,
# otherwise containerd when CONTAINERD_VERSION is set.
function install_cri() {
    if [ -n "$DOCKER_VERSION" ]; then
        install_docker
        apply_docker_config
    elif [ -n "$CONTAINERD_VERSION" ]; then
        containerd_get_host_packages_online "$CONTAINERD_VERSION"
        . $DIR/addons/containerd/$CONTAINERD_VERSION/install.sh
        containerd_install
    fi
}
# Load every image archive found under directory "$1" into the container
# runtime: docker when Docker is installed, otherwise containerd via ctr
# (the ctr path expects gzipped archives).
function load_images() {
    if [ -n "$DOCKER_VERSION" ]; then
        find "$1" -type f | xargs -I {} bash -c "docker load < {}"
    else
        find "$1" -type f | xargs -I {} bash -c "cat {} | gunzip | ctr -n=k8s.io images import -"
    fi
}

# try a command every 2 seconds until it succeeds, up to 30 tries max; useful for kubectl commands
# where the Kubernetes API could be restarting
# $1 - command, $2.. - its arguments ($args is intentionally unquoted so the
# arguments re-split into words).
function try_1m() {
    local fn="$1"
    local args=${@:2}
    n=0
    while ! $fn $args 2>/dev/null ; do
        n="$(( $n + 1 ))"
        if [ "$n" -ge "30" ]; then
            # for the final try print the error and let it exit
            $fn $args
            exit 1 # in case we're in a `set +e` state
        fi
        sleep 2
    done
}

# Run a test every second with a spinner until it succeeds
# $1 - timeout in seconds (empty or -1 waits forever); $2 - command; $3.. args.
# Returns 1 on timeout, 0 once the command succeeds.
function spinner_until() {
    local timeoutSeconds="$1"
    local cmd="$2"
    local args=${@:3}
    if [ -z "$timeoutSeconds" ]; then
        timeoutSeconds=-1
    fi
    local delay=1
    local elapsed=0
    local spinstr='|/-\'
    while ! $cmd $args; do
        elapsed=$(($elapsed + $delay))
        if [ "$timeoutSeconds" -ge 0 ] && [ "$elapsed" -gt "$timeoutSeconds" ]; then
            return 1
        fi
        local temp=${spinstr#?}
        printf " [%c] " "$spinstr"
        local spinstr=$temp${spinstr%"$temp"}
        sleep $delay
        printf "\b\b\b\b\b\b"
    done
}

# Download and unpack the shared common.tar.gz bundle (online mode), then load
# the kurl-util image into the container runtime if the bundle provided it.
function get_shared() {
    if [ "$AIRGAP" != "1" ] && [ -n "$DIST_URL" ]; then
        curl -sSOL $DIST_URL/common.tar.gz
        tar xf common.tar.gz
        rm common.tar.gz
    fi
    if [ -f shared/kurl-util.tar ]; then
        if [ -n "$DOCKER_VERSION" ]; then
            docker load < shared/kurl-util.tar
        else
            ctr -n=k8s.io images import shared/kurl-util.tar
        fi
    fi
}

# Print (one per line) every group name granted ALL privileges in the sudoers
# configuration.
function all_sudo_groups() {
    # examples of lines we're looking for in any sudo config files to find group with root privileges
    # %wheel ALL = (ALL) ALL
    # %google-sudoers ALL=(ALL:ALL) NOPASSWD:ALL
    # %admin ALL=(ALL) ALL
    cat /etc/sudoers | grep -Eo '^%\S+\s+ALL\s?=.*ALL\b' | awk '{print $1 }' | sed 's/%//'
    find /etc/sudoers.d/ -type f | xargs cat | grep -Eo '^%\S+\s+ALL\s?=.*ALL\b' | awk '{print $1 }' | sed 's/%//'
}

# if the sudo group cannot be detected default to root
FOUND_SUDO_GROUP=

# Find the first sudo-capable group the invoking (sudo) user belongs to and
# store it in FOUND_SUDO_GROUP. No-op when the script is not run via sudo.
function current_user_sudo_group() {
    if [ -z "$SUDO_UID" ]; then
        return 0
    fi
    # return the first sudo group the current user belongs to
    while read -r groupName; do
        if id "$SUDO_UID" -Gn | grep -q "\b${groupName}\b"; then
            FOUND_SUDO_GROUP="$groupName"
            return 0
        fi
    done < <(all_sudo_groups)
}

# Print end-of-install instructions for getting kubectl access, creating
# ~/.kube/config for the sudo user automatically when possible.
function kubeconfig_setup_outro() {
    current_user_sudo_group
    if [ -n "$FOUND_SUDO_GROUP" ]; then
        printf "To access the cluster with kubectl, reload your shell:\n"
        printf "\n"
        printf "${GREEN} bash -l${NC}\n"
        return
    fi
    local owner="$SUDO_UID"
    if [ -z "$owner" ]; then
        # not currently running via sudo
        owner="$USER"
    else
        # running via sudo - automatically create ~/.kube/config if it does not exist
        # $() replaces the original backticks; eval is needed for ~user expansion
        ownerdir=$(eval echo "~$(id -un "$owner")")
        if [ ! -f "$ownerdir/.kube/config" ]; then
            mkdir -p "$ownerdir/.kube"
            cp /etc/kubernetes/admin.conf "$ownerdir/.kube/config"
            chown -R "$owner" "$ownerdir/.kube"
            printf "To access the cluster with kubectl, ensure the KUBECONFIG environment variable is unset:\n"
            printf "\n"
            printf "${GREEN} echo unset KUBECONFIG >> ~/.profile${NC}\n"
            printf "${GREEN} bash -l${NC}\n"
            return
        fi
    fi
    printf "To access the cluster with kubectl, copy kubeconfig to your home directory:\n"
    printf "\n"
    printf "${GREEN} cp /etc/kubernetes/admin.conf ~/.kube/config${NC}\n"
    printf "${GREEN} chown -R ${owner} ~/.kube${NC}\n"
    printf "${GREEN} echo unset KUBECONFIG >> ~/.profile${NC}\n"
    printf "${GREEN} bash -l${NC}\n"
    printf "\n"
    printf "You will likely need to use sudo to copy and chown admin.conf.\n"
}

# Split "host:port" in $1 into the globals HOST and PORT.
splitHostPort() {
    oIFS="$IFS"; IFS=":" read -r HOST PORT <<< "$1"; IFS="$oIFS"
}

# True (0) when $1 looks like a dotted-quad IPv4 address (digits only; octet
# ranges are not validated).
isValidIpv4() {
    if echo "$1" | grep -qs '^[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*\.[0-9][0-9]*$'; then
        return 0
    else
        return 1
    fi
}

# True (0) when $1 looks like a colon-separated IPv6 address.
isValidIpv6() {
    if echo "$1" | grep -qs "^\([0-9a-fA-F]\{0,4\}:\)\{1,7\}[0-9a-fA-F]\{0,4\}$"; then
        return 0
    else
        return 1
    fi
}

# True (0) when the TLS certificate served at host:port $1 includes $2 in its
# Subject Alternative Name extension.
function cert_has_san() {
    local address=$1
    local san=$2
    # fix: use the declared $san local instead of repeating "$2"
    echo "Q" | openssl s_client -connect "$address" 2>/dev/null | openssl x509 -noout -text 2>/dev/null | grep --after-context=1 'X509v3 Subject Alternative Name' | grep -q "$san"
}
# By default journald persists logs if the directory /var/log/journal exists so create it if it's
# not found. Sysadmins may still disable persistent logging with /etc/systemd/journald.conf.
function journald_persistent() {
    if [ -d /var/log/journal ]; then
        return 0
    fi
    mkdir -p /var/log/journal
    systemd-tmpfiles --create --prefix /var/log/journal
    systemctl restart systemd-journald
    journalctl --flush
}

# Remove file "$1" if it exists; succeed either way.
# Quoted "$1" (SC2086) so paths with spaces do not word-split.
function rm_file() {
    if [ -f "$1" ]; then
        rm "$1"
    fi
}

# Install OS host packages bundled under "$1" for the detected distribution.
# The directory portion is quoted; the *.deb / *.rpm glob stays outside the
# quotes so it still expands.
function install_host_packages() {
    local dir=$1
    case "$LSB_DIST" in
        ubuntu)
            DEBIAN_FRONTEND=noninteractive dpkg --install --force-depends-version "${dir}/ubuntu-${DIST_VERSION}"/*.deb
            ;;
        centos|rhel|amzn)
            if [[ "$DIST_VERSION" =~ ^8 ]]; then
                rpm --upgrade --force --nodeps "${dir}/rhel-8"/*.rpm
            else
                rpm --upgrade --force --nodeps "${dir}/rhel-7"/*.rpm
            fi
            ;;
    esac
}

# Checks if the provided param is in the current path, and if it is not adds it
# this is useful for systems where /usr/local/bin is not in the path for root
function path_add() {
    if [ -d "$1" ] && [[ ":$PATH:" != *":$1:"* ]]; then
        PATH="${PATH:+"$PATH:"}$1"
    fi
}

# Detect the host environment: distribution, current Kubernetes version,
# pre-existing container runtimes, public/private IPs and kernel version.
# $1 - passed through to discoverCurrentKubernetesVersion ("fullCluster").
function discover() {
    local fullCluster="$1"
    detectLsbDist
    discoverCurrentKubernetesVersion "$fullCluster"
    # never upgrade docker underneath kubernetes
    if docker version >/dev/null 2>&1 ; then
        SKIP_DOCKER_INSTALL=1
        echo "Docker already exists on this machine so no docker install will be performed"
    fi
    if ctr --version >/dev/null 2>&1 ; then
        SKIP_CONTAINERD_INSTALL=1
        echo "Containerd already exists on this machine so no containerd install will be performed"
    fi
    discover_public_ip
    discover_private_ip
    KERNEL_MAJOR=$(uname -r | cut -d'.' -f1)
    KERNEL_MINOR=$(uname -r | cut -d'.' -f2)
}

# Distribution detection results, set by detectLsbDist.
LSB_DIST=
DIST_VERSION=
DIST_VERSION_MAJOR=

# Identify the Linux distribution and version, populating LSB_DIST,
# DIST_VERSION and DIST_VERSION_MAJOR. Bails with installation instructions
# when the distribution is unsupported or cannot be determined.
detectLsbDist() {
    _dist=
    _error_msg="We have checked /etc/os-release and /etc/centos-release files."
    if [ -f /etc/centos-release ] && [ -r /etc/centos-release ]; then
        # CentOS 6 example: CentOS release 6.9 (Final)
        # CentOS 7 example: CentOS Linux release 7.5.1804 (Core)
        _dist="$(cat /etc/centos-release | cut -d" " -f1)"
        _version="$(cat /etc/centos-release | sed 's/Linux //' | cut -d" " -f3 | cut -d "." -f1-2)"
    elif [ -f /etc/os-release ] && [ -r /etc/os-release ]; then
        _dist="$(. /etc/os-release && echo "$ID")"
        _version="$(. /etc/os-release && echo "$VERSION_ID")"
    elif [ -f /etc/redhat-release ] && [ -r /etc/redhat-release ]; then
        # this is for RHEL6
        _dist="rhel"
        _major_version=$(cat /etc/redhat-release | cut -d" " -f7 | cut -d "." -f1)
        _minor_version=$(cat /etc/redhat-release | cut -d" " -f7 | cut -d "." -f2)
        _version=$_major_version
    elif [ -f /etc/system-release ] && [ -r /etc/system-release ]; then
        if grep --quiet "Amazon Linux" /etc/system-release; then
            # Special case for Amazon 2014.03
            _dist="amzn"
            _version=$(awk '/Amazon Linux/{print $NF}' /etc/system-release)
        fi
    else
        _error_msg="$_error_msg\nDistribution cannot be determined because neither of these files exist."
    fi
    if [ -n "$_dist" ]; then
        _error_msg="$_error_msg\nDetected distribution is ${_dist}."
        _dist="$(echo "$_dist" | tr '[:upper:]' '[:lower:]')"
        # Each arm enforces a minimum supported version; the IFS=. trick splits
        # the version so $1 becomes the major component.
        case "$_dist" in
            ubuntu)
                _error_msg="$_error_msg\nHowever detected version $_version is less than 12."
                oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS";
                [ $1 -ge 12 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1
                ;;
            debian)
                _error_msg="$_error_msg\nHowever detected version $_version is less than 7."
                oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS";
                [ $1 -ge 7 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1
                ;;
            fedora)
                _error_msg="$_error_msg\nHowever detected version $_version is less than 21."
                oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS";
                [ $1 -ge 21 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1
                ;;
            rhel)
                _error_msg="$_error_msg\nHowever detected version $_version is less than 7."
                oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS";
                [ $1 -ge 6 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1
                ;;
            centos)
                _error_msg="$_error_msg\nHowever detected version $_version is less than 6."
                oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS";
                [ $1 -ge 6 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1
                ;;
            amzn)
                _error_msg="$_error_msg\nHowever detected version $_version is not one of\n    2, 2.0, 2018.03, 2017.09, 2017.03, 2016.09, 2016.03, 2015.09, 2015.03, 2014.09, 2014.03."
                [ "$_version" = "2" ] || [ "$_version" = "2.0" ] || \
                [ "$_version" = "2018.03" ] || \
                [ "$_version" = "2017.03" ] || [ "$_version" = "2017.09" ] || \
                [ "$_version" = "2016.03" ] || [ "$_version" = "2016.09" ] || \
                [ "$_version" = "2015.03" ] || [ "$_version" = "2015.09" ] || \
                [ "$_version" = "2014.03" ] || [ "$_version" = "2014.09" ] && \
                LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$_version
                ;;
            sles)
                _error_msg="$_error_msg\nHowever detected version $_version is less than 12."
                oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS";
                [ $1 -ge 12 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1
                ;;
            ol)
                _error_msg="$_error_msg\nHowever detected version $_version is less than 6."
                oIFS="$IFS"; IFS=.; set -- $_version; IFS="$oIFS";
                [ $1 -ge 6 ] && LSB_DIST=$_dist && DIST_VERSION=$_version && DIST_VERSION_MAJOR=$1
                ;;
            *)
                _error_msg="$_error_msg\nThat is an unsupported distribution."
                ;;
        esac
    fi
    if [ -z "$LSB_DIST" ]; then
        echo >&2 "$(echo | sed "i$_error_msg")"
        echo >&2 ""
        echo >&2 "Please visit the following URL for more detailed installation instructions:"
        echo >&2 ""
        echo >&2 "  https://help.replicated.com/docs/distributing-an-application/installing/"
        exit 1
    fi
}
# Upgrade planning state, set by discoverCurrentKubernetesVersion.
# "STEP" flags mean the node is more than one minor behind and must first be
# upgraded through an intermediate version from STEP_VERSIONS.
KUBERNETES_STEP_LOCAL_PRIMARY=0
KUBERNETES_UPGRADE_LOCAL_PRIMARY_MINOR=0
KUBERNETES_UPGRADE_LOCAL_PRIMARY_PATCH=0
KUBERNETES_STEP_REMOTE_PRIMARIES=0
KUBERNETES_UPGRADE_REMOTE_PRIMARIES_MINOR=0
KUBERNETES_UPGRADE_REMOTE_PRIMARIES_PATCH=0
KUBERNETES_STEP_SECONDARIES=0
KUBERNETES_UPGRADE_SECONDARIES_MINOR=0
KUBERNETES_UPGRADE_SECONDARIES_PATCH=0

# Determine the currently installed Kubernetes version (from the local
# kube-apiserver manifest) and, when "$1" is non-empty, inspect every node in
# the cluster to decide which minor/patch/step upgrades are required.
# Bails when the cluster is newer than the target or more than two minors older.
# NOTE(review): kubernetes_get_remote_primaries/kubernetes_get_secondaries and
# the KUBERNETES_TARGET_VERSION_* globals are defined elsewhere in the script.
discoverCurrentKubernetesVersion() {
    local fullCluster="$1"
    set +e
    CURRENT_KUBERNETES_VERSION=$(cat /etc/kubernetes/manifests/kube-apiserver.yaml 2>/dev/null | grep image: | grep -oE '[0-9]+.[0-9]+.[0-9]+')
    set -e
    if [ -z "$CURRENT_KUBERNETES_VERSION" ]; then
        # This is a new install and no upgrades are required
        return 0
    fi
    # These versions are for the local primary
    semverParse $CURRENT_KUBERNETES_VERSION
    KUBERNETES_CURRENT_VERSION_MAJOR="$major"
    KUBERNETES_CURRENT_VERSION_MINOR="$minor"
    KUBERNETES_CURRENT_VERSION_PATCH="$patch"
    if [ -z "$fullCluster" ]; then
        return 0
    fi
    # Populate arrays with versions of remote nodes
    kubernetes_get_remote_primaries
    kubernetes_get_secondaries
    # If any nodes have a lower minor than this then we'll need to do an extra step upgrade
    STEP_VERSION_MINOR=$(($KUBERNETES_TARGET_VERSION_MINOR - 1))
    # These will be used in preflight checks
    LOWEST_SUPPORTED_MINOR=$(($STEP_VERSION_MINOR - 1))
    MIN_CLUSTER_NODE_MINOR_FOUND=$KUBERNETES_CURRENT_VERSION_MINOR
    MAX_CLUSTER_NODE_MINOR_FOUND=$KUBERNETES_CURRENT_VERSION_MINOR
    # Check if minor, step, or patch upgrades are needed for the local primary
    if [ $KUBERNETES_CURRENT_VERSION_MINOR -lt $STEP_VERSION_MINOR ]; then
        KUBERNETES_STEP_LOCAL_PRIMARY=1
        KUBERNETES_UPGRADE_LOCAL_PRIMARY_MINOR=1
        KUBERNETES_UPGRADE=1
    elif [ $KUBERNETES_CURRENT_VERSION_MINOR -lt $KUBERNETES_TARGET_VERSION_MINOR ]; then
        KUBERNETES_UPGRADE_LOCAL_PRIMARY_MINOR=1
        KUBERNETES_UPGRADE=1
    elif [ $KUBERNETES_CURRENT_VERSION_PATCH -lt $KUBERNETES_TARGET_VERSION_PATCH ]; then
        KUBERNETES_UPGRADE_LOCAL_PRIMARY_PATCH=1
        KUBERNETES_UPGRADE=1
    fi
    # Check for upgrades required on remote primaries
    for i in ${!KUBERNETES_REMOTE_PRIMARIES[@]}; do
        semverParse ${KUBERNETES_REMOTE_PRIMARY_VERSIONS[$i]}
        # Adjust min and max minor vars for preflights
        if [ $minor -lt $MIN_CLUSTER_NODE_MINOR_FOUND ]; then
            MIN_CLUSTER_NODE_MINOR_FOUND=$minor
        fi
        if [ $minor -gt $MAX_CLUSTER_NODE_MINOR_FOUND ]; then
            MAX_CLUSTER_NODE_MINOR_FOUND=$minor
        fi
        # Check step, minor, and patch for this remote primary
        if [ $minor -lt $STEP_VERSION_MINOR ]; then
            KUBERNETES_STEP_REMOTE_PRIMARIES=1
            KUBERNETES_UPGRADE_REMOTE_PRIMARIES_MINOR=1
            KUBERNETES_UPGRADE=1
        elif [ $minor -lt $KUBERNETES_TARGET_VERSION_MINOR ]; then
            KUBERNETES_UPGRADE_REMOTE_PRIMARIES_MINOR=1
            KUBERNETES_UPGRADE=1
        elif [ $patch -lt $KUBERNETES_TARGET_VERSION_PATCH ]; then
            KUBERNETES_UPGRADE_REMOTE_PRIMARIES_PATCH=1
            KUBERNETES_UPGRADE=1
        fi
    done
    # Check for upgrades required on remote secondaries
    for i in ${!KUBERNETES_SECONDARIES[@]}; do
        semverParse ${KUBERNETES_SECONDARY_VERSIONS[$i]}
        # Adjust min and max minor vars for preflights
        if [ $minor -lt $MIN_CLUSTER_NODE_MINOR_FOUND ]; then
            MIN_CLUSTER_NODE_MINOR_FOUND=$minor
        fi
        if [ $minor -gt $MAX_CLUSTER_NODE_MINOR_FOUND ]; then
            MAX_CLUSTER_NODE_MINOR_FOUND=$minor
        fi
        # Check step, minor, and patch for this secondary
        if [ $minor -lt $STEP_VERSION_MINOR ]; then
            KUBERNETES_STEP_SECONDARIES=1
            KUBERNETES_UPGRADE_SECONDARIES_MINOR=1
            KUBERNETES_UPGRADE=1
        elif [ $minor -lt $KUBERNETES_TARGET_VERSION_MINOR ]; then
            KUBERNETES_UPGRADE_SECONDARIES_MINOR=1
            KUBERNETES_UPGRADE=1
        elif [ $patch -lt $KUBERNETES_TARGET_VERSION_PATCH ]; then
            KUBERNETES_UPGRADE_SECONDARIES_PATCH=1
            KUBERNETES_UPGRADE=1
        fi
    done
    # preflights
    # NOTE(review): bail is invoked with no arguments here; the message has
    # already been emitted by the preceding printf.
    if [ $MAX_CLUSTER_NODE_MINOR_FOUND -gt $KUBERNETES_TARGET_VERSION_MINOR ]; then
        printf "%s %s %s" \
            "The currently installed kubernetes version is 1.${MAX_CLUSTER_NODE_MINOR_FOUND}." \
            "The requested version to upgrade to is ${KUBERNETES_VERSION}." \
            "Since the currently installed version is newer than the requested version, no action will be taken."
        bail
    fi
    if [ $MIN_CLUSTER_NODE_MINOR_FOUND -lt $LOWEST_SUPPORTED_MINOR ]; then
        MAX_UPGRADEABLE_VERSION_MINOR=$(($MIN_CLUSTER_NODE_MINOR_FOUND + 2))
        printf "%s %s %s" \
            "The currently installed kubernetes version is ${CURRENT_KUBERNETES_VERSION}." \
            "The requested version to upgrade to is ${KUBERNETES_VERSION}." \
            "Kurl can only be upgraded two minor versions at time. Please install ${KUBERNETES_TARGET_VERSION_MAJOR}.${MAX_UPGRADEABLE_VERSION_MINOR}.x. first."
        bail
    fi
    # Resolve the intermediate step version when any node needs a step upgrade
    if [ "$KUBERNETES_STEP_LOCAL_PRIMARY" == "1" ] || [ "$KUBERNETES_STEP_REMOTE_PRIMARIES" == "1" ] || [ "$KUBERNETES_STEP_SECONDARIES" == 1 ]; then
        STEP_VERSION=${STEP_VERSIONS[$STEP_VERSION_MINOR]}
    fi
}

# Set DOCKER_VERSION from the installed docker CLI, if any.
getDockerVersion() {
    if ! commandExists "docker" ; then
        return
    fi
    DOCKER_VERSION=$(docker -v | awk '{gsub(/,/, "", $3); print $3}')
}

# Discover this machine's public IP by querying the GCE, EC2 and Azure
# metadata services in turn (skipped in airgap mode). Sets PUBLIC_ADDRESS on
# the first valid response.
discover_public_ip() {
    if [ "$AIRGAP" == "1" ]; then
        return
    fi
    # gce
    set +e
    _out=$(curl --noproxy "*" --max-time 5 --connect-timeout 2 -qSfs -H 'Metadata-Flavor: Google' http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip 2>/dev/null)
    _status=$?
    set -e
    if [ "$_status" -eq "0" ] && [ -n "$_out" ]; then
        if isValidIpv4 "$_out" || isValidIpv6 "$_out"; then
            PUBLIC_ADDRESS=$_out
        fi
        return
    fi
    # ec2
    set +e
    _out=$(curl --noproxy "*" --max-time 5 --connect-timeout 2 -qSfs http://169.254.169.254/latest/meta-data/public-ipv4 2>/dev/null)
    _status=$?
    set -e
    if [ "$_status" -eq "0" ] && [ -n "$_out" ]; then
        if isValidIpv4 "$_out" || isValidIpv6 "$_out"; then
            PUBLIC_ADDRESS=$_out
        fi
        return
    fi
    # azure
    set +e
    _out=$(curl --noproxy "*" --max-time 5 --connect-timeout 2 -qSfs -H Metadata:true "http://169.254.169.254/metadata/instance/network/interface/0/ipv4/ipAddress/0/publicIpAddress?api-version=2017-08-01&format=text" 2>/dev/null)
    _status=$?
    set -e
    if [ "$_status" -eq "0" ] && [ -n "$_out" ]; then
        if isValidIpv4 "$_out" || isValidIpv6 "$_out"; then
            PUBLIC_ADDRESS=$_out
        fi
        return
    fi
}
# Discover this node's private IP from the kube-apiserver manifest's
# advertise-address flag (no-op when PRIVATE_ADDRESS is already set).
function discover_private_ip() {
    if [ -n "$PRIVATE_ADDRESS" ]; then
        return 0
    fi
    PRIVATE_ADDRESS=$(cat /etc/kubernetes/manifests/kube-apiserver.yaml 2>/dev/null | grep advertise-address | awk -F'=' '{ print $2 }')
    #This is needed on k8s 1.18.x as $PRIVATE_ADDRESS is found to have a newline
    PRIVATE_ADDRESS=$(echo "$PRIVATE_ADDRESS" | tr -d '\n')
}

# True (0) when resolv.conf contains at least one non-loopback nameserver.
# Uses systemd-resolved's upstream resolv.conf when that service is active.
function discover_non_loopback_nameservers() {
    local resolvConf=/etc/resolv.conf
    # https://github.com/kubernetes/kubernetes/blob/v1.19.3/cmd/kubeadm/app/componentconfigs/kubelet.go#L211
    if systemctl is-active -q systemd-resolved; then
        resolvConf=/run/systemd/resolve/resolv.conf
    fi
    cat $resolvConf | grep -E '^nameserver\s+' | grep -Eqv '^nameserver\s+127'
}

# Switch Docker's cgroup driver to systemd by writing /etc/docker/daemon.json
# (skipped when kubelet flags or a daemon.json already exist).
function change_cgroup_driver_to_systemd() {
    # Docker uses cgroupfs by default to manage cgroup. On distributions using systemd,
    # i.e. RHEL and Ubuntu, this causes issues because there are now 2 seperate ways
    # to manage resources. For more info see the link below.
    # https://github.com/kubernetes/kubeadm/issues/1394#issuecomment-462878219
    if [ -f /var/lib/kubelet/kubeadm-flags.env ] || [ -f /etc/docker/daemon.json ]; then
        return
    fi
    mkdir -p /etc/docker
    # NOTE(review): this copy of the file appears truncated/corrupted here --
    # the heredoc body written to daemon.json, its closing delimiter, and the
    # header of the following storage-driver check function are missing (note
    # the unmatched ')' below). Preserved verbatim; recover the original
    # content from the upstream kURL source before relying on this section.
    cat > /etc/docker/daemon.json </dev/null | grep 'Storage Driver' | awk '{print $3}' | awk -F- '{print $1}')
    if [ "$_driver" = "devicemapper" ] && docker info 2>/dev/null | grep -Fqs 'Data loop file:' ; then
        printf "${RED}The running Docker daemon is configured to use the 'devicemapper' storage driver \
in loopback mode.\nThis is not recommended for production use. Please see to the following URL for more \
information.\n\nhttps://help.replicated.com/docs/kb/developer-resources/devicemapper-warning/.${NC}\n\n\
"
        # HARD_FAIL_ON_LOOPBACK
        if [ -n "$1" ]; then
            printf "${RED}Please configure a recommended storage driver and try again.${NC}\n\n"
            exit 1
        fi
        printf "Do you want to proceed anyway? "
        if ! confirmN; then
            exit 0
        fi
    fi
}

# Write an HTTP(S) proxy drop-in for the docker systemd unit and restart
# Docker, unless the currently configured proxy settings already match.
docker_configure_proxy() {
    local previous_proxy=$(docker info 2>/dev/null | grep -i 'Http Proxy:' | awk '{ print $NF }')
    local previous_no_proxy=$(docker info 2>/dev/null | grep -i 'No Proxy:' | awk '{ print $NF }')
    if [ "$PROXY_ADDRESS" = "$previous_proxy" ] && [ "$NO_PROXY_ADDRESSES" = "$previous_no_proxy" ]; then
        return
    fi
    mkdir -p /etc/systemd/system/docker.service.d
    local file=/etc/systemd/system/docker.service.d/http-proxy.conf
    echo "# Generated by kURL" > $file
    echo "[Service]" >> $file
    if echo "$PROXY_ADDRESS" | grep -q "^https"; then
        echo "Environment=\"HTTPS_PROXY=${PROXY_ADDRESS}\" \"NO_PROXY=${NO_PROXY_ADDRESSES}\"" >> $file
    else
        echo "Environment=\"HTTP_PROXY=${PROXY_ADDRESS}\" \"NO_PROXY=${NO_PROXY_ADDRESSES}\"" >> $file
    fi
    restart_docker
}

# Download and unpack the Docker host-package bundle (online mode only).
function docker_get_host_packages_online() {
    local version="$1"
    if [ "$AIRGAP" != "1" ] && [ -n "$DIST_URL" ]; then
        curl -sSLO "$DIST_URL/docker-${version}.tar.gz"
        tar xf docker-${version}.tar.gz
        rm docker-${version}.tar.gz
    fi
}

# Download and unpack the containerd host-package bundle (online mode only).
function containerd_get_host_packages_online() {
    local version="$1"
    if [ "$AIRGAP" != "1" ] && [ -n "$DIST_URL" ]; then
        curl -sSLO "$DIST_URL/containerd-${version}.tar.gz"
        tar xf containerd-${version}.tar.gz
        rm containerd-${version}.tar.gz
    fi
}
# kubectl plugins (kubectl-preflight etc.) are installed to this directory.
export KUBECTL_PLUGINS_PATH=/usr/local/bin

# Prepare this host for Kubernetes: kernel modules, sysctls, host packages,
# container images, kubectl plugins and kustomize.
function kubernetes_host() {
    kubernetes_load_ipvs_modules
    kubernetes_sysctl_config
    # For online always download the kubernetes.tar.gz bundle.
    # Regardless if host packages are already installed, we always inspect for newer versions
    # and/or re-install any missing or corrupted packages.
    if [ "$AIRGAP" != "1" ] && [ -n "$DIST_URL" ]; then
        kubernetes_get_host_packages_online "$KUBERNETES_VERSION"
    fi
    kubernetes_install_host_packages "$KUBERNETES_VERSION"
    load_images $DIR/packages/kubernetes/$KUBERNETES_VERSION/images
    install_plugins
    install_kustomize
}

# Load the IPVS kernel modules kube-proxy needs and persist them across
# reboots. Newer kernels (>= 4.19) and RHEL/CentOS 8.3 renamed
# nf_conntrack_ipv4 to nf_conntrack.
function kubernetes_load_ipvs_modules() {
    if lsmod | grep -q ip_vs ; then
        return
    fi
    if [ "$KERNEL_MAJOR" -gt "4" ] || ([ "$KERNEL_MAJOR" -eq "4" ] && [ "$KERNEL_MINOR" -ge "19" ]) || ([ "$LSB_DIST" = "rhel" ] && [ "$DIST_VERSION" = "8.3" ]) || ([ "$LSB_DIST" = "centos" ] && [ "$DIST_VERSION" = "8.3" ]); then
        modprobe nf_conntrack
    else
        modprobe nf_conntrack_ipv4
    fi
    modprobe ip_vs
    modprobe ip_vs_rr
    modprobe ip_vs_wrr
    modprobe ip_vs_sh
    echo 'nf_conntrack_ipv4' > /etc/modules-load.d/replicated-ipvs.conf
    echo 'ip_vs' >> /etc/modules-load.d/replicated-ipvs.conf
    echo 'ip_vs_rr' >> /etc/modules-load.d/replicated-ipvs.conf
    echo 'ip_vs_wrr' >> /etc/modules-load.d/replicated-ipvs.conf
    echo 'ip_vs_sh' >> /etc/modules-load.d/replicated-ipvs.conf
}

# Enable bridge netfilter and IPv4 forwarding sysctls required by Kubernetes.
function kubernetes_sysctl_config() {
    case "$LSB_DIST" in
        # TODO I've only seen these disabled on centos/rhel but should be safe for ubuntu
        centos|rhel|amzn)
            echo "net.bridge.bridge-nf-call-ip6tables = 1" > /etc/sysctl.d/k8s.conf
            echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.d/k8s.conf
            echo "net.ipv4.conf.all.forwarding = 1" >> /etc/sysctl.d/k8s.conf
            sysctl --system
            ;;
    esac
}

# k8sVersion is an argument because this may be used to install step versions of K8s during an upgrade
# to the target version
during an upgrade # to the target version function kubernetes_install_host_packages() { k8sVersion=$1 logStep "Install kubelet, kubeadm, kubectl and cni host packages" if kubernetes_host_commands_ok "$k8sVersion"; then logSuccess "Kubernetes host packages already installed" return fi if [ "$AIRGAP" != "1" ] && [ -n "$DIST_URL" ]; then kubernetes_get_host_packages_online "$k8sVersion" fi case "$LSB_DIST" in ubuntu) export DEBIAN_FRONTEND=noninteractive dpkg --install --force-depends-version $DIR/packages/kubernetes/${k8sVersion}/ubuntu-${DIST_VERSION}/*.deb ;; centos|rhel|amzn) case "$LSB_DIST$DIST_VERSION_MAJOR" in rhel8|centos8) rpm --upgrade --force --nodeps $DIR/packages/kubernetes/${k8sVersion}/rhel-8/*.rpm ;; *) rpm --upgrade --force --nodeps $DIR/packages/kubernetes/${k8sVersion}/rhel-7/*.rpm ;; esac ;; esac if [ "$CLUSTER_DNS" != "$DEFAULT_CLUSTER_DNS" ]; then sed -i "s/$DEFAULT_CLUSTER_DNS/$CLUSTER_DNS/g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf fi systemctl enable kubelet && systemctl start kubelet logSuccess "Kubernetes host packages installed" } kubernetes_host_commands_ok() { local k8sVersion=$1 if ! commandExists kubelet; then printf "kubelet command missing - will install host components\n" return 1 fi if ! commandExists kubeadm; then printf "kubeadm command missing - will install host components\n" return 1 fi if ! commandExists kubectl; then printf "kubectl command missing - will install host components\n" return 1 fi if ! 
# Fetch and unpack the host-package bundle for Kubernetes $1. A no-op when
# running airgapped or when no distribution URL is configured.
function kubernetes_get_host_packages_online() {
    local k8sVersion="$1"
    if [ "$AIRGAP" = "1" ] || [ -z "$DIST_URL" ]; then
        return 0
    fi
    curl -sSLO "$DIST_URL/kubernetes-${k8sVersion}.tar.gz"
    tar xf kubernetes-${k8sVersion}.tar.gz
    rm kubernetes-${k8sVersion}.tar.gz
}

# List all master nodes (header-less kubectl output).
function kubernetes_masters() {
    kubectl get nodes --no-headers --selector="node-role.kubernetes.io/master"
}

# List master nodes other than this host.
function kubernetes_remote_masters() {
    kubectl get nodes --no-headers --selector="node-role.kubernetes.io/master,kubernetes.io/hostname!=$(hostname)" 2>/dev/null
}

# List worker nodes (nodes without the master role label).
function kubernetes_workers() {
    kubectl get node --no-headers --selector='!node-role.kubernetes.io/master' 2>/dev/null
}

# exit 0 if there are any remote workers or masters
function kubernetes_has_remotes() {
    if ! kubernetes_api_is_healthy; then
        # assume this is a new install
        return 1
    fi
    local nodeCount
    nodeCount=$(kubectl get nodes --no-headers --selector="kubernetes.io/hostname!=$(hostname)" 2>/dev/null | wc -l)
    [ "$nodeCount" -gt "0" ]
}

# Print the host:port of the Kubernetes API server: the load balancer when
# one is configured, otherwise this node's private address on port 6443.
function kubernetes_api_address() {
    if [ -z "$LOAD_BALANCER_ADDRESS" ]; then
        echo "${PRIVATE_ADDRESS}:6443"
    else
        echo "${LOAD_BALANCER_ADDRESS}:${LOAD_BALANCER_PORT}"
    fi
}

# True (0) when the API server answers on /healthz.
function kubernetes_api_is_healthy() {
    curl --noproxy "*" --fail --silent --insecure "https://$(kubernetes_api_address)/healthz" >/dev/null
}

# Spin for up to two minutes waiting for a healthy API server; bail on timeout.
function spinner_kubernetes_api_healthy() {
    if ! spinner_until 120 kubernetes_api_is_healthy; then
        bail "Kubernetes API failed to report healthy"
    fi
}
spinner_until 120 kubernetes_api_is_healthy; then
        bail "Kubernetes API failed to report healthy"
    fi
}

# With AWS NLB kubectl commands may fail to connect to the Kubernetes API immediately after a single
# successful health check
function spinner_kubernetes_api_stable() {
    # require 10 consecutive healthy checks, one second apart
    for i in {1..10}; do
        sleep 1
        spinner_kubernetes_api_healthy
    done
}

# Drain a node before upgrade; never fails the script (|| true) and deliberately
# leaves rook-ceph storage pods and kube-dns in place via the pod selector.
function kubernetes_drain() {
    kubectl drain "$1" \
        --delete-local-data \
        --ignore-daemonsets \
        --force \
        --grace-period=30 \
        --timeout=120s \
        --pod-selector 'app notin (rook-ceph-mon,rook-ceph-osd,rook-ceph-osd-prepare,rook-ceph-operator,rook-ceph-agent),k8s-app!=kube-dns' || true
}

# True when the node's kubelet reports exactly v<version>.
function kubernetes_node_has_version() {
    local name="$1"
    local version="$2"

    local actual_version="$(try_1m kubernetes_node_kubelet_version $name)"

    [ "$actual_version" = "v${version}" ]
}

# Echo the kubelet version string (e.g. "v1.19.7") reported by a node.
function kubernetes_node_kubelet_version() {
    local name="$1"

    kubectl get node "$name" -o=jsonpath='{@.status.nodeInfo.kubeletVersion}'
}

# exit 0 when at least one remote master is not yet at KUBERNETES_VERSION
function kubernetes_any_remote_master_unupgraded() {
    while read -r master; do
        local name=$(echo $master | awk '{ print $1 }')
        if ! kubernetes_node_has_version "$name" "$KUBERNETES_VERSION"; then
            return 0
        fi
    done < <(kubernetes_remote_masters)
    return 1
}

# exit 0 when at least one worker is not yet at KUBERNETES_VERSION
function kubernetes_any_worker_unupgraded() {
    while read -r worker; do
        local name=$(echo $worker | awk '{ print $1 }')
        if ! kubernetes_node_has_version "$name" "$KUBERNETES_VERSION"; then
            return 0
        fi
    done < <(kubernetes_workers)
    return 1
}

# Echo the locally installed kubelet version without the leading "v".
function kubelet_version() {
    kubelet --version | cut -d ' ' -f 2 | sed 's/v//'
}

# exit 0 only when no node reports NotReady
function kubernetes_nodes_ready() {
    if try_1m kubectl get nodes --no-headers | awk '{ print $2 }' | grep -q "NotReady"; then
        return 1
    fi
    return 0
}

# Scale a workload to zero replicas if it exists; silently succeed otherwise.
function kubernetes_scale_down() {
    local ns="$1"
    local kind="$2"
    local name="$3"

    if ! kubernetes_resource_exists "$ns" "$kind" "$name"; then
        return 0
    fi

    kubectl -n "$ns" scale "$kind" "$name" --replicas=0
}

# Echo the base64-decoded value of one key from a secret (empty if missing).
function kubernetes_secret_value() {
    local ns="$1"
    local name="$2"
    local key="$3"

    kubectl -n "$ns" get secret "$name" -ojsonpath="{ .data.$key }" 2>/dev/null | base64 --decode
}

# Install bundled kubectl plugins into /usr/local/bin and remove the legacy
# system-wide krew install left behind by old kurl versions.
function install_plugins() {
    pushd "$DIR/krew"
    tar xzvf outdated.tar.gz && mv outdated /usr/local/bin/kubectl-outdated
    tar xzvf preflight.tar.gz && mv preflight /usr/local/bin/kubectl-preflight
    tar xzvf support-bundle.tar.gz && mv support-bundle /usr/local/bin/kubectl-support_bundle
    popd

    # uninstall system-wide krew from old versions of kurl
    rm -rf /opt/replicated/krew
    sed -i '/^export KUBECTL_PLUGINS_PATH.*KREW_ROOT/d' /etc/profile
    sed -i '/^export KREW_ROOT.*replicated/d' /etc/profile
}

# Install the bundled kustomize binaries (masters only) and symlink the newest.
# NOTE(review): reads ${k8sVersion} as a global set by an earlier caller
# (kubernetes_install_host_packages) rather than taking it as a parameter — verify
# callers always run after that assignment.
function install_kustomize() {
    if ! kubernetes_is_master; then
        return 0
    elif [ ! -d "$DIR/packages/kubernetes/${k8sVersion}/assets" ]; then
        echo "Kustomize package is missing in your distribution. Skipping."
        return 0
    fi

    kustomize_dir=/usr/local/bin

    pushd "$DIR/packages/kubernetes/${k8sVersion}/assets"
    for file in $(ls kustomize-*);do
        if [ "${file: -6}" == "tar.gz" ];then
            tar xf ${file}
            chmod a+x kustomize
            # name the binary after the archive, e.g. kustomize-3.5.4
            mv kustomize /usr/local/bin/${file%%.tar*}
        else
            # Earlier versions of kustomize weren't archived/compressed
            chmod a+x ${file}
            cp ${file} ${kustomize_dir}
        fi
    done
    popd

    if ls ${kustomize_dir}/kustomize-* 1>/dev/null 2>&1;then
        latest_binary=$(basename $(ls ${kustomize_dir}/kustomize-* | sort -V | tail -n 1))
        # Link to the latest version
        ln -s -f ${kustomize_dir}/${latest_binary} ${kustomize_dir}/kustomize
    fi
}

# True when this host is (or is becoming) a master: MASTER flag set, or a
# static-pod apiserver manifest already exists.
function kubernetes_is_master() {
    if [ "$MASTER" = "1" ]; then
        return 0
    elif [ -f /etc/kubernetes/manifests/kube-apiserver.yaml ]; then
        return 0
    else
        return 1
    fi
}

# Pick POD_CIDR: validate an explicit flag against existing cluster state, else
# reuse the existing cidr, else allocate a free range (prefer 10.32.0.0/16).
function discover_pod_subnet() {
    local excluded=""
    if !
ip route show src "$PRIVATE_ADDRESS" | awk '{ print $1 }' | grep -q '/'; then
        # no CIDR route for the private address — exclude its /16 from allocation
        excluded="--exclude-subnet=${PRIVATE_ADDRESS}/16"
    fi

    if [ -n "$POD_CIDR" ]; then
        local podCidrSize=$(echo $POD_CIDR | awk -F'/' '{ print $2 }')

        # if pod-cidr flag and pod-cidr-range are both set, validate pod-cidr is as large as pod-cidr-range
        if [ -n "$POD_CIDR_RANGE" ]; then
            if [ "$podCidrSize" -gt "$POD_CIDR_RANGE" ]; then
                bail "Pod cidr must be at least /$POD_CIDR_RANGE"
            fi
        fi

        # if pod cidr flag matches existing weave pod cidr don't validate
        if [ "$POD_CIDR" = "$EXISTING_POD_CIDR" ]; then
            return 0
        elif [ -n "$EXISTING_POD_CIDR" ]; then
            bail "Pod cidr cannot be changed to $POD_CIDR because existing cidr is $EXISTING_POD_CIDR"
        fi

        if $DIR/bin/subnet --subnet-alloc-range "$POD_CIDR" --cidr-range "$podCidrSize" "$excluded" 1>/dev/null; then
            return 0
        fi

        # overlap detected — let the operator accept it interactively
        printf "${RED}Pod cidr ${POD_CIDR} overlaps with existing route. Continue? ${NC}"
        if ! confirmY "-t 60"; then
            exit 1
        fi
        return 0
    fi

    # detected from weave device
    if [ -n "$EXISTING_POD_CIDR" ]; then
        POD_CIDR="$EXISTING_POD_CIDR"
        return 0
    fi

    local size="$POD_CIDR_RANGE"
    if [ -z "$size" ]; then
        size="22"
    fi

    # find a network for the Pods, preferring start at 10.32.0.0
    if podnet=$($DIR/bin/subnet --subnet-alloc-range "10.32.0.0/16" --cidr-range "$size" "$excluded"); then
        echo "Found pod network: $podnet"
        POD_CIDR="$podnet"
        return 0
    fi

    if podnet=$($DIR/bin/subnet --subnet-alloc-range "10.0.0.0/8" --cidr-range "$size" "$excluded"); then
        echo "Found pod network: $podnet"
        POD_CIDR="$podnet"
        return 0
    fi

    bail "Failed to find available subnet for pod network. Use the pod-cidr flag to set a pod network"
}

# This must run after discover_pod_subnet since it excludes the pod cidr
# Pick SERVICE_CIDR with the same strategy as discover_pod_subnet, preferring
# a free range starting at 10.96.0.0.
function discover_service_subnet() {
    local excluded="--exclude-subnet=$POD_CIDR"
    if ! ip route show src "$PRIVATE_ADDRESS" | awk '{ print $1 }' | grep -q '/'; then
        excluded="$excluded,${PRIVATE_ADDRESS}/16"
    fi

    EXISTING_SERVICE_CIDR=$(kubeadm config view 2>/dev/null | grep serviceSubnet | awk '{ print $2 }')

    if [ -n "$SERVICE_CIDR" ]; then
        local serviceCidrSize=$(echo $SERVICE_CIDR | awk -F'/' '{ print $2 }')

        # if service-cidr flag and service-cidr-range are both set, validate service-cidr is as large as service-cidr-range
        if [ -n "$SERVICE_CIDR_RANGE" ]; then
            if [ "$serviceCidrSize" -gt "$SERVICE_CIDR_RANGE" ]; then
                bail "Service cidr must be at least /$SERVICE_CIDR_RANGE"
            fi
        fi

        # if service-cidr flag matches existing service cidr don't validate
        if [ "$SERVICE_CIDR" = "$EXISTING_SERVICE_CIDR" ]; then
            return 0
        elif [ -n "$EXISTING_SERVICE_CIDR" ]; then
            bail "Service cidr cannot be changed to $SERVICE_CIDR because existing cidr is $EXISTING_SERVICE_CIDR"
        fi

        if $DIR/bin/subnet --subnet-alloc-range "$SERVICE_CIDR" --cidr-range "$serviceCidrSize" "$excluded" 1>/dev/null; then
            return 0
        fi

        printf "${RED}Service cidr ${SERVICE_CIDR} overlaps with existing route. Continue? ${NC}"
        if ! confirmY "-t 60"; then
            exit 1
        fi
        return 0
    fi

    if [ -n "$EXISTING_SERVICE_CIDR" ]; then
        echo "Using existing service cidr ${EXISTING_SERVICE_CIDR}"
        SERVICE_CIDR="$EXISTING_SERVICE_CIDR"
        return 0
    fi

    local size="$SERVICE_CIDR_RANGE"
    if [ -z "$size" ]; then
        size="22"
    fi

    # find a network for the services, preferring start at 10.96.0.0
    if servicenet=$($DIR/bin/subnet --subnet-alloc-range "10.96.0.0/16" --cidr-range "$size" "$excluded"); then
        echo "Found service network: $servicenet"
        SERVICE_CIDR="$servicenet"
        return 0
    fi

    if servicenet=$($DIR/bin/subnet --subnet-alloc-range "10.0.0.0/8" --cidr-range "$size" "$excluded"); then
        echo "Found service network: $servicenet"
        SERVICE_CIDR="$servicenet"
        return 0
    fi

    bail "Failed to find available subnet for service network. 
Use the service-cidr flag to set a service network"
}

# Echo every image name present on a node, one per line.
function kubernetes_node_images() {
    local nodeName="$1"

    kubectl get node "$nodeName" -ojsonpath="{range .status.images[*]}{ range .names[*] }{ @ }{'\n'}{ end }{ end }"
}

# Echo the images required by kubernetes and every enabled addon, taken from the
# Manifest files bundled with each versioned component. Lines marked
# no_remote_load are skipped; only the 3rd field (image ref) is emitted.
function list_all_required_images() {
    find packages/kubernetes/$KUBERNETES_VERSION -type f -name Manifest 2>/dev/null | xargs cat | grep -E '^image' | grep -v no_remote_load | awk '{ print $3 }'

    # intermediate k8s version when a multi-hop (step) upgrade is in progress
    if [ -n "$STEP_VERSION" ]; then
        find packages/kubernetes/$STEP_VERSION -type f -name Manifest 2>/dev/null | xargs cat | grep -E '^image' | grep -v no_remote_load | awk '{ print $3 }'
    fi

    if [ -n "$DOCKER_VERSION" ]; then
        find packages/docker/$DOCKER_VERSION -type f -name Manifest 2>/dev/null | xargs cat | grep -E '^image' | grep -v no_remote_load | awk '{ print $3 }'
    fi

    if [ -n "$WEAVE_VERSION" ]; then
        find addons/weave/$WEAVE_VERSION -type f -name Manifest 2>/dev/null | xargs cat | grep -E '^image' | grep -v no_remote_load | awk '{ print $3 }'
    fi

    if [ -n "$ROOK_VERSION" ]; then
        find addons/rook/$ROOK_VERSION -type f -name Manifest 2>/dev/null | xargs cat | grep -E '^image' | grep -v no_remote_load | awk '{ print $3 }'
    fi

    if [ -n "$OPENEBS_VERSION" ]; then
        find addons/openebs/$OPENEBS_VERSION -type f -name Manifest 2>/dev/null | xargs cat | grep -E '^image' | grep -v no_remote_load | awk '{ print $3 }'
    fi

    if [ -n "$MINIO_VERSION" ]; then
        find addons/minio/$MINIO_VERSION -type f -name Manifest 2>/dev/null | xargs cat | grep -E '^image' | grep -v no_remote_load | awk '{ print $3 }'
    fi

    if [ -n "$CONTOUR_VERSION" ]; then
        find addons/contour/$CONTOUR_VERSION -type f -name Manifest 2>/dev/null | xargs cat | grep -E '^image' | grep -v no_remote_load | awk '{ print $3 }'
    fi

    if [ -n "$REGISTRY_VERSION" ]; then
        find addons/registry/$REGISTRY_VERSION -type f -name Manifest 2>/dev/null | xargs cat | grep -E '^image' | grep -v no_remote_load | awk '{ print $3 }'
    fi

    if [ -n "$PROMETHEUS_VERSION" ]; then
        find addons/prometheus/$PROMETHEUS_VERSION -type f -name Manifest 2>/dev/null | xargs cat | grep -E '^image' | grep -v no_remote_load | awk '{ print $3 }'
    fi

    if [ -n "$KOTSADM_VERSION" ]; then
        find addons/kotsadm/$KOTSADM_VERSION -type f -name Manifest 2>/dev/null | xargs cat | grep -E '^image' | grep -v no_remote_load | awk '{ print $3 }'
    fi

    if [ -n "$FLUENTD_VERSION" ]; then
        find addons/fluentd/$FLUENTD_VERSION -type f -name Manifest 2>/dev/null | xargs cat | grep -E '^image' | grep -v no_remote_load | awk '{ print $3 }'
    fi

    if [ -n "$VELERO_VERSION" ]; then
        find addons/velero/$VELERO_VERSION -type f -name Manifest 2>/dev/null | xargs cat | grep -E '^image' | grep -v no_remote_load | awk '{ print $3 }'
    fi

    if [ -n "$EKCO_VERSION" ]; then
        find addons/ekco/$EKCO_VERSION -type f -name Manifest 2>/dev/null | xargs cat | grep -E '^image' | grep -v no_remote_load | awk '{ print $3 }'
    fi

    if [ -n "$CERT_MANAGER_VERSION" ]; then
        find addons/cert-manager/$CERT_MANAGER_VERSION -type f -name Manifest 2>/dev/null | xargs cat | grep -E '^image' | grep -v no_remote_load | awk '{ print $3 }'
    fi

    if [ -n "$METRICS_SERVER_VERSION" ]; then
        find addons/metrics-server/$METRICS_SERVER_VERSION -type f -name Manifest 2>/dev/null | xargs cat | grep -E '^image' | grep -v no_remote_load | awk '{ print $3 }'
    fi
}

# Return 1 (and print which image is missing) if the node lacks any required image.
function kubernetes_node_has_all_images() {
    local nodeName="$1"

    while read -r image; do
        if !
kubernetes_node_has_image "$nodeName" "$image"; then
            printf "\n${YELLOW}Node $nodeName missing image $image${NC}\n"
            return 1
        fi
    done < <(list_all_required_images)
}

# True when the node already has the given image (docker.io/ prefix is ignored
# because node status reports images without it).
function kubernetes_node_has_image() {
    local nodeName="$1"
    # docker.io/envoyproxy/envoy-alpine:v1.10.0 -> envoyproxy/envoy-alpine:v1.10.0
    local image=$(echo $2 | sed 's/^docker.io\///')

    while read -r nodeImage; do
        if [ "$nodeImage" = "$image" ]; then
            return 0
        fi
    done < <(kubernetes_node_images "$nodeName")

    return 1
}

# Parallel arrays: remote primary node names and their kubelet versions.
KUBERNETES_REMOTE_PRIMARIES=()
KUBERNETES_REMOTE_PRIMARY_VERSIONS=()
function kubernetes_get_remote_primaries() {
    while read -r primary; do
        local name=$(echo $primary | awk '{ print $1 }')
        local version="$(try_1m kubernetes_node_kubelet_version $name)"

        KUBERNETES_REMOTE_PRIMARIES+=( $name )
        KUBERNETES_REMOTE_PRIMARY_VERSIONS+=( $version )
    done < <(kubernetes_remote_masters)
}

# Parallel arrays: worker node names and their kubelet versions.
KUBERNETES_SECONDARIES=()
KUBERNETES_SECONDARY_VERSIONS=()
function kubernetes_get_secondaries() {
    while read -r secondary; do
        local name=$(echo $secondary | awk '{ print $1 }')
        local version="$(try_1m kubernetes_node_kubelet_version $name)"

        KUBERNETES_SECONDARIES+=( $name )
        KUBERNETES_SECONDARY_VERSIONS+=( $version )
    done < <(kubernetes_workers)
}

# Echo the controlPlaneEndpoint from the kubeadm config, stripped of quotes.
function kubernetes_load_balancer_address() {
    kubeadm config view 2>/dev/null | grep 'controlPlaneEndpoint:' | sed 's/controlPlaneEndpoint: \|"//g'
}

# exit 0 once the pod has left Pending (Running, Failed or Succeeded).
function kubernetes_pod_started() {
    name=$1
    namespace=$2

    phase=$(kubectl -n $namespace get pod $name -ojsonpath='{ .status.phase }')
    case "$phase" in
        Running|Failed|Succeeded)
            return 0
            ;;
    esac

    return 1
}

# exit 0 once the pod has terminated (Failed or Succeeded).
function kubernetes_pod_completed() {
    name=$1
    namespace=$2

    phase=$(kubectl -n $namespace get pod $name -ojsonpath='{ .status.phase }')
    case "$phase" in
        Failed|Succeeded)
            return 0
            ;;
    esac

    return 1
}

# True when object store credentials and endpoint are all configured.
function object_store_exists() {
    if [ -n "$OBJECT_STORE_ACCESS_KEY" ] && \
       [ -n "$OBJECT_STORE_SECRET_KEY" ] && \
       [ -n "$OBJECT_STORE_CLUSTER_IP" ]; then
        return 0
    else
        return 1
    fi
}

# Idempotently create a bucket in the in-cluster object store.
function object_store_create_bucket() {
    if object_store_bucket_exists "$1" ; then
        echo "object store bucket $1 exists"
        return 0
    fi
    if ! _object_store_create_bucket "$1" ; then
        return 1
    fi
    echo "object store bucket $1 created"
}

# PUT the bucket using AWS signature v2 (HMAC-SHA1 over the canonical string).
function _object_store_create_bucket() {
    local bucket=$1
    local acl="x-amz-acl:private"
    local d=$(LC_TIME="en_US.UTF-8" TZ="UTC" date +"%a, %d %b %Y %T %z")
    local string="PUT\n\n\n${d}\n${acl}\n/$bucket"
    local sig=$(echo -en "${string}" | openssl sha1 -hmac "${OBJECT_STORE_SECRET_KEY}" -binary | base64)

    curl -f -X PUT \
        --noproxy "*" \
        -H "Host: $OBJECT_STORE_CLUSTER_IP" \
        -H "Date: $d" \
        -H "$acl" \
        -H "Authorization: AWS $OBJECT_STORE_ACCESS_KEY:$sig" \
        "http://$OBJECT_STORE_CLUSTER_IP/$bucket" >/dev/null
}

# HEAD the bucket with the same v2 signing scheme; exit status is existence.
function object_store_bucket_exists() {
    local bucket=$1
    local acl="x-amz-acl:private"
    local d=$(LC_TIME="en_US.UTF-8" TZ="UTC" date +"%a, %d %b %Y %T %z")
    local string="HEAD\n\n\n${d}\n${acl}\n/$bucket"
    local sig=$(echo -en "${string}" | openssl sha1 -hmac "${OBJECT_STORE_SECRET_KEY}" -binary | base64)

    curl -f -I \
        --noproxy "*" \
        -H "Host: $OBJECT_STORE_CLUSTER_IP" \
        -H "Date: $d" \
        -H "$acl" \
        -H "Authorization: AWS $OBJECT_STORE_ACCESS_KEY:$sig" \
        "http://$OBJECT_STORE_CLUSTER_IP/$bucket" &>/dev/null
}

# Run all host preflight checks in order; each check bails or prompts on failure.
function preflights() {
    require64Bit
    bailIfUnsupportedOS
    mustSwapoff
    promptIfDockerUnsupportedOS
    checkDockerK8sVersion
    checkFirewalld
    must_disable_selinux
    apply_iptables_config
    cri_preflights
    kotsadm_prerelease
    host_nameservers_reachable

    return 0
}

# Bail unless running as root.
function require_root_user() {
    local user="$(id -un 2>/dev/null || true)"
    if [ "$user" != "root" ]; then
        bail "Error: this installer needs to be run as root."
    fi
}

# Bail unless the machine reports a 64-bit architecture.
function require64Bit() {
    case "$(uname -m)" in
        *64)
            ;;
        *)
            echo >&2 'Error: you are not using a 64bit platform.'
            echo >&2 'This installer currently only supports 64bit platforms.'
exit 1
            ;;
    esac
}

# Bail on any OS/version combination the installer does not support.
function bailIfUnsupportedOS() {
    case "$LSB_DIST$DIST_VERSION" in
        ubuntu16.04|ubuntu18.04|ubuntu20.04|rhel7.4|rhel7.5|rhel7.6|rhel7.7|rhel7.8|rhel7.9|rhel8.0|rhel8.1|rhel8.2|rhel8.3|centos7.4|centos7.5|centos7.6|centos7.7|centos7.8|centos7.9|centos8.0|centos8.1|centos8.2|centos8.3|amzn2)
            ;;
        *)
            bail "Kubernetes install is not supported on ${LSB_DIST} ${DIST_VERSION}"
            ;;
    esac
}

# Swap must be fully off for kubelet: turn it off now AND disable every
# mechanism that could re-enable it (fstab, systemd unit, Azure agent).
function mustSwapoff() {
    if swap_is_on || swap_is_enabled; then
        printf "\n${YELLOW}This application is incompatible with memory swapping enabled. Disable swap to continue?${NC} "
        if confirmY ; then
            printf "=> Running swapoff --all\n"
            swapoff --all
            if swap_fstab_enabled; then
                swap_fstab_disable
            fi
            if swap_service_enabled; then
                swap_service_disable
            fi
            if swap_azure_linux_agent_enabled; then
                swap_azure_linux_agent_disable
            fi
            logSuccess "Swap disabled.\n"
        else
            bail "\nDisable swap with swapoff --all and remove all swap entries from /etc/fstab before re-running this script"
        fi
    fi
}

# True when any swap device is currently active.
function swap_is_on() {
   swapon --summary | grep --quiet " " # todo this could be more specific, swapon -s returns nothing if its off
}

# True when any persistence mechanism would re-enable swap on boot.
function swap_is_enabled() {
    swap_fstab_enabled || swap_service_enabled || swap_azure_linux_agent_enabled
}

# True when /etc/fstab has an uncommented swap entry.
function swap_fstab_enabled() {
    cat /etc/fstab | grep --quiet --ignore-case --extended-regexp '^[^#]+swap'
}

# Comment out swap entries in /etc/fstab, keeping a .bak backup.
function swap_fstab_disable() {
    printf "=> Commenting swap entries in /etc/fstab \n"
    sed --in-place=.bak '/\bswap\b/ s/^/#/' /etc/fstab
    printf "=> A backup of /etc/fstab has been made at /etc/fstab.bak\n\n"
    printf "\n${YELLOW}Changes have been made to /etc/fstab. 
We recommend reviewing them after completing this installation to ensure mounts are correctly configured.${NC}\n\n"
    sleep 5 # for emphasis of the above ^
}

# This is a service on some Azure VMs that just enables swap
function swap_service_enabled() {
    systemctl -q is-enabled temp-disk-swapfile
}

# Disable the Azure temp-disk swapfile unit so swap stays off across reboots.
function swap_service_disable() {
    printf "=> Disabling temp-disk-swapfile service\n"
    systemctl disable temp-disk-swapfile
}

# True when the Azure Linux Agent is configured to create swap.
function swap_azure_linux_agent_enabled() {
    cat /etc/waagent.conf | grep -q 'ResourceDisk.EnableSwap=y'
}

# Flip ResourceDisk.EnableSwap to "n" in the Azure agent config.
function swap_azure_linux_agent_disable() {
    printf "=> Disabling swap in Azure Linux Agent configuration file /etc/waagent.conf\n"
    sed -i 's/ResourceDisk.EnableSwap=y/ResourceDisk.EnableSwap=n/g' /etc/waagent.conf
}

# Enforce the minimum Docker version required by the target Kubernetes minor.
# No-op when Docker is not installed/requested.
checkDockerK8sVersion()
{
    getDockerVersion
    if [ -z "$DOCKER_VERSION" ]; then
        return
    fi

    case "$KUBERNETES_TARGET_VERSION_MINOR" in
        14|15)
            compareDockerVersions "$DOCKER_VERSION" 1.13.1
            if [ "$COMPARE_DOCKER_VERSIONS_RESULT" -eq "-1" ]; then
                bail "Minimum Docker version for Kubernetes $KUBERNETES_VERSION is 1.13.1."
            fi
            ;;
    esac
}

# Warn (and optionally prompt) when Docker was requested on EL8, where
# containerd is the supported runtime.
promptIfDockerUnsupportedOS() {
    if [ -z "$DOCKER_VERSION" ]; then
        return
    fi

    case "$LSB_DIST" in
        centos|rhel)
            if [[ "$DIST_VERSION" =~ ^8 ]]; then
                logWarn "Docker is not supported on ${LSB_DIST} ${DIST_VERSION}."
                logWarn "The containerd addon is recommended. https://kurl.sh/docs/add-ons/containerd"
                if ! commandExists "docker" ; then
                    printf "${YELLOW}Continue? ${NC}" 1>&2
                    if ! confirmY "-t 30"; then
                        exit 1
                    fi
                fi
            fi
            ;;
    esac
}

# Apply firewalld config overrides, then resolve an active firewalld: hard-fail,
# auto-disable, or prompt the operator, depending on installer flags.
checkFirewalld() {
    # Fix: this guard previously tested PRESERVE_DOCKER_CONFIG (copy-paste
    # error). The firewalld preflight must honor its own preserve flag, matching
    # must_disable_selinux's use of PRESERVE_SELINUX_CONFIG.
    if [ -n "$PRESERVE_FIREWALLD_CONFIG" ]; then
        return
    fi
    apply_firewalld_config

    if [ "$BYPASS_FIREWALLD_WARNING" = "1" ]; then
        return
    fi
    if !
systemctl -q is-active firewalld ; then
        return
    fi

    if [ "$HARD_FAIL_ON_FIREWALLD" = "1" ]; then
        printf "${RED}Firewalld is active${NC}\n" 1>&2
        exit 1
    fi

    if [ -n "$DISABLE_FIREWALLD" ]; then
        systemctl stop firewalld
        systemctl disable firewalld
        return
    fi

    printf "${YELLOW}Firewalld is active, please press Y to disable ${NC}"
    if confirmY ; then
        systemctl stop firewalld
        systemctl disable firewalld
        return
    fi

    printf "${YELLOW}Continue with firewalld active? ${NC}"
    if confirmY ; then
        # remember the choice so later re-entry skips the prompt
        BYPASS_FIREWALLD_WARNING=1
        return
    fi
    exit 1
}

must_disable_selinux() {
    # From kubernets kubeadm docs for RHEL:
    #
    #    Disabling SELinux by running setenforce 0 is required to allow containers to
    #    access the host filesystem, which is required by pod networks for example.
    #    You have to do this until SELinux support is improved in the kubelet.

    # Check and apply YAML overrides
    if [ -n "$PRESERVE_SELINUX_CONFIG" ]; then
        return
    fi
    apply_selinux_config

    if [ -n "$BYPASS_SELINUX_PREFLIGHT" ]; then
        return
    fi

    if selinux_enabled && selinux_enforced ; then
        if [ -n "$DISABLE_SELINUX" ]; then
            setenforce 0
            sed -i s/^SELINUX=.*$/SELINUX=permissive/ /etc/selinux/config
            return
        fi

        printf "\n${YELLOW}Kubernetes is incompatible with SELinux. Disable SELinux to continue?${NC} "
        if confirmY ; then
            setenforce 0
            sed -i s/^SELINUX=.*$/SELINUX=permissive/ /etc/selinux/config
        else
            bail "\nDisable SELinux with 'setenforce 0' before re-running install script"
        fi
    fi
}

# Fallback when no CRI was specified and none is installed: pin an online
# docker install (explicitly flagged as unsupported/deprecated behavior).
function force_docker() {
    DOCKER_VERSION="19.03.4"
    echo "NO CRI version was listed in yaml or found on host OS, defaulting to online docker install"
    echo "THIS FEATURE IS NOT SUPPORTED AND WILL BE DEPRECATED IN FUTURE KURL VERSIONS"
}

function cri_preflights() {
    require_cri
}

# Ensure some container runtime will be present: reuse an existing docker or
# containerd, or fall back to force_docker when nothing was requested.
function require_cri() {
    if commandExists docker ; then
        SKIP_DOCKER_INSTALL=1
        return 0
    fi

    if commandExists ctr ; then
        SKIP_CONTAINERD_INSTALL=1
        return 0
    fi

    if [ "$LSB_DIST" = "rhel" ]; then
        if [ -n "$NO_CE_ON_EE" ]; then
            printf "${RED}Enterprise Linux distributions require Docker Enterprise Edition. Please install Docker before running this installation script.${NC}\n" 1>&2
            return 0
        fi
    fi

    if [ "$SKIP_DOCKER_INSTALL" = "1" ]; then
        bail "Docker is required"
    fi

    if [ -z "$DOCKER_VERSION" ] && [ -z "$CONTAINERD_VERSION" ]; then
        force_docker
    fi

    return 0
}

# True when SELinux is enabled, using whichever status tool is available.
selinux_enabled() {
    if commandExists "selinuxenabled"; then
        selinuxenabled
        return
    elif commandExists "sestatus"; then
        ENABLED=$(sestatus | grep 'SELinux status' | awk '{ print $3 }')
        echo "$ENABLED" | grep --quiet --ignore-case enabled
        return
    fi

    return 1
}

# True when SELinux is in enforcing mode.
# NOTE(review): runs getenforce twice (assigns ENFORCED then re-runs it in the
# echo); harmless but redundant — could reuse "$ENFORCED".
selinux_enforced() {
    if commandExists "getenforce"; then
        ENFORCED=$(getenforce)
        echo $(getenforce) | grep --quiet --ignore-case enforcing
        return
    elif commandExists "sestatus"; then
        ENFORCED=$(sestatus | grep 'SELinux mode' | awk '{ print $3 }')
        echo "$ENFORCED" | grep --quiet --ignore-case enforcing
        return
    fi

    return 1
}

# Require explicit confirmation before installing a prerelease (alpha) kotsadm.
function kotsadm_prerelease() {
    if [ "$KOTSADM_VERSION" = "alpha" ]; then
        printf "\n${YELLOW}This is a prerelease version of kotsadm and should not be run in production. Press Y to continue.${NC} "
        if ! confirmN; then
            bail "\nWill not install prerelease version of kotsadm."
fi
    fi
}

# Require at least one non-loopback nameserver unless one was provided or the
# install is airgapped.
function host_nameservers_reachable() {
    if [ -n "$NAMESERVER" ] || [ "$AIRGAP" = "1" ]; then
        return 0
    fi
    if ! discover_non_loopback_nameservers; then
        bail "\nAt least one nameserver must be accessible on a non-loopback address. Use the \"nameserver\" flag in the installer spec to override the loopback nameservers discovered on the host: https://kurl.sh/docs/add-ons/kurl"
    fi
}

# Gather any additional information required from the user that could not be discovered and was not
# passed with a flag
function prompts() {
    if [ -z "$PRIVATE_ADDRESS" ]; then
        promptForPrivateIp
    fi
    # TODO public address? only required for adding SAN to K8s API server cert

    prompt_airgap_preload_images

    if [ "$HA_CLUSTER" = "1" ]; then
        promptForLoadBalancerAddress
    fi
}

# Default read timeout for interactive prompts (word-split into `read` args).
if [ -z "$READ_TIMEOUT" ]; then
    READ_TIMEOUT="-t 20"
fi

# Read a line from the tty into PROMPT_RESULT. $1 may override the timeout
# args; intentionally unquoted so "-t 20" splits into flag+value, and a blank
# override means "wait forever". set +e so a read timeout doesn't kill -e mode.
promptTimeout() {
    set +e
    if [ -z "$FAST_TIMEOUTS" ]; then
        read ${1:-$READ_TIMEOUT} PROMPT_RESULT < /dev/tty
    else
        read ${READ_TIMEOUT} PROMPT_RESULT < /dev/tty
    fi
    set -e
}

# Yes-default confirmation: returns 1 only on an explicit n/N.
confirmY() {
    printf "(Y/n) "
    if [ "$ASSUME_YES" = "1" ]; then
        echo "Y"
        return 0
    fi
    promptTimeout "$@"
    if [ "$PROMPT_RESULT" = "n" ] || [ "$PROMPT_RESULT" = "N" ]; then
        return 1
    fi
    return 0
}

# No-default confirmation: returns 0 only on an explicit y/Y.
confirmN() {
    printf "(y/N) "
    if [ "$ASSUME_YES" = "1" ]; then
        echo "Y"
        return 0
    fi
    promptTimeout "$@"
    if [ "$PROMPT_RESULT" = "y" ] || [ "$PROMPT_RESULT" = "Y" ]; then
        return 0
    fi
    return 1
}

# even if someone has set ASSUME_YES, we shouldn't automatically upload a support bundle
supportBundleConfirmN() {
    printf "(y/N) "
    if [ "$ASSUME_YES" = "1" ]; then
        echo "N"
        return 1
    fi
    promptTimeout "$@"
    if [ "$PROMPT_RESULT" = "y" ] || [ "$PROMPT_RESULT" = "Y" ]; then
        return 0
    fi
    return 1
}

# Blocking free-form prompt into PROMPT_RESULT (skipped under ASSUME_YES).
prompt() {
    if [ "$ASSUME_YES" = "1" ]; then
        return 0
    fi
    set +e
    read PROMPT_RESULT < /dev/tty
    set -e
}

# Resolve the master address/port (from API_SERVICE_ADDRESS or interactively),
# then collect the join token and CA hash.
function joinPrompts() {
    if [ -n "$API_SERVICE_ADDRESS" ]; then
        splitHostPort "$API_SERVICE_ADDRESS"
        if [ -z "$PORT" ]; then
            PORT="6443"
        fi
        KUBERNETES_MASTER_ADDR="$HOST"
        KUBERNETES_MASTER_PORT="$PORT"
        LOAD_BALANCER_ADDRESS="$HOST"
        LOAD_BALANCER_PORT="$PORT"
    else
        promptForMasterAddress
        splitHostPort "$KUBERNETES_MASTER_ADDR"
        if [ -n "$PORT" ]; then
            KUBERNETES_MASTER_ADDR="$HOST"
            KUBERNETES_MASTER_PORT="$PORT"
        else
            KUBERNETES_MASTER_PORT="6443"
        fi
        LOAD_BALANCER_ADDRESS="$KUBERNETES_MASTER_ADDR"
        LOAD_BALANCER_PORT="$KUBERNETES_MASTER_PORT"
        API_SERVICE_ADDRESS="${KUBERNETES_MASTER_ADDR}:${KUBERNETES_MASTER_PORT}"
    fi

    promptForToken
    promptForTokenCAHash
}

# Loop until a non-empty kubeadm join token is entered (unless already set).
promptForToken() {
    if [ -n "$KUBEADM_TOKEN" ]; then
        return
    fi

    printf "Please enter the kubernetes discovery token.\n"
    while true; do
        printf "Kubernetes join token: "
        prompt
        if [ -n "$PROMPT_RESULT" ]; then
            KUBEADM_TOKEN="$PROMPT_RESULT"
            return
        fi
    done
}

# Loop until a non-empty discovery token CA hash is entered (unless already set).
promptForTokenCAHash() {
    if [ -n "$KUBEADM_TOKEN_CA_HASH" ]; then
        return
    fi

    printf "Please enter the discovery token CA's hash.\n"
    while true; do
        printf "Kubernetes discovery token CA hash: "
        prompt
        if [ -n "$PROMPT_RESULT" ]; then
            KUBEADM_TOKEN_CA_HASH="$PROMPT_RESULT"
            return
        fi
    done
}

# Loop until a master address is entered (unless already set).
promptForMasterAddress() {
    if [ -n "$KUBERNETES_MASTER_ADDR" ]; then
        return
    fi

    printf "Please enter the Kubernetes master address.\n"
    printf "e.g. 
10.128.0.4\n"
    while true; do
        printf "Kubernetes master address: "
        prompt
        if [ -n "$PROMPT_RESULT" ]; then
            KUBERNETES_MASTER_ADDR="$PROMPT_RESULT"
            return
        fi
    done
}

# Determine LOAD_BALANCER_ADDRESS/PORT: compare any flag value against the
# cluster's current controlPlaneEndpoint (flagging a change), reuse the existing
# endpoint when no flag was given, prompt as a last resort, and default the
# port to 6443 throughout.
promptForLoadBalancerAddress() {
    local lastLoadBalancerAddress=

    if kubeadm config view >/dev/null 2>&1; then
        lastLoadBalancerAddress="$(kubeadm config view | grep 'controlPlaneEndpoint:' | sed 's/controlPlaneEndpoint: \|"//g')"
        if [ -n "$lastLoadBalancerAddress" ]; then
            splitHostPort "$lastLoadBalancerAddress"
            if [ "$HOST" = "$lastLoadBalancerAddress" ]; then
                # no port present — normalize to the default API server port
                lastLoadBalancerAddress="$lastLoadBalancerAddress:6443"
            fi
        fi
    fi

    if [ -n "$LOAD_BALANCER_ADDRESS" ] && [ -n "$lastLoadBalancerAddress" ]; then
        splitHostPort "$LOAD_BALANCER_ADDRESS"
        if [ "$HOST" = "$LOAD_BALANCER_ADDRESS" ]; then
            LOAD_BALANCER_ADDRESS="$LOAD_BALANCER_ADDRESS:6443"
        fi
        if [ "$LOAD_BALANCER_ADDRESS" != "$lastLoadBalancerAddress" ]; then
            # downstream code uses this to migrate the control plane endpoint
            LOAD_BALANCER_ADDRESS_CHANGED=1
        fi
    fi

    if [ -z "$LOAD_BALANCER_ADDRESS" ] && [ -n "$lastLoadBalancerAddress" ]; then
        LOAD_BALANCER_ADDRESS="$lastLoadBalancerAddress"
    fi

    if [ -z "$LOAD_BALANCER_ADDRESS" ]; then
        printf "Please enter a load balancer address to route external and internal traffic to the API servers.\n"
        printf "In the absence of a load balancer address, all traffic will be routed to the first master.\n"
        printf "Load balancer address: "
        prompt
        LOAD_BALANCER_ADDRESS="$PROMPT_RESULT"
        if [ -z "$LOAD_BALANCER_ADDRESS" ]; then
            LOAD_BALANCER_ADDRESS="$PRIVATE_ADDRESS"
            LOAD_BALANCER_PORT=6443
        fi
    fi

    if [ -z "$LOAD_BALANCER_PORT" ]; then
        splitHostPort "$LOAD_BALANCER_ADDRESS"
        LOAD_BALANCER_ADDRESS="$HOST"
        LOAD_BALANCER_PORT="$PORT"
    fi
    if [ -z "$LOAD_BALANCER_PORT" ]; then
        LOAD_BALANCER_PORT=6443
    fi
}

# if remote nodes are in the cluster and this is an airgap install, prompt the user to run the
# load-images task on all remotes before proceeding because remaining steps may cause pods to
# be scheduled on those nodes with new images.
# Airgap only: for every remote node missing required images, block until the
# operator confirms the load-images task has been run there.
function prompt_airgap_preload_images() {
    if [ "$AIRGAP" != "1" ]; then
        return 0
    fi

    if ! kubernetes_has_remotes; then
        return 0
    fi

    while read -r node; do
        local nodeName=$(echo "$node" | awk '{ print $1 }')
        if [ "$nodeName" = "$(hostname)" ]; then
            continue
        fi
        if kubernetes_node_has_all_images "$nodeName"; then
            continue
        fi
        printf "\nRun this script on node ${GREEN}${nodeName}${NC} to load required images before proceeding:\n"
        printf "\n"
        printf "${GREEN}\tcat ./tasks.sh | sudo bash -s load-images${NC}"
        printf "\n"

        while true; do
            echo ""
            printf "Have images been loaded on node ${nodeName}? "
            # " " overrides the read timeout with nothing — wait indefinitely
            if confirmY " "; then
                break
            fi
        done
    done < <(kubectl get nodes --no-headers)
}

# Prompt for a public IP (2 minute timeout); empty input skips, invalid retries.
promptForPublicIp() {
    if [ -n "$PUBLIC_ADDRESS" ]; then
        return 0;
    fi

    while true; do
        printf "Public IP address: "
        promptTimeout "-t 120"
        if [ -n "$PROMPT_RESULT" ]; then
            if isValidIpv4 "$PROMPT_RESULT"; then
                PUBLIC_ADDRESS=$PROMPT_RESULT
                break
            else
                printf "%s is not a valid ip address.\n" "$PROMPT_RESULT"
            fi
        else
            break
        fi
    done
}

# Discover candidate private IPs from `ip -4 -o addr` (excluding lo, kube-ipvs0,
# docker0 and weave); auto-select a single match, otherwise let the user choose.
promptForPrivateIp() {
    _count=0
    _regex="^[[:digit:]]+: ([^[:space:]]+)[[:space:]]+[[:alnum:]]+ ([[:digit:].]+)"
    while read -r _line; do
        [[ $_line =~ $_regex ]]
        if [ "${BASH_REMATCH[1]}" != "lo" ] && [ "${BASH_REMATCH[1]}" != "kube-ipvs0" ] && [ "${BASH_REMATCH[1]}" != "docker0" ] && [ "${BASH_REMATCH[1]}" != "weave" ]; then
            _iface_names[$((_count))]=${BASH_REMATCH[1]}
            _iface_addrs[$((_count))]=${BASH_REMATCH[2]}
            let "_count += 1"
        fi
    done <<< "$(ip -4 -o addr)"

    if [ "$_count" -eq "0" ]; then
        echo >&2 "Error: The installer couldn't discover any valid network interfaces on this machine."
        echo >&2 "Check your network configuration and re-run this script again."
        echo >&2 "If you want to skip this discovery process, pass the 'local-address' arg to this script, e.g. 'sudo ./install.sh local-address=1.2.3.4'"
        exit 1
    elif [ "$_count" -eq "1" ]; then
        PRIVATE_ADDRESS=${_iface_addrs[0]}
        printf "The installer will use network interface '%s' (with IP address '%s')\n" "${_iface_names[0]}" "${_iface_addrs[0]}"
        return
    fi

    printf "The installer was unable to automatically detect the private IP address of this machine.\n"
    printf "Please choose one of the following network interfaces:\n"
    for i in $(seq 0 $((_count-1))); do
        printf "[%d] %-5s\t%s\n" "$i" "${_iface_names[$i]}" "${_iface_addrs[$i]}"
    done
    while true; do
        printf "Enter desired number (0-%d): " "$((_count-1))"
        prompt
        if [ -z "$PROMPT_RESULT" ]; then
            continue
        fi
        if [ "$PROMPT_RESULT" -ge "0" ] && [ "$PROMPT_RESULT" -lt "$_count" ]; then
            PRIVATE_ADDRESS=${_iface_addrs[$PROMPT_RESULT]}
            printf "The installer will use network interface '%s' (with IP address '%s').\n" "${_iface_names[$PROMPT_RESULT]}" "$PRIVATE_ADDRESS"
            return
        fi
    done
}

# Seed https_proxy before anything is downloaded: prefer env vars, then the
# installer spec file, then the embedded installer yaml.
function proxy_bootstrap() {
    if [ -n "$HTTP_PROXY" ]; then
        ENV_PROXY_ADDRESS="$HTTP_PROXY"
        export https_proxy="$HTTP_PROXY"
        printf "The installer will use the proxy at '%s' (imported from env var 'HTTP_PROXY')\n" "$ENV_PROXY_ADDRESS"
    elif [ -n "$http_proxy" ]; then
        ENV_PROXY_ADDRESS="$http_proxy"
        export https_proxy="$http_proxy"
        printf "The installer will use the proxy at '%s' (imported from env var 'http_proxy')\n" "$ENV_PROXY_ADDRESS"
    elif [ -n "$HTTPS_PROXY" ]; then
        ENV_PROXY_ADDRESS="$HTTPS_PROXY"
        printf "The installer will use the proxy at '%s' (imported from env var 'HTTPS_PROXY')\n" "$ENV_PROXY_ADDRESS"
    elif [ -n "$https_proxy" ]; then
        ENV_PROXY_ADDRESS="$https_proxy"
        printf "The installer will use the proxy at '%s' (imported from env var 'https_proxy')\n" "$ENV_PROXY_ADDRESS"
    fi

    if [ -n "$NO_PROXY" ]; then
        ENV_NO_PROXY="$NO_PROXY"
    elif [ -n "$no_proxy" ]; then
        ENV_NO_PROXY="$no_proxy"
    fi

    # Need to peek at the yaml spec to find if a proxy is needed to download the util binaries
    if [ -n "$INSTALLER_SPEC_FILE" ]; then
        local overrideProxy=$(grep 
"proxyAddress:" "$INSTALLER_SPEC_FILE" | grep -o "http[^'\" ]*")
        if [ -n "$overrideProxy" ]; then
            export https_proxy="$overrideProxy"
            kubectl_no_proxy
            echo "Bootstrapped proxy address from installer spec file: $https_proxy"
            return
        fi
    fi

    local proxy=$(echo "$INSTALLER_YAML" | grep "proxyAddress:" | grep -o "http[^'\" ]*")
    if [ -n "$proxy" ]; then
        export https_proxy="$proxy"
        kubectl_no_proxy
        echo "Bootstrapped proxy address from installer yaml: $https_proxy"
        return
    fi

    if [ -n "$ENV_PROXY_ADDRESS" ]; then
        export https_proxy="$ENV_PROXY_ADDRESS"
        kubectl_no_proxy
        return
    fi
}

# Exclude the local API server endpoint (from admin.conf) from proxying so
# kubectl keeps working once https_proxy is exported.
function kubectl_no_proxy() {
    if [ ! -f /etc/kubernetes/admin.conf ]; then
        return
    fi
    kubectlEndpoint=$(cat /etc/kubernetes/admin.conf | grep 'server:' | awk '{ print $NF }' | sed -E 's/https?:\/\///g')
    splitHostPort "$kubectlEndpoint"
    if [ -n "$no_proxy" ]; then
        export no_proxy="$no_proxy,$HOST"
    else
        export no_proxy="$HOST"
    fi
}

# Settle the final proxy choice: NO_PROXY=1 clears everything; otherwise prefer
# the installer-provided PROXY_ADDRESS, falling back to the env-derived one.
function configure_proxy() {
    if [ "$NO_PROXY" = "1" ]; then
        echo "Not using http proxy"
        unset PROXY_ADDRESS
        unset http_proxy
        unset HTTP_PROXY
        unset https_proxy
        unset HTTPS_PROXY
        return
    fi
    if [ -z "$PROXY_ADDRESS" ] && [ -z "$ENV_PROXY_ADDRESS" ]; then
        return
    fi
    if [ -z "$PROXY_ADDRESS" ]; then
        PROXY_ADDRESS="$ENV_PROXY_ADDRESS"
    fi
    export https_proxy="$PROXY_ADDRESS"
    echo "Using proxy address $PROXY_ADDRESS"
}

# Build and export the full no_proxy list: cluster-internal suffixes, addon
# namespaces, host/LB/master addresses, pod and service CIDRs, plus any
# user-supplied additions, de-duplicated.
function configure_no_proxy() {
    if [ -z "$PROXY_ADDRESS" ]; then
        return
    fi

    local addresses="localhost,127.0.0.1,.svc,.local,.default,kubernetes"

    if [ -n "$ENV_NO_PROXY" ]; then
        addresses="${addresses},${ENV_NO_PROXY}"
    fi
    if [ -n "$KOTSADM_VERSION" ]; then
        addresses="${addresses},kotsadm-api-node"
    fi
    if [ -n "$ROOK_VERSION" ]; then
        addresses="${addresses},.rook-ceph"
    fi
    if [ -n "$FLUENTD_VERSION" ]; then
        addresses="${addresses},.logging"
    fi
    if [ -n "$REGISTRY_VERSION" ]; then
        addresses="${addresses},.kurl"
    fi
    if [ -n "$PROMETHEUS_VERSION" ]; then
        addresses="${addresses},.monitoring"
    fi
    if [ -n "$VELERO_VERSION" ] && [ -n "$VELERO_NAMESPACE" ]; then
        addresses="${addresses},.${VELERO_NAMESPACE}"
    fi
    if [ -n "$MINIO_VERSION" ] && [ -n "$MINIO_NAMESPACE" ]; then
        addresses="${addresses},.${MINIO_NAMESPACE}"
    fi
    if [ -n "$PRIVATE_ADDRESS" ]; then
        addresses="${addresses},${PRIVATE_ADDRESS}"
    fi
    if [ -n "$LOAD_BALANCER_ADDRESS" ]; then
        addresses="${addresses},${LOAD_BALANCER_ADDRESS}"
    fi
    if [ -n "$KUBERNETES_MASTER_ADDR" ]; then
        addresses="${addresses},${KUBERNETES_MASTER_ADDR}"
    fi
    if [ -n "$POD_CIDR" ]; then
        addresses="${addresses},${POD_CIDR}"
    fi
    if [ -n "$SERVICE_CIDR" ]; then
        addresses="${addresses},${SERVICE_CIDR}"
    fi
    if [ -n "$ADDITIONAL_NO_PROXY_ADDRESSES" ]; then
        addresses="${addresses},${ADDITIONAL_NO_PROXY_ADDRESSES}"
    fi

    # filter duplicates
    addresses=$(echo "$addresses" | sed 's/,/\n/g' | sort | uniq | paste -s --delimiters=",")

    # kubeadm requires this in the environment to reach K8s masters
    export no_proxy="$addresses"
    NO_PROXY_ADDRESSES="$addresses"
    echo "Exported no_proxy: $no_proxy"
}

# Host path used for rook local persistent volumes.
PV_BASE_PATH=/opt/replicated/rook

# Scale the rook operator to 0 so it cannot interfere during k8s upgrades
# (no-op when rook 1.x is not installed).
function disable_rook_ceph_operator() {
    if ! is_rook_1; then
        return 0
    fi
    kubectl -n rook-ceph scale deployment rook-ceph-operator --replicas=0
}

# Restore the rook operator to a single replica after an upgrade.
function enable_rook_ceph_operator() {
    if !
is_rook_1; then return 0 fi kubectl -n rook-ceph scale deployment rook-ceph-operator --replicas=1 } function is_rook_1() { kubectl -n rook-ceph get cephblockpools replicapool &>/dev/null } function upgrade_kubernetes() { if [ "$KUBERNETES_UPGRADE" != "1" ]; then enable_rook_ceph_operator return fi disable_rook_ceph_operator upgrade_kubernetes_step upgrade_kubernetes_minor upgrade_kubernetes_patch enable_rook_ceph_operator } function upgrade_kubernetes_step() { if [ "$KUBERNETES_STEP_LOCAL_PRIMARY" == "1" ]; then upgrade_kubernetes_local_master_minor "$STEP_VERSION" fi if [ "$KUBERNETES_STEP_REMOTE_PRIMARIES" == "1" ]; then upgrade_kubernetes_remote_masters_minor "$STEP_VERSION" fi if [ "$KUBERNETES_STEP_SECONDARIES" == "1" ]; then upgrade_kubernetes_workers_minor "$STEP_VERSION" fi } function upgrade_kubernetes_minor() { if [ "$KUBERNETES_UPGRADE_LOCAL_PRIMARY_MINOR" == "1" ]; then upgrade_kubernetes_local_master_minor "$KUBERNETES_VERSION" fi if [ "$KUBERNETES_UPGRADE_REMOTE_PRIMARIES_MINOR" == "1" ]; then upgrade_kubernetes_remote_masters_minor "$KUBERNETES_VERSION" fi if [ "$KUBERNETES_UPGRADE_SECONDARIES_MINOR" == "1" ]; then upgrade_kubernetes_workers_minor "$KUBERNETES_VERSION" fi } function upgrade_kubernetes_patch() { if [ "$KUBERNETES_UPGRADE_LOCAL_PRIMARY_PATCH" == "1" ]; then upgrade_kubernetes_local_master_patch "$KUBERNETES_VERSION" fi if [ "$KUBERNETES_UPGRADE_REMOTE_PRIMARIES_PATCH" == "1" ]; then upgrade_kubernetes_remote_masters_patch "$KUBERNETES_VERSION" fi if [ "$KUBERNETES_UPGRADE_SECONDARIES_PATCH" == "1" ]; then upgrade_kubernetes_workers_patch "$KUBERNETES_VERSION" fi } function upgrade_kubernetes_local_master_patch() { local k8sVersion=$1 local node=$(hostname) load_images $DIR/packages/kubernetes/$k8sVersion/images upgrade_kubeadm "$k8sVersion" kubeadm_config_migrate kubeadm upgrade plan "v${k8sVersion}" printf "${YELLOW}Drain local node and apply upgrade? 
${NC}" confirmY " " kubernetes_drain "$node" spinner_kubernetes_api_stable kubeadm upgrade apply "v$k8sVersion" --yes --config /opt/replicated/kubeadm.conf --force sed -i "s/kubernetesVersion:.*/kubernetesVersion: v${k8sVersion}/" /opt/replicated/kubeadm.conf kubernetes_install_host_packages "$k8sVersion" systemctl daemon-reload systemctl restart kubelet spinner_kubernetes_api_stable kubectl uncordon "$node" spinner_until 120 kubernetes_node_has_version "$node" "$k8sVersion" spinner_until 120 kubernetes_nodes_ready } function upgrade_kubeadm() { local k8sVersion=$1 if [ "$AIRGAP" != "1" ] && [ -n "$DIST_URL" ]; then kubernetes_get_host_packages_online "$k8sVersion" fi case "$LSB_DIST" in ubuntu) cp $DIR/packages/kubernetes/${k8sVersion}/ubuntu-${DIST_VERSION}/kubeadm /usr/bin/kubeadm ;; centos|rhel|amzn) cp $DIR/packages/kubernetes/${k8sVersion}/rhel-7/kubeadm /usr/bin/kubeadm ;; esac chmod a+rx /usr/bin/kubeadm } function upgrade_kubernetes_remote_masters_patch() { while read -r master; do upgrade_kubernetes_remote_node_patch "$master" done < <(try_1m kubernetes_remote_masters) spinner_until 120 kubernetes_nodes_ready } function upgrade_kubernetes_workers_patch() { while read -r worker; do upgrade_kubernetes_remote_node_patch "$worker" done < <(try_1m kubernetes_workers) } function upgrade_kubernetes_remote_node_patch() { # one line of output from `kubectl get nodes` local node="$1" nodeName=$(echo "$node" | awk '{ print $1 }') nodeVersion="$(echo "$node" | awk '{ print $5 }' | sed 's/v//' )" semverParse "$nodeVersion" nodeMinor="$minor" nodePatch="$patch" if [ "$nodeMinor" -gt "$KUBERNETES_TARGET_VERSION_MINOR" ]; then continue fi if [ "$nodeMinor" -eq "$KUBERNETES_TARGET_VERSION_MINOR" ] && [ "$nodePatch" -ge "$KUBERNETES_TARGET_VERSION_PATCH" ]; then continue fi DOCKER_REGISTRY_IP=$(kubectl -n kurl get service registry -o=jsonpath='{@.spec.clusterIP}' 2>/dev/null || echo "") printf "${YELLOW}Drain node $nodeName to prepare for upgrade? 
${NC}" confirmY " " kubernetes_drain "$nodeName" local dockerRegistryIP="" if [ -n "$DOCKER_REGISTRY_IP" ]; then dockerRegistryIP=" docker-registry-ip=$DOCKER_REGISTRY_IP" fi local noProxyAddrs="" if [ -n "$NO_PROXY_ADDRESSES" ]; then noProxyAddrs=" additional-no-proxy-addresses=${NO_PROXY_ADDRESSES}" fi printf "\n\n\tRun the upgrade script on remote node to proceed: ${GREEN}$nodeName${NC}\n\n" if [ "$AIRGAP" = "1" ]; then printf "\t${GREEN}cat upgrade.sh | sudo bash -s airgap kubernetes-version=${KUBERNETES_VERSION}${noProxyAddrs}${dockerRegistryIP}${NC}\n\n" elif [ -z "$KURL_URL" ]; then printf "\t${GREEN}cat upgrade.sh | sudo bash -s kubernetes-version=${KUBERNETES_VERSION}${noProxyAddrs}${dockerRegistryIP}${NC}\n\n" else local prefix="curl $KURL_URL/$INSTALLER_ID/" if [ -z "$KURL_URL" ]; then prefix="cat " fi printf "\t${GREEN} ${prefix}upgrade.sh | sudo bash -s kubernetes-version=${KUBERNETES_VERSION}${noProxyAddrs}${dockerRegistryIP}${NC}\n\n" fi spinner_until -1 kubernetes_node_has_version "$nodeName" "$KUBERNETES_VERSION" logSuccess "Kubernetes $KUBERNETES_VERSION detected on $nodeName" kubectl uncordon "$nodeName" } function upgrade_kubernetes_local_master_minor() { local k8sVersion=$1 local node=$(hostname) load_images $DIR/packages/kubernetes/$k8sVersion/images upgrade_kubeadm "$k8sVersion" kubeadm_config_migrate kubeadm upgrade plan "v${k8sVersion}" printf "${YELLOW}Drain local node and apply upgrade? 
${NC}" confirmY " " spinner_kubernetes_api_stable kubeadm upgrade apply "v$k8sVersion" --yes --config /opt/replicated/kubeadm.conf --force upgrade_etcd_image_18 "$k8sVersion" sed -i "s/kubernetesVersion:.*/kubernetesVersion: v${k8sVersion}/" /opt/replicated/kubeadm.conf kubernetes_install_host_packages "$k8sVersion" systemctl daemon-reload systemctl restart kubelet spinner_kubernetes_api_stable kubectl uncordon "$node" # force deleting the cache because the api server will use the stale API versions after kubeadm upgrade rm -rf $HOME/.kube spinner_until 120 kubernetes_node_has_version "$node" "$k8sVersion" spinner_until 120 kubernetes_nodes_ready } function upgrade_kubernetes_remote_masters_minor() { local k8sVersion="$1" while read -r master; do upgrade_kubernetes_remote_node_minor "$master" "$k8sVersion" done < <(try_1m kubernetes_remote_masters) spinner_until 120 kubernetes_nodes_ready } function upgrade_kubernetes_workers_minor() { local k8sVersion="$1" while read -r worker; do upgrade_kubernetes_remote_node_minor "$worker" "$k8sVersion" done < <(try_1m kubernetes_workers) } function upgrade_kubernetes_remote_node_minor() { # one line of output from `kubectl get nodes` local node="$1" local targetK8sVersion="$2" nodeName=$(echo "$node" | awk '{ print $1 }') nodeVersion="$(echo "$node" | awk '{ print $5 }' | sed 's/v//' )" semverParse "$nodeVersion" nodeMinor="$minor" nodePatch="$patch" semverParse "$targetK8sVersion" local targetMinor="$minor" local targetPatch="$patch" if [ "$nodeMinor" -ge "$targetMinor" ]; then continue fi DOCKER_REGISTRY_IP=$(kubectl -n kurl get service registry -o=jsonpath='{@.spec.clusterIP}' 2>/dev/null || echo "") printf "${YELLOW}Drain node $nodeName to prepare for upgrade? 
${NC}" confirmY " " kubernetes_drain "$nodeName" local dockerRegistryIP="" if [ -n "$DOCKER_REGISTRY_IP" ]; then dockerRegistryIP=" docker-registry-ip=$DOCKER_REGISTRY_IP" fi local noProxyAddrs="" if [ -n "$NO_PROXY_ADDRESSES" ]; then noProxyAddrs=" additional-no-proxy-addresses=${NO_PROXY_ADDRESSES}" fi printf "\n\n\tRun the upgrade script on remote node to proceed: ${GREEN}$nodeName${NC}\n\n" if [ "$AIRGAP" = "1" ]; then printf "\t${GREEN}cat upgrade.sh | sudo bash -s airgap kubernetes-version=${targetK8sVersion}${noProxyAddrs}${dockerRegistryIP}${NC}\n\n" elif [ -z "$KURL_URL" ]; then printf "\t${GREEN}cat upgrade.sh | sudo bash -s kubernetes-version=${targetK8sVersion}${noProxyAddrs}${dockerRegistryIP}${NC}\n\n" else local prefix="curl $KURL_URL/$INSTALLER_ID/" if [ -z "$KURL_URL" ]; then prefix="cat " fi printf "\t${GREEN} ${prefix}upgrade.sh | sudo bash -s kubernetes-version=${targetK8sVersion}${noProxyAddrs}${dockerRegistryIP}${NC}\n\n" fi rm -rf $HOME/.kube spinner_until -1 kubernetes_node_has_version "$nodeName" "$targetK8sVersion" logSuccess "Kubernetes $targetK8sVersion detected on $nodeName" kubectl uncordon "$nodeName" spinner_until 120 kubernetes_nodes_ready } # In k8s 1.18 the etcd image tag changed from 3.4.3 to 3.4.3-0 but kubeadm does not rewrite the # etcd manifest to use the new tag. When kubeadm init is run after the upgrade it switches to the # tag and etcd takes a few minutes to restart, which often results in kubeadm init failing. 
This # forces use of the updated tag so that the restart of etcd happens during upgrade when the node is # already drained function upgrade_etcd_image_18() { semverParse "$1" if [ "$minor" != "18" ]; then return 0 fi local etcd_tag=$(kubeadm config images list 2>/dev/null | grep etcd | awk -F':' '{ print $NF }') sed -i "s/image: k8s.gcr.io\/etcd:.*/image: k8s.gcr.io\/etcd:$etcd_tag/" /etc/kubernetes/manifests/etcd.yaml } function kubeadm_config_migrate() { # Does not migrate kube-proxy or kubelet configs kubeadm config migrate --old-config $KUBEADM_CONF_FILE --new-config $KUBEADM_CONF_FILE cat << EOF >> $KUBEADM_CONF_FILE --- apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration cgroupDriver: systemd --- EOF kubectl -n kube-system get configmaps kube-proxy -o yaml > /tmp/temp.yaml $DIR/bin/yamlutil -p -fp /tmp/temp.yaml -yp data_config.conf cat /tmp/temp.yaml >> /opt/replicated/kubeadm.conf rm /tmp/temp.yaml } function download_util_binaries() { if [ -z "$AIRGAP" ] && [ -n "$DIST_URL" ]; then curl -Ss -L $DIST_URL/$KURL_BIN_UTILS_FILE | tar zx fi BIN_SYSTEM_CONFIG=./bin/config BIN_YAMLUTIL=./bin/yamlutil BIN_DOCKER_CONFIG=./bin/docker-config BIN_SUBNET=./bin/subnet BIN_INSTALLERMERGE=./bin/installermerge BIN_YAMLTOBASH=./bin/yamltobash BIN_BASHTOYAML=./bin/bashmerge mkdir -p /tmp/kurl-bin-utils/scripts CONFIGURE_SELINUX_SCRIPT=/tmp/kurl-bin-utils/scripts/configure_selinux.sh CONFIGURE_FIREWALLD_SCRIPT=/tmp/kurl-bin-utils/scripts/configure_firewalld.sh CONFIGURE_IPTABLES_SCRIPT=/tmp/kurl-bin-utils/scripts/configure_iptables.sh mkdir -p /tmp/kurl-bin-utils/specs MERGED_YAML_SPEC=/tmp/kurl-bin-utils/specs/merged.yaml PARSED_YAML_SPEC=/tmp/kurl-bin-utils/scripts/variables.sh } function apply_bash_flag_overrides() { if [ -n "$1" ] || [ -n "$IS_CURRENTLY_HA" ]; then temp_var="$@" if [ -n "$IS_CURRENTLY_HA" ]; then temp_var="$temp_var ha" #this might end up duplicating the 'ha' flag, but our parsing doesn't care about duplicates fi $BIN_BASHTOYAML -c 
$MERGED_YAML_SPEC -f "$temp_var" fi } function parse_yaml_into_bash_variables() { $BIN_YAMLTOBASH -i $MERGED_YAML_SPEC -b $PARSED_YAML_SPEC source $PARSED_YAML_SPEC rm $PARSED_YAML_SPEC } parse_kubernetes_target_version() { semverParse "$KUBERNETES_VERSION" KUBERNETES_TARGET_VERSION_MAJOR="$major" KUBERNETES_TARGET_VERSION_MINOR="$minor" KUBERNETES_TARGET_VERSION_PATCH="$patch" } function yaml_airgap() { # this is needed because the parsing for yaml comes after the first occasion where the $AIRGAP flag is used # we also account for if $INSTALLER_YAML spec has "$AIRGAP and "INSTALLER_SPEC_FILE spec turns it off" if [[ "$INSTALLER_YAML" =~ "airgap: true" ]]; then AIRGAP="1" fi if [ -n "$INSTALLER_SPEC_FILE" ]; then if grep -q "airgap: true" $INSTALLER_SPEC_FILE; then AIRGAP="1" fi if grep -q "airgap: false" $INSTALLER_SPEC_FILE; then AIRGAP="" fi fi } function get_patch_yaml() { while [ "$1" != "" ]; do _param="$(echo "$1" | cut -d= -f1)" _value="$(echo "$1" | grep '=' | cut -d= -f2-)" case $_param in installer-spec-file) INSTALLER_SPEC_FILE="$_value" ;; additional-no-proxy-addresses) ;; airgap) AIRGAP="1" ;; kurl-registry-ip) KURL_REGISTRY_IP="$_value" ;; cert-key) ;; control-plane) ;; docker-registry-ip) ;; ha) ;; kubeadm-token) ;; kubeadm-token-ca-hash) ;; kubernetes-master-address) ;; kubernetes-version) ;; load-balancer-address) ;; preserve-docker-config) ;; preserve-firewalld-config) ;; preserve-iptables-config) ;; preserve-selinux-config) ;; public-address) ;; yes) ASSUME_YES=1 ;; auto-upgrades-enabled) AUTO_UPGRADES_ENABLED=1 ;; *) echo >&2 "Error: unknown parameter \"$_param\"" exit 1 ;; esac shift done } function merge_yaml_specs() { if [ -z "$INSTALLER_SPEC_FILE" ] && [ -z "$INSTALLER_YAML" ]; then echo "no yaml spec found" bail fi if [ -z "$INSTALLER_YAML" ]; then cp -f $INSTALLER_SPEC_FILE $MERGED_YAML_SPEC ONLY_APPLY_MERGED=1 return fi if [ -z "$INSTALLER_SPEC_FILE" ]; then cat > $MERGED_YAML_SPEC < /tmp/vendor_kurl_installer_spec_docker.yaml </dev/null 
| grep 'master' | wc -l) #get nodes with the 'master' role, and then search for 'master' to remove the column labels row if [ "$master_count" -gt 1 ]; then IS_CURRENTLY_HA="true" fi } function render_yaml() { eval "echo \"$(cat $DIR/yaml/$1)\"" } function render_yaml_file() { eval "echo \"$(cat $1)\"" } function render_file() { eval "echo \"$(cat $1)\"" } function insert_patches_strategic_merge() { local kustomization_file="$1" local patch_file="$2" if ! grep -q "patchesStrategicMerge" "$kustomization_file"; then echo "patchesStrategicMerge:" >> "$kustomization_file" fi sed -i "/patchesStrategicMerge.*/a - $patch_file" "$kustomization_file" } function insert_resources() { local kustomization_file="$1" local resource_file="$2" if ! grep -q "resources" "$kustomization_file"; then echo "resources:" >> "$kustomization_file" fi sed -i "/resources.*/a - $resource_file" "$kustomization_file" } function setup_kubeadm_kustomize() { # Clean up the source directories for the kubeadm kustomize resources and # patches. rm -rf $DIR/kustomize/kubeadm/init cp -rf $DIR/kustomize/kubeadm/init-orig $DIR/kustomize/kubeadm/init rm -rf $DIR/kustomize/kubeadm/join cp -rf $DIR/kustomize/kubeadm/join-orig $DIR/kustomize/kubeadm/join rm -rf $DIR/kustomize/kubeadm/init-patches mkdir -p $DIR/kustomize/kubeadm/init-patches rm -rf $DIR/kustomize/kubeadm/join-patches mkdir -p $DIR/kustomize/kubeadm/join-patches if [ -n "$USE_STANDARD_PORT_RANGE" ]; then sed -i 's/80-60000/30000-32767/g' $DIR/kustomize/kubeadm/init/kubeadm-cluster-config-v1beta2.yml fi } function apply_installer_crd() { INSTALLER_CRD_DEFINITION="$DIR/kurlkinds/cluster.kurl.sh_installers.yaml" kubectl apply -f "$INSTALLER_CRD_DEFINITION" if [ -z "$ONLY_APPLY_MERGED" ] && [ -n "$INSTALLER_YAML" ]; then ORIGINAL_INSTALLER_SPEC=/tmp/kurl-bin-utils/specs/original.yaml cat > $ORIGINAL_INSTALLER_SPEC < /tmp/Corefile # Example lines to replace from k8s 1.17 and 1.19 # "forward . /etc/resolv.conf" => "forward . 8.8.8.8" # "forward . 
/etc/resolv.conf {" => "forward . 8.8.8.8 {" sed -i "s/forward \. \/etc\/resolv\.conf/forward \. ${NAMESERVER}/" /tmp/Corefile kubectl -n kube-system replace configmap coredns -f /tmp/Corefile kubectl -n kube-system rollout restart deployment/coredns } function init() { logStep "Initialize Kubernetes" kubernetes_maybe_generate_bootstrap_token API_SERVICE_ADDRESS="$PRIVATE_ADDRESS:6443" if [ "$HA_CLUSTER" = "1" ]; then API_SERVICE_ADDRESS="$LOAD_BALANCER_ADDRESS:$LOAD_BALANCER_PORT" fi local oldLoadBalancerAddress=$(kubernetes_load_balancer_address) if commandExists ekco_handle_load_balancer_address_change_pre_init; then ekco_handle_load_balancer_address_change_pre_init $oldLoadBalancerAddress $LOAD_BALANCER_ADDRESS fi kustomize_kubeadm_init=./kustomize/kubeadm/init CERT_KEY= CERT_KEY_EXPIRY= if [ "$HA_CLUSTER" = "1" ]; then CERT_KEY=$(< /dev/urandom tr -dc a-f0-9 | head -c64) CERT_KEY_EXPIRY=$(TZ="UTC" date -d "+2 hour" --rfc-3339=second | sed 's/ /T/') insert_patches_strategic_merge \ $kustomize_kubeadm_init/kustomization.yaml \ patch-certificate-key.yaml fi # kustomize can merge multiple list patches in some cases but it is not working for me on the # ClusterConfiguration.apiServer.certSANs list if [ -n "$PUBLIC_ADDRESS" ] && [ -n "$LOAD_BALANCER_ADDRESS" ]; then insert_patches_strategic_merge \ $kustomize_kubeadm_init/kustomization.yaml \ patch-public-and-load-balancer-address.yaml elif [ -n "$PUBLIC_ADDRESS" ]; then insert_patches_strategic_merge \ $kustomize_kubeadm_init/kustomization.yaml \ patch-public-address.yaml elif [ -n "$LOAD_BALANCER_ADDRESS" ]; then insert_patches_strategic_merge \ $kustomize_kubeadm_init/kustomization.yaml \ patch-load-balancer-address.yaml fi # Add kubeadm init patches from addons. 
for patch in $(ls -1 ${kustomize_kubeadm_init}-patches/* 2>/dev/null || echo); do patch_basename="$(basename $patch)" cp $patch $kustomize_kubeadm_init/$patch_basename insert_patches_strategic_merge \ $kustomize_kubeadm_init/kustomization.yaml \ $patch_basename done mkdir -p "$KUBEADM_CONF_DIR" kubectl kustomize $kustomize_kubeadm_init > $KUBEADM_CONF_DIR/kubeadm-init-raw.yaml render_yaml_file $KUBEADM_CONF_DIR/kubeadm-init-raw.yaml > $KUBEADM_CONF_FILE # kustomize requires assests have a metadata field while kubeadm config will reject yaml containing it # this uses a go binary found in kurl/cmd/yamlutil to strip the metadata field from the yaml # cp $KUBEADM_CONF_FILE $KUBEADM_CONF_DIR/kubeadm_conf_copy_in $DIR/bin/yamlutil -r -fp $KUBEADM_CONF_DIR/kubeadm_conf_copy_in -yf metadata mv $KUBEADM_CONF_DIR/kubeadm_conf_copy_in $KUBEADM_CONF_FILE cat << EOF >> $KUBEADM_CONF_FILE apiVersion: kubelet.config.k8s.io/v1beta1 kind: KubeletConfiguration cgroupDriver: systemd --- EOF # When no_proxy changes kubeadm init rewrites the static manifests and fails because the api is # restarting. Trigger the restart ahead of time and wait for it to be healthy. if [ -f "/etc/kubernetes/manifests/kube-apiserver.yaml" ] && [ -n "$no_proxy" ] && ! cat /etc/kubernetes/manifests/kube-apiserver.yaml | grep -q "$no_proxy"; then kubeadm init phase control-plane apiserver --config $KUBEADM_CONF_FILE sleep 2 if ! spinner_until 60 kubernetes_api_is_healthy; then echo "Failed to wait for kubernetes API restart after no_proxy change" # continue fi fi if [ "$HA_CLUSTER" = "1" ]; then UPLOAD_CERTS="--upload-certs" fi # kubeadm init temporarily taints this node which causes rook to move any mons on it and may # lead to a loss of quorum disable_rook_ceph_operator # since K8s 1.19.1 kubeconfigs point to local API server even in HA setup. 
When upgrading from # earlier versions and using a load balancer, kubeadm init will bail because the kubeconfigs # already exist pointing to the load balancer rm -f /etc/kubernetes/*.conf # Regenerate api server cert in case load balancer address changed if [ -f /etc/kubernetes/pki/apiserver.crt ]; then mv -f /etc/kubernetes/pki/apiserver.crt /tmp/ fi if [ -f /etc/kubernetes/pki/apiserver.key ]; then mv -f /etc/kubernetes/pki/apiserver.key /tmp/ fi set -o pipefail kubeadm init \ --ignore-preflight-errors=all \ --config $KUBEADM_CONF_FILE \ $UPLOAD_CERTS \ | tee /tmp/kubeadm-init set +o pipefail if [ -n "$LOAD_BALANCER_ADDRESS" ]; then spinner_until 120 cert_has_san "$PRIVATE_ADDRESS:6443" "$LOAD_BALANCER_ADDRESS" fi spinner_kubernetes_api_stable exportKubeconfig KUBEADM_TOKEN_CA_HASH=$(cat /tmp/kubeadm-init | grep 'discovery-token-ca-cert-hash' | awk '{ print $2 }' | head -1) waitForNodes enable_rook_ceph_operator DID_INIT_KUBERNETES=1 logSuccess "Kubernetes Master Initialized" local currentLoadBalancerAddress=$(kubernetes_load_balancer_address) if [ "$currentLoadBalancerAddress" != "$oldLoadBalancerAddress" ]; then # restart scheduler and controller-manager on this node so they use the new address mv /etc/kubernetes/manifests/kube-scheduler.yaml /tmp/ && sleep 1 && mv /tmp/kube-scheduler.yaml /etc/kubernetes/manifests/ mv /etc/kubernetes/manifests/kube-controller-manager.yaml /tmp/ && sleep 1 && mv /tmp/kube-controller-manager.yaml /etc/kubernetes/manifests/ # restart kube-proxies so they use the new address kubectl -n kube-system delete pods --selector=k8s-app=kube-proxy if kubernetes_has_remotes; then local proxyFlag="" if [ -n "$PROXY_ADDRESS" ]; then proxyFlag=" -x $PROXY_ADDRESS" fi local prefix="curl -sSL${proxyFlag} $KURL_URL/$INSTALLER_ID/" if [ "$AIRGAP" = "1" ] || [ -z "$KURL_URL" ]; then prefix="cat " fi printf "${YELLOW}\nThe load balancer address has changed. 
Run the following on all remote nodes to use the new address${NC}\n" printf "\n" printf "${GREEN} ${prefix}tasks.sh | sudo bash -s set-kubeconfig-server https://${currentLoadBalancerAddress}${NC}\n" printf "\n" printf "Continue? " confirmY " " if commandExists ekco_handle_load_balancer_address_change_post_init; then ekco_handle_load_balancer_address_change_post_init $oldLoadBalancerAddress $LOAD_BALANCER_ADDRESS fi fi fi labelNodes kubectl cluster-info # create kurl namespace if it doesn't exist kubectl get ns kurl 2>/dev/null 1>/dev/null || kubectl create ns kurl 1>/dev/null logSuccess "Cluster Initialized" configure_coredns if commandExists containerd_registry_init; then containerd_registry_init fi } function post_init() { BOOTSTRAP_TOKEN_EXPIRY=$(kubeadm token list | grep $BOOTSTRAP_TOKEN | awk '{print $3}') kurl_config } function kubernetes_maybe_generate_bootstrap_token() { if [ -z "$BOOTSTRAP_TOKEN" ]; then logStep "generate kubernetes bootstrap token" BOOTSTRAP_TOKEN=$(kubeadm token generate) fi echo "Kubernetes bootstrap token: ${BOOTSTRAP_TOKEN}" echo "This token will expire in 24 hours" } function kurl_config() { if kubernetes_resource_exists kube-system configmap kurl-config; then kubectl -n kube-system delete configmap kurl-config fi kubectl -n kube-system create configmap kurl-config \ --from-literal=kurl_url="$KURL_URL" \ --from-literal=installer_id="$INSTALLER_ID" \ --from-literal=ha="$HA_CLUSTER" \ --from-literal=airgap="$AIRGAP" \ --from-literal=ca_hash="$KUBEADM_TOKEN_CA_HASH" \ --from-literal=docker_registry_ip="$DOCKER_REGISTRY_IP" \ --from-literal=kubernetes_api_address="$API_SERVICE_ADDRESS" \ --from-literal=bootstrap_token="$BOOTSTRAP_TOKEN" \ --from-literal=bootstrap_token_expiration="$BOOTSTRAP_TOKEN_EXPIRY" \ --from-literal=cert_key="$CERT_KEY" \ --from-literal=upload_certs_expiration="$CERT_KEY_EXPIRY" \ --from-literal=service_cidr="$SERVICE_CIDR" \ --from-literal=pod_cidr="$POD_CIDR" } function outro() { echo if [ -z "$PUBLIC_ADDRESS" ]; 
then if [ -z "$PRIVATE_ADDRESS" ]; then PUBLIC_ADDRESS="" PRIVATE_ADDRESS="" else PUBLIC_ADDRESS="$PRIVATE_ADDRESS" fi fi local dockerRegistryIP="" if [ -n "$DOCKER_REGISTRY_IP" ]; then dockerRegistryIP=" docker-registry-ip=$DOCKER_REGISTRY_IP" fi local proxyFlag="" local noProxyAddrs="" if [ -n "$PROXY_ADDRESS" ]; then proxyFlag=" -x $PROXY_ADDRESS" noProxyAddrs=" additional-no-proxy-addresses=${SERVICE_CIDR},${POD_CIDR}" fi KUBEADM_TOKEN_CA_HASH=$(cat /tmp/kubeadm-init | grep 'discovery-token-ca-cert-hash' | awk '{ print $2 }' | head -1) printf "\n" printf "\t\t${GREEN}Installation${NC}\n" printf "\t\t${GREEN} Complete ✔${NC}\n" addon_outro printf "\n" kubeconfig_setup_outro printf "\n" if [ "$OUTRO_NOTIFIY_TO_RESTART_DOCKER" = "1" ]; then printf "\n" printf "\n" printf "The local /etc/docker/daemon.json has been merged with the spec from the installer, but has not been applied. To apply restart docker." printf "\n" printf "\n" printf "${GREEN} systemctl daemon-reload${NC}\n" printf "${GREEN} systemctl restart docker${NC}\n" printf "\n" printf "These settings will automatically be applied on the next restart." 
printf "\n" fi printf "\n" printf "\n" local prefix="curl -sSL${proxyFlag} $KURL_URL/$INSTALLER_ID/" if [ -z "$KURL_URL" ]; then prefix="cat " fi if [ "$HA_CLUSTER" = "1" ]; then printf "Master node join commands expire after two hours, and worker node join commands expire after 24 hours.\n" printf "\n" if [ "$AIRGAP" = "1" ]; then printf "To generate new node join commands, run ${GREEN}cat ./tasks.sh | sudo bash -s join_token ha airgap${NC} on an existing master node.\n" else printf "To generate new node join commands, run ${GREEN}${prefix}tasks.sh | sudo bash -s join_token ha${NC} on an existing master node.\n" fi else printf "Node join commands expire after 24 hours.\n" printf "\n" if [ "$AIRGAP" = "1" ]; then printf "To generate new node join commands, run ${GREEN}cat ./tasks.sh | sudo bash -s join_token airgap${NC} on this node.\n" else printf "To generate new node join commands, run ${GREEN}${prefix}tasks.sh | sudo bash -s join_token${NC} on this node.\n" fi fi if [ "$AIRGAP" = "1" ]; then printf "\n" printf "To add worker nodes to this installation, copy and unpack this bundle on your other nodes, and run the following:" printf "\n" printf "\n" printf "${GREEN} cat ./join.sh | sudo bash -s airgap kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=${KUBEADM_TOKEN_CA_HASH} kubernetes-version=${KUBERNETES_VERSION}${dockerRegistryIP}${noProxyAddrs}\n" printf "${NC}" printf "\n" printf "\n" if [ "$HA_CLUSTER" = "1" ]; then printf "\n" printf "To add ${GREEN}MASTER${NC} nodes to this installation, copy and unpack this bundle on your other nodes, and run the following:" printf "\n" printf "\n" printf "${GREEN} cat ./join.sh | sudo bash -s airgap kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=${KUBEADM_TOKEN_CA_HASH} kubernetes-version=${KUBERNETES_VERSION} cert-key=${CERT_KEY} control-plane${dockerRegistryIP}${noProxyAddrs}\n" printf "${NC}" printf "\n" 
printf "\n" fi else printf "\n" printf "To add worker nodes to this installation, run the following script on your other nodes:" printf "\n" printf "${GREEN} ${prefix}join.sh | sudo bash -s kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=${KUBEADM_TOKEN_CA_HASH} kubernetes-version=${KUBERNETES_VERSION}${dockerRegistryIP}${noProxyAddrs}\n" printf "${NC}" printf "\n" printf "\n" if [ "$HA_CLUSTER" = "1" ]; then printf "\n" printf "To add ${GREEN}MASTER${NC} nodes to this installation, run the following script on your other nodes:" printf "\n" printf "${GREEN} ${prefix}join.sh | sudo bash -s kubernetes-master-address=${API_SERVICE_ADDRESS} kubeadm-token=${BOOTSTRAP_TOKEN} kubeadm-token-ca-hash=$KUBEADM_TOKEN_CA_HASH kubernetes-version=${KUBERNETES_VERSION} cert-key=${CERT_KEY} control-plane${dockerRegistryIP}${noProxyAddrs}\n" printf "${NC}" printf "\n" printf "\n" fi fi } function main() { export KUBECONFIG=/etc/kubernetes/admin.conf require_root_user get_patch_yaml "$@" yaml_airgap proxy_bootstrap download_util_binaries merge_yaml_specs is_ha apply_bash_flag_overrides "$@" parse_yaml_into_bash_variables parse_kubernetes_target_version discover full-cluster report_install_start trap prek8s_ctrl_c SIGINT # trap ctrl+c (SIGINT) and handle it by reporting that the user exited intentionally preflights prompts journald_persistent configure_proxy addon_for_each addon_pre_init discover_pod_subnet discover_service_subnet configure_no_proxy install_cri get_shared upgrade_kubernetes kubernetes_host setup_kubeadm_kustomize trap k8s_ctrl_c SIGINT # trap ctrl+c (SIGINT) and handle it by asking for a support bundle - only do this after k8s is installed addon_for_each addon_load init apply_installer_crd type create_registry_service &> /dev/null && create_registry_service # this function is in an optional addon and may be missing addon_for_each addon_install post_init outro report_install_success } main "$@"