
Kubernetes (HA)



🏆 FINAL BOSS GUIDE: MODULE B (KUBERNETES HA)

1. DNS Preparation (Item 12.e)
echo "P@ssw0rd" | kinit admin && ipa dnsconfig-mod --forwarder=77.88.8.8
ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null altlinux@192.168.0.101
echo "P@ssw0rd" | kinit admin && ipa dnsrecord-add au-team.cloud haproxy --a-rec=192.168.0.253
exit
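
A quick resolution check from adm-pc (assumes dig from bind-utils is installed; any resolver client will do):
dig @192.168.0.101 haproxy.au-team.cloud +short   # expect 192.168.0.253
dig @192.168.0.101 ya.ru +short                   # exercises the 77.88.8.8 forwarder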


adm-pc
su - admin01

sudo apt-get install wget

mkdir -p ~/infra/ansible/files && \
wget -4 https://raw.githubusercontent.com/projectcalico/calico/v3.27.3/manifests/tigera-operator.yaml -O ~/infra/ansible/files/tigera-operator.yaml && \
wget -4 https://raw.githubusercontent.com/projectcalico/calico/v3.27.3/manifests/custom-resources.yaml -O ~/infra/ansible/files/custom-resources.yaml && \
wget -4 https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml -O ~/infra/ansible/files/nodelocaldns.yaml
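
These downloads occasionally return an HTML error page instead of YAML, so a quick sanity check is cheap (same paths as above):
for f in ~/infra/ansible/files/*.yaml; do echo "== $f"; head -1 "$f"; wc -l "$f"; done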


2. Ansible Inventory
nano ~/infra/ansible/inventories/staging/k8s_hosts


[masters]
k8s-srv1.au-team.cloud ansible_host=192.168.0.201
k8s-srv2.au-team.cloud ansible_host=192.168.0.202
k8s-srv3.au-team.cloud ansible_host=192.168.0.203

[workers]
k8s-srv4.au-team.cloud ansible_host=192.168.0.204
k8s-srv5.au-team.cloud ansible_host=192.168.0.205

[k8s_nodes:children]
masters
workers

[k8s_nodes:vars]
ansible_user=altlinux
ansible_become=yes
ansible_become_method=sudo
ansible_become_password=P@ssw0rd
ansible_ssh_common_args='-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null'
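
Before running the playbook, it is worth confirming Ansible can actually reach and escalate on every node (assumes the venv used in step 4 already exists; a system-wide ansible works the same way):
cd ~/infra/ansible
source venv/ansible/bin/activate
ansible -i inventories/staging/k8s_hosts k8s_nodes -m ping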

3. The Ultimate Playbook (install_k8s.yml)

nano ~/infra/ansible/install_k8s.yml

---
# 1. PREP AND PACKAGES
- name: 1. Base Infrastructure Setup
  hosts: k8s_nodes
  become: yes
  tasks:
    - name: Heal OS State & Core Prep
      shell: |
        # DNS: IPA server first, public fallback second
        echo "nameserver 192.168.0.101" > /etc/resolv.conf
        echo "nameserver 8.8.8.8" >> /etc/resolv.conf
        # Force apt over IPv4 and drop any custom address-selection policy
        echo 'Acquire::ForceIPv4 "true";' > /etc/apt/apt.conf.d/99force-ipv4
        rm -f /etc/gai.conf
        # Reset IPv6 to a known state; it is disabled for good in a later task
        sysctl -w net.ipv6.conf.all.disable_ipv6=0
        # kubelet refuses to start with swap enabled
        swapoff -a
        sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
        # Kernel modules for container networking and the IPVS kube-proxy mode
        modprobe overlay && modprobe br_netfilter && modprobe ip_vs && modprobe ip_vs_rr && modprobe ip_vs_wrr && modprobe ip_vs_sh && modprobe ip_set && modprobe nf_conntrack
        echo -e "overlay\nbr_netfilter\nnf_conntrack\nip_vs\nip_vs_rr\nip_vs_wrr\nip_vs_sh\nip_set" > /etc/modules-load.d/k8s.conf
        echo -e "net.bridge.bridge-nf-call-iptables = 1\nnet.bridge.bridge-nf-call-ip6tables = 1\nnet.ipv4.ip_forward = 1" > /etc/sysctl.d/k8s.conf
        sysctl --system

    - name: Install Packages
      shell: |
        # Point apt at the ALT p11 branch only, then install the k8s 1.35 stack
        rm -f /etc/apt/sources.list.d/*
        echo "rpm http://ftp.altlinux.org/pub/distributions/ALTLinux/p11/branch x86_64 classic" > /etc/apt/sources.list
        echo "rpm http://ftp.altlinux.org/pub/distributions/ALTLinux/p11/branch x86_64-i586 classic" >> /etc/apt/sources.list
        echo "rpm http://ftp.altlinux.org/pub/distributions/ALTLinux/p11/branch noarch classic" >> /etc/apt/sources.list
        rm -rf /var/lib/apt/lists/* && mkdir -p /var/lib/apt/lists/partial
        apt-get update && apt-get install -y containerd cri-tools1.35 kubernetes1.35-kubeadm kubernetes1.35-kubelet kubernetes1.35-client haproxy keepalived wget

    - name: Configure containerd and kubelet
      shell: |
        # Default containerd config, switched to the systemd cgroup driver
        mkdir -p /etc/containerd
        containerd config default > /etc/containerd/config.toml
        sed -i 's/SystemdCgroup = false/SystemdCgroup = true/g' /etc/containerd/config.toml
        # Point crictl at the containerd socket
        echo -e "runtime-endpoint: unix:///run/containerd/containerd.sock\nimage-endpoint: unix:///run/containerd/containerd.sock" > /etc/crictl.yaml
        systemctl restart containerd && systemctl enable --now kubelet

    - name: Disable IPv6
      sysctl:
        name: "{{ item }}"
        value: '1'
        state: present
        reload: yes
      loop:
        - net.ipv6.conf.all.disable_ipv6
        - net.ipv6.conf.default.disable_ipv6
        - net.ipv6.conf.lo.disable_ipv6
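
# (Optional sanity check, not part of the playbook: run these by hand on any
#  node after this play; crictl and systemctl come from the packages above.)
#   crictl info | head -5
#   systemctl is-active containerd kubelet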

# 2. HIGH AVAILABILITY (HAProxy + Keepalived)
- name: 2. HA Setup
  hosts: masters
  become: yes
  tasks:
    - name: Configure HA stack
      shell: |
        # HAProxy: TCP frontend on :7443 balancing across the three apiservers
        cat <<EOF > /etc/haproxy/haproxy.cfg
        global
          daemon
        defaults
          mode tcp
          timeout connect 5000ms
          timeout client 50000ms
          timeout server 50000ms
        frontend k8s-api
          bind *:7443
          default_backend k8s-api-nodes
        backend k8s-api-nodes
          balance roundrobin
          server k8s-srv1 192.168.0.201:6443 check
          server k8s-srv2 192.168.0.202:6443 check
          server k8s-srv3 192.168.0.203:6443 check
        listen stats
          bind *:9000
          mode http
          stats enable
          stats uri /haproxy_stats
        EOF
        # Keepalived: srv1 owns the VIP; keepalived expects one directive per
        # line, so the blocks must not be collapsed with semicolons
        cat <<EOF > /etc/keepalived/keepalived.conf
        vrrp_instance VI_1 {
            state {{ 'MASTER' if inventory_hostname == 'k8s-srv1.au-team.cloud' else 'BACKUP' }}
            interface eth0
            virtual_router_id 51
            priority {{ 110 if inventory_hostname == 'k8s-srv1.au-team.cloud' else 105 if inventory_hostname == 'k8s-srv2.au-team.cloud' else 100 }}
            advert_int 1
            authentication {
                auth_type PASS
                auth_pass K8S_HA_PASS
            }
            virtual_ipaddress {
                192.168.0.253/24
            }
        }
        EOF
        systemctl restart haproxy keepalived && systemctl enable haproxy keepalived
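
# (Optional sanity check, by hand on each master: the VIP should sit on exactly
#  one node and haproxy should listen on 7443/9000. "eth0" matches the
#  interface assumed in keepalived.conf above.)
#   ip -4 addr show eth0 | grep 192.168.0.253
#   ss -tlnp | grep -E ':7443|:9000'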

# 3. INITIALIZATION (SRV1)
- name: 3. Init Cluster
  hosts: k8s-srv1.au-team.cloud
  become: yes
  tasks:
    - name: Kubeadm Init via Config
      shell: |
        # Idempotent re-run: wipe any previous cluster state first
        kubeadm reset -f || true
        rm -rf /etc/kubernetes/ /var/lib/etcd/ /var/lib/kubelet/ /root/.kube/
        mkdir -p /etc/kubernetes
        cat << 'CFG' > /etc/kubernetes/kubeadm-config.yaml
        apiVersion: kubeadm.k8s.io/v1beta4
        kind: InitConfiguration
        nodeRegistration:
          criSocket: "unix:///run/containerd/containerd.sock"
        ---
        apiVersion: kubeadm.k8s.io/v1beta4
        kind: ClusterConfiguration
        kubernetesVersion: "1.35.0"
        controlPlaneEndpoint: "192.168.0.253:7443"
        networking:
          podSubnet: "10.244.0.0/16"
          serviceSubnet: "10.96.0.0/12"
        ---
        apiVersion: kubeproxy.config.k8s.io/v1alpha1
        kind: KubeProxyConfiguration
        mode: "ipvs"
        ---
        apiVersion: kubelet.config.k8s.io/v1beta1
        kind: KubeletConfiguration
        clusterDNS:
          - "169.254.25.10"
        CFG
        kubeadm init --config /etc/kubernetes/kubeadm-config.yaml --upload-certs --ignore-preflight-errors=all
    
    - name: Capture Tokens
      shell: kubeadm token create --print-join-command
      register: worker_join_cmd

    - name: Capture Cert Key
      shell: kubeadm init phase upload-certs --upload-certs | tail -1
      register: cert_key

    - name: Save Facts
      set_fact:
        w_join: "{{ worker_join_cmd.stdout }}"
        c_key: "{{ cert_key.stdout }}"

    - name: 3.4. Setup Network & CNI
      block:
        - name: Prepare addons directory (kubelet reserves /etc/kubernetes/manifests for static pods)
          file:
            path: /etc/kubernetes/addons
            state: directory
            mode: '0755'

        - name: Prepare local kubeconfig for root
          file:
            path: /root/.kube
            state: directory
            mode: '0700'

        - name: Copy admin.conf to root kubeconfig
          copy:
            src: /etc/kubernetes/admin.conf
            dest: /root/.kube/config
            remote_src: yes

        # --- Install Calico ---
        - name: Copy Calico Tigera Operator manifest
          copy:
            src: files/tigera-operator.yaml
            dest: /etc/kubernetes/addons/tigera-operator.yaml

        - name: Apply Tigera Operator (Server-Side)
          command: kubectl apply --server-side -f /etc/kubernetes/addons/tigera-operator.yaml

        - name: Create Calico Installation Manifest
          copy:
            dest: /etc/kubernetes/addons/calico-install.yaml
            content: |
              apiVersion: operator.tigera.io/v1
              kind: Installation
              metadata:
                name: default
              spec:
                calicoNetwork:
                  ipPools:
                  - cidr: 10.244.0.0/16
                    encapsulation: VXLANCrossSubnet
                    natOutgoing: Enabled

        - name: Wait for Operator and Apply Calico
          shell: |
            kubectl -n tigera-operator rollout status deployment/tigera-operator --timeout=120s || sleep 15
            kubectl apply -f /etc/kubernetes/addons/calico-install.yaml

        # --- Install NodeLocalDNS ---
        - name: Copy NodeLocalDNS manifest
          copy:
            src: files/nodelocaldns.yaml
            dest: /etc/kubernetes/addons/nodelocaldns-daemonset.yaml

        - name: Configure and Apply NodeLocalDNS
          shell: |
            # Substitute the upstream template pillars: kube-dns service IP,
            # the node-local bind address, and the cluster domain
            sed -i 's/__PILLAR__DNS__SERVER__/10.96.0.10/g' /etc/kubernetes/addons/nodelocaldns-daemonset.yaml
            sed -i 's/__PILLAR__LOCAL__DNS__/169.254.25.10/g' /etc/kubernetes/addons/nodelocaldns-daemonset.yaml
            sed -i 's/__PILLAR__DNS__DOMAIN__/cluster.local/g' /etc/kubernetes/addons/nodelocaldns-daemonset.yaml
            kubectl apply -f /etc/kubernetes/addons/nodelocaldns-daemonset.yaml

      environment:
        KUBECONFIG: /etc/kubernetes/admin.conf
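
# (Optional sanity check, by hand on k8s-srv1: the operator reports Calico
#  health via the tigerastatus resource, and NodeLocalDNS runs as a
#  kube-system DaemonSet named node-local-dns in the upstream manifest.)
#   kubectl get tigerastatus
#   kubectl -n kube-system get ds node-local-dns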

# 4. JOIN THE REMAINING NODES
- name: 4. Join Nodes
  hosts: "all:!k8s-srv1.au-team.cloud"
  become: yes
  serial: 1
  tasks:
    - name: Reset before join
      shell: kubeadm reset -f || true
    - name: Clean Kubelet
      shell: rm -rf /etc/kubernetes/ /var/lib/etcd/ /var/lib/kubelet/ /root/.kube/
    - name: Join
      shell: |
        {{ hostvars['k8s-srv1.au-team.cloud']['w_join'] }} \
        {{ '--control-plane --certificate-key ' + hostvars['k8s-srv1.au-team.cloud']['c_key'] if 'masters' in group_names else '' }} \
        --ignore-preflight-errors=all

# 5. FETCH THE CONFIG
- name: 5. Fetch config
  hosts: k8s-srv1.au-team.cloud
  become: yes
  tasks:
    - name: Restart CoreDNS
      shell: kubectl -n kube-system rollout restart deployment coredns
      environment: { KUBECONFIG: /etc/kubernetes/admin.conf }
    - name: Fetch admin.conf
      fetch:
        src: /etc/kubernetes/admin.conf
        dest: ~/.kube/config
        flat: yes
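
A cheap pre-flight before the full run (same venv as in step 4): let Ansible validate the YAML and show the task list.
cd ~/infra/ansible
source venv/ansible/bin/activate
ansible-playbook -i inventories/staging/k8s_hosts install_k8s.yml --syntax-check
ansible-playbook -i inventories/staging/k8s_hosts install_k8s.yml --list-tasks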

4. Final Check (Item 18)
cd ~/infra/ansible/
source venv/ansible/bin/activate && ansible-playbook -i inventories/staging/k8s_hosts install_k8s.yml


echo "nameserver 192.168.0.101" > sudo /etc/resolv.conf && echo "nameserver 8.8.8.8" >> sudo /etc/resolv.conf && sudo apt-get install -y kubernetes1.35-client && mkdir -p ~/.kube
chmod 600 ~/.kube/config
kubectl get nodes -o wide
http://haproxy.au-team.cloud:9000/haproxy_stats
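
If everything converged, the checks below should pass from adm-pc (namespace and DaemonSet names follow the upstream Calico operator and nodelocaldns manifests):
kubectl get nodes -o wide                      # 3 control-plane + 2 workers, all Ready
kubectl get pods -n calico-system              # calico-node Running on every node
kubectl -n kube-system get ds node-local-dns
# Rough HA test: stop keepalived on the active master and confirm the VIP moves
ssh altlinux@192.168.0.201 sudo systemctl stop keepalived
ping -c 3 192.168.0.253 && kubectl get nodes
ssh altlinux@192.168.0.201 sudo systemctl start keepalived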


    - name: Setup Network & CNI
      shell: |
        mkdir -p /root/.kube && cp /etc/kubernetes/admin.conf /root/.kube/config
        sleep 10
        K="kubectl --server=https://127.0.0.1:6443 --insecure-skip-tls-verify=true"
        $K create -f https://raw.githubusercontent.com/projectcalico/calico/v3.27.3/manifests/tigera-operator.yaml || true
        cat <<EOF > /etc/kubernetes/manifests/calico-install.yaml
        apiVersion: operator.tigera.io/v1
          kind: Installation
          metadata:
            name: default
          spec:
            calicoNetwork:
              ipPools:
              - cidr: 10.244.0.0/16
                encapsulation: VXLANCrossSubnet
                natOutgoing: Enabled
        EOF
        $K apply -f /etc/kubernetes/manifests/calico-install.yaml
        wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml -O /etc/kubernetes/manifests/nodelocaldns-daemonset.yaml
        sed -i 's/__PILLAR__DNS__SERVER__/10.96.0.10/g' /etc/kubernetes/manifests/nodelocaldns-daemonset.yaml
        sed -i 's/__PILLAR__LOCAL__DNS__/169.254.25.10/g' /etc/kubernetes/manifests/nodelocaldns-daemonset.yaml
        sed -i 's/__PILLAR__DNS__DOMAIN__/cluster.local/g' /etc/kubernetes/manifests/nodelocaldns-daemonset.yaml
        $K apply -f /etc/kubernetes/manifests/nodelocaldns-daemonset.yaml
      environment: { KUBECONFIG: /etc/kubernetes/admin.conf }