DevOps/Study

k8s Deploy Study Week 1: Bootstrap Kubernetes the Hard Way

juyeon22 2026. 1. 10. 23:36


This post summarizes the Week 1 content of the k8s Deploy study run by Gasida (가시다).

1. Introduction

"Bootstrap Kubernetes the hard way" means deploying Kubernetes not with an installer such as Kubespray or kubeadm, but by installing the Kubernetes control plane components (e.g., etcd, api-server, ...) directly.

The reason for installing everything by hand is to understand in detail how each component works.

This week we study by working through Kubernetes the Hard Way.

Lab Environment Setup

This lab builds its environment with kind and Vagrant.

The final lab topology is as follows.

graph TB
    subgraph Cluster ["Kubernetes Cluster (192.168.10.0/24)"]

        %% Server Node
        subgraph Server ["Server (Control Plane) <br/> 192.168.10.100"]
            subgraph S_Systemd ["[systemd Services]"]
                etcd["etcd.service (http)"]
                api["kube-apiserver.service <br/>(Port: 6443)"]
                sch["kube-scheduler.service"]
                cm["kube-controller-manager.service"]
            end

            api --- etcd
            api --- sch
            api --- cm
        end

        %% Node-0
        subgraph Node0 ["Node-0 (Worker) <br/> 192.168.10.101"]
            subgraph N0_Systemd ["[systemd Services]"]
                k0["kubelet.service <br/>(Port: 10250)"]
                kp0["kube-proxy.service"]
                cr0["containerd.service"]
            end

            subgraph Pod0 ["Runtime: Pod"]
                p0["nginx (Pod) <br/> IP: 10.200.0.2"]
            end

            cr0 -.-> p0
        end

        %% Node-1
        subgraph Node1 ["Node-1 (Worker) <br/> 192.168.10.102"]
            subgraph N1_Systemd ["[systemd Services]"]
                k1["kubelet.service <br/>(Port: 10250)"]
                kp1["kube-proxy.service"]
                cr1["containerd.service"]
            end

            subgraph Pod1 ["Runtime: Pod"]
                p1["nginx (Pod) <br/> IP: 10.200.1.2"]
            end

            cr1 -.-> p1
        end

        %% --- Communication flow ---
        api == "Status/Logs" ==> k0
        api == "Status/Logs" ==> k1
        kp0 -. "Watch" .-> api
        kp1 -. "Watch" .-> api
    end

    %% Styling
    style S_Systemd fill:#fff9c4,stroke:#fbc02d,stroke-dasharray: 5 5
    style N0_Systemd fill:#e1f5fe,stroke:#0288d1,stroke-dasharray: 5 5
    style N1_Systemd fill:#e1f5fe,stroke:#0288d1,stroke-dasharray: 5 5

# Create the kind cluster for the lab
kind create cluster --name myk8s --image kindest/node:v1.32.8 --config - <<EOF
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  extraPortMappings:
  - containerPort: 30000
    hostPort: 30000
  - containerPort: 30001
    hostPort: 30001
- role: worker
EOF
# Download the Vagrantfile and init_cfg.sh files
curl.exe -O https://raw.githubusercontent.com/gasida/vagrant-lab/refs/heads/main/k8s-hardway/Vagrantfile
curl.exe -O https://raw.githubusercontent.com/gasida/vagrant-lab/refs/heads/main/k8s-hardway/init_cfg.sh

# On Windows, BOX_VERSION must be changed for this lab
# 202407.22.0

# Deploy the lab virtual machines
vagrant up

2. JumpBox Server Setup

About the Jump Server

The JumpBox used in this lab is the central management server. Let's configure it.

Jump Server Setup Lab

vagrant ssh jumpbox

# Sync GitHub Repository
git clone --depth 1 https://github.com/kelseyhightower/kubernetes-the-hard-way.git
cd kubernetes-the-hard-way

# Download with wget : roughly 500MB total
wget -q --show-progress \
  --https-only \
  --timestamping \
  -P downloads \
  -i downloads-$(dpkg --print-architecture).txt

ARCH=$(dpkg --print-architecture)
mkdir -p downloads/{client,cni-plugins,controller,worker}
# Extract the archives
tar -xvf downloads/crictl-v1.32.0-linux-${ARCH}.tar.gz \
  -C downloads/worker/ && tree -ug downloads

tar -xvf downloads/containerd-2.1.0-beta.0-linux-${ARCH}.tar.gz \
  --strip-components 1 \
  -C downloads/worker/ && tree -ug downloads

tar -xvf downloads/cni-plugins-linux-${ARCH}-v1.6.2.tgz \
  -C downloads/cni-plugins/ && tree -ug downloads

## --strip-components 1 : strip the leading directory from paths like etcd-v3.6.0-rc.3-linux-amd64/etcd
tar -xvf downloads/etcd-v3.6.0-rc.3-linux-${ARCH}.tar.gz \
  -C downloads/ \
  --strip-components 1 \
  etcd-v3.6.0-rc.3-linux-${ARCH}/etcdctl \
  etcd-v3.6.0-rc.3-linux-${ARCH}/etcd && tree -ug downloads

# Move files into place
mv downloads/{etcdctl,kubectl} downloads/client/
mv downloads/{etcd,kube-apiserver,kube-controller-manager,kube-scheduler} downloads/controller/
mv downloads/{kubelet,kube-proxy} downloads/worker/
mv downloads/runc.${ARCH} downloads/worker/runc

# Remove the leftover archives and grant execute permission
rm -rf downloads/*gz
chmod +x downloads/{client,cni-plugins,controller,worker}/*

chown root:root downloads/client/etcdctl
chown root:root downloads/controller/etcd
chown root:root downloads/worker/crictl

# Install kubectl
cp downloads/client/kubectl /usr/local/bin/

Once setup is complete, we configure the other servers from this central management server.


# Create the address list of the servers to configure
cat <<EOF > machines.txt
192.168.10.100 server.kubernetes.local server
192.168.10.101 node-0.kubernetes.local node-0 10.200.0.0/24
192.168.10.102 node-1.kubernetes.local node-1 10.200.1.0/24
EOF
cat machines.txt

# Check the sshd config file

grep "^[^#]" /etc/ssh/sshd_config

# PasswordAuthentication yes
# PermitRootLogin yes


# Generate the SSH key pair for server access
ssh-keygen -t rsa -N "" -f /root/.ssh/id_rsa

# Copy the public key to each server for setup
while read IP FQDN HOST SUBNET; do
  sshpass -p 'qwe123' ssh-copy-id -o StrictHostKeyChecking=no root@${IP}
done < machines.txt

while read IP FQDN HOST SUBNET; do
  ssh -n root@${IP} cat /root/.ssh/authorized_keys
done < machines.txt
# ...
# /usr/bin/ssh-copy-id: INFO: Source of key(s) to be installed: "/root/.ssh/id_rsa.pub"
# /usr/bin/ssh-copy-id: INFO: attempting to log in with the new key(s), to filter out any that are already installed
# /usr/bin/ssh-copy-id: INFO: 1 key(s) remain to be installed -- if you are prompted now it is to install the new keys

# Number of key(s) added: 1
# ...
# ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDp8sIxHyjC803krVAnoqfh/h43fcqMTZ7CBQRYwapX2PrcDNnqr5ZoIOHnOA1NNTw7uRzvGnwb0K7pVfCyP7TNKqZmS7xXzWE8tHfTLG9Zb4wFzm+2Vzqwn4CoVgEp5aq7fn+v1doJJoaenf2tFPkSgq9+r4gTubi2qPpCobt4NbvJMKsjA1BpOVMSI1yPmOK2UziBGfXYxtsoAhIAogv5tLmOZXqxE1tq9luy251fdKrjL0aDxbvAZoiliOWKfVOALiL8csxvdfNx4Ur1LG2QN0aCUq0UyVnfCx7MZxpQ6vOco0vdhNhX10xtGEW0E4SjfgmFcQzsKjKK4YHiQ4NK31cJdow9aUXxZWn4U75lnE4Vo3mZ9d7M3nNvoGimW6P2KDOw+qYTsIz1VHrLzmIJsBOeRsmH1mTEcPD7sJ4Dsspak6NjWVKoVd3XCEDDJW5dF24Yvhn7AhRb+SjOagHDhJNgrd6eZUor8b0Jmu7CURXy/4Al345z7bMV1XuymyE= root@jumpbox

3. Generating TLS Certificates

About TLS Certificates

Kubernetes components communicate with one another over mTLS, which requires certificates.

We create a single Root CA and its certificate, then issue the server and component certificates signed by that Root CA.

Let's first check which certificates exist in a kind cluster.

docker exec -it myk8s-control-plane sh -c 'apt update && apt install tree yq jq -y'
docker exec -it myk8s-control-plane kubeadm certs check-expiration
# CERTIFICATE                EXPIRES                  RESIDUAL TIME   CERTIFICATE AUTHORITY   EXTERNALLY MANAGED
# admin.conf                 Jan 08, 2027 13:15 UTC   363d            ca                      no
# apiserver                  Jan 08, 2027 13:15 UTC   363d            ca                      no
# apiserver-etcd-client      Jan 08, 2027 13:15 UTC   363d            etcd-ca                 no
# apiserver-kubelet-client   Jan 08, 2027 13:15 UTC   363d            ca                      no
# controller-manager.conf    Jan 08, 2027 13:15 UTC   363d            ca                      no
# etcd-healthcheck-client    Jan 08, 2027 13:15 UTC   363d            etcd-ca                 no
# etcd-peer                  Jan 08, 2027 13:15 UTC   363d            etcd-ca                 no
# etcd-server                Jan 08, 2027 13:15 UTC   363d            etcd-ca                 no
# front-proxy-client         Jan 08, 2027 13:15 UTC   363d            front-proxy-ca          no
# scheduler.conf             Jan 08, 2027 13:15 UTC   363d            ca                      no
# super-admin.conf           Jan 08, 2027 13:15 UTC   363d            ca                      no

# CERTIFICATE AUTHORITY   EXPIRES                  RESIDUAL TIME   EXTERNALLY MANAGED
# ca                      Jan 06, 2036 13:15 UTC   9y              no
# etcd-ca                 Jan 06, 2036 13:15 UTC   9y              no
# front-proxy-ca          Jan 06, 2036 13:15 UTC   9y              no


docker exec -it myk8s-control-plane tree /etc/kubernetes
# /etc/kubernetes
# |-- admin.conf
# |-- controller-manager.conf
# |-- kubelet.conf
# |-- manifests
# |   |-- etcd.yaml
# |   |-- kube-apiserver.yaml
# |   |-- kube-controller-manager.yaml
# |   `-- kube-scheduler.yaml
# |-- pki
# |   |-- apiserver-etcd-client.crt
# |   |-- apiserver-etcd-client.key
# |   |-- apiserver-kubelet-client.crt
# |   |-- apiserver-kubelet-client.key
# |   |-- apiserver.crt
# |   |-- apiserver.key
# |   |-- ca.crt
# |   |-- ca.key
# |   |-- etcd
# |   |   |-- ca.crt
# |   |   |-- ca.key
# |   |   |-- healthcheck-client.crt
# |   |   |-- healthcheck-client.key
# |   |   |-- peer.crt
# |   |   |-- peer.key
# |   |   |-- server.crt
# |   |   `-- server.key
# |   |-- front-proxy-ca.crt
# |   |-- front-proxy-ca.key
# |   |-- front-proxy-client.crt
# |   |-- front-proxy-client.key
# |   |-- sa.key
# |   `-- sa.pub
# |-- scheduler.conf
# `-- super-admin.conf

TLS Certificate Generation Lab

Based on the ca.conf file in the GitHub repository, let's generate the Root CA certificate.

# Generate the Root CA private key : ca.key
openssl genrsa -out ca.key 4096

# Create the CA certificate from the private key
openssl req -x509 -new -sha512 -noenc \
  -key ca.key -days 3653 \
  -config ca.conf \
  -out ca.crt

# Inspect the certificate
openssl x509 -in ca.crt -text -noout
# Certificate:
#     Data:
#         Version: 3 (0x2)
#         Serial Number:
#             6e:a6:3f:85:c0:11:77:e9:5c:46:7e:71:a5:c0:a2:04:af:ba:ee:0b
#         Signature Algorithm: sha512WithRSAEncryption
#         Issuer: C = US, ST = Washington, L = Seattle, CN = CA
#         Validity
#             Not Before: Jan 10 04:45:26 2026 GMT
#             Not After : Jan 11 04:45:26 2036 GMT
#         Subject: C = US, ST = Washington, L = Seattle, CN = CA
# ...
# X509v3 extensions:
#     X509v3 Basic Constraints:
#         CA:TRUE
#     X509v3 Key Usage:
#         Certificate Sign, CRL Sign

Generating the admin Certificate

Next, let's generate the certificate used by the admin client.

# Create Client and Server Certificates : admin
openssl genrsa -out admin.key 4096

# Create the CSR file
openssl req -new -key admin.key -sha256 \
  -config ca.conf -section admin \
  -out admin.csr

# Issue the crt file from the CSR
openssl x509 -req -days 3653 -in admin.csr \
  -copy_extensions copyall \
  -sha256 -CA ca.crt \
  -CAkey ca.key \
  -CAcreateserial \
  -out admin.crt
# Certificate request self-signature ok
# subject=CN = admin, O = system:masters


openssl x509 -in admin.crt -text -noout
# ...
#  Validity
#       Not Before: Jan 10 04:49:18 2026 GMT
#       Not After : Jan 11 04:49:18 2036 GMT
#   Subject: CN = admin, O = system:masters
# ...
# X509v3 extensions:
#     X509v3 Basic Constraints:
#         CA:FALSE
#     X509v3 Extended Key Usage:
#         TLS Web Client Authentication
#     X509v3 Key Usage: critical
#         Digital Signature, Key Encipherment
#     Netscape Cert Type:
#         SSL Client

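As a quick optional check, the issued certificate should verify against our Root CA:

openssl verify -CAfile ca.crt admin.crt
# admin.crt: OK
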
system:masters in the Subject is a built-in group whose members are granted unrestricted access to the API server, so their requests bypass RBAC and webhook-based authorization entirely.
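
One way to see this bypass in action (an illustrative check, assuming a working cluster such as the kind cluster above and a kubeconfig allowed to impersonate): impersonating any user in the system:masters group is permitted to do anything.

kubectl auth can-i '*' '*' --as=anyone --as-group=system:masters
# yes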

Remaining Certificate Work

Next we generate the certificates used by the nodes and the Kubernetes components, then distribute them to each server; the generation loop is sketched in the code below.

# Fix ca.conf (the kube-scheduler section has a duplicated system: prefix)
sed -i 's/system:system:kube-scheduler/system:kube-scheduler/' ca.conf

# Names of the certificates to generate
certs=(
  "node-0" "node-1"
  "kube-proxy" "kube-scheduler"
  "kube-controller-manager"
  "kube-api-server"
  "service-accounts"
)
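
# Generation loop (a sketch following the upstream guide; it repeats the
# admin steps above for every name, and assumes ca.conf has a matching
# section per name)
for i in ${certs[*]}; do
  openssl genrsa -out "${i}.key" 4096

  openssl req -new -key "${i}.key" -sha256 \
    -config "ca.conf" -section ${i} \
    -out "${i}.csr"

  openssl x509 -req -days 3653 -in "${i}.csr" \
    -copy_extensions copyall \
    -sha256 -CA "ca.crt" \
    -CAkey "ca.key" \
    -CAcreateserial \
    -out "${i}.crt"
done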

# Certificate request self-signature ok
# ...

# Distribute the certificates to the worker nodes
for host in node-0 node-1; do
  ssh root@${host} mkdir /var/lib/kubelet/

  scp ca.crt root@${host}:/var/lib/kubelet/

  scp ${host}.crt \
    root@${host}:/var/lib/kubelet/kubelet.crt

  scp ${host}.key \
    root@${host}:/var/lib/kubelet/kubelet.key
done

# Copy the certificates to the server
scp \
  ca.key ca.crt \
  kube-api-server.key kube-api-server.crt \
  service-accounts.key service-accounts.crt \
  root@server:~/

4. Generating Kubernetes Config Files

Let's create the client authentication config files for every client that needs to talk to the API server.

kubelet

Uniquely, the kubelet's access to the API server is authorized with Node Authorization.

Node Authorization exists to enforce the principle of least privilege and to strengthen security isolation between nodes.

Simply put, if a node were compromised and its kubelet held full privileges over the whole cluster, an attacker could use that node to take over the cluster.

To mitigate this threat, a kubelet talking to the API server under Node Authorization can only access what belongs to its own node: node registration, the pods bound to that node, and the ConfigMaps and Secrets referenced by those pods. The sketch below illustrates the effect.
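
As an illustration (a hypothetical check you could run once the node kubeconfigs below exist and the cluster is up): the node identity may read objects tied to its own node, but not arbitrary cluster-wide Secrets.

# Allowed: node-0's identity reads its own Node object
kubectl get node node-0 --kubeconfig node-0.kubeconfig

# Rejected by the Node authorizer: listing Secrets cluster-wide
kubectl get secrets -A --kubeconfig node-0.kubeconfig
# Error from server (Forbidden): ...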

# config set-cluster
kubectl config set-cluster kubernetes-the-hard-way \
  --certificate-authority=ca.crt \
  --embed-certs=true \
  --server=https://server.kubernetes.local:6443 \
  --kubeconfig=node-0.kubeconfig && ls -l node-0.kubeconfig && cat node-0.kubeconfig

kubectl config set-cluster kubernetes-the-hard-way \
  --certificate-authority=ca.crt \
  --embed-certs=true \
  --server=https://server.kubernetes.local:6443 \
  --kubeconfig=node-1.kubeconfig && ls -l node-1.kubeconfig && cat node-1.kubeconfig

# -rw------- 1 root root 2758 Jan 10 18:53 node-0.kubeconfig
# apiVersion: v1
# clusters:
# - cluster:
# (certificate data omitted)

# config set-credentials
kubectl config set-credentials system:node:node-0 \
  --client-certificate=node-0.crt \
  --client-key=node-0.key \
  --embed-certs=true \
  --kubeconfig=node-0.kubeconfig && cat node-0.kubeconfig
kubectl config set-credentials system:node:node-1 \
  --client-certificate=node-1.crt \
  --client-key=node-1.key \
  --embed-certs=true \
  --kubeconfig=node-1.kubeconfig && cat node-1.kubeconfig

#   User "system:node:node-0" set.
# apiVersion: v1
# clusters:
# - cluster:

# set-context : add a default context
kubectl config set-context default \
  --cluster=kubernetes-the-hard-way \
  --user=system:node:node-0 \
  --kubeconfig=node-0.kubeconfig && cat node-0.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes-the-hard-way \
  --user=system:node:node-1 \
  --kubeconfig=node-1.kubeconfig && cat node-1.kubeconfig

# use-context : set current-context to default
kubectl config use-context default \
  --kubeconfig=node-0.kubeconfig

kubectl config use-context default \
  --kubeconfig=node-1.kubeconfig
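
A quick sanity check of the generated files (kubectl redacts the embedded certificate data in this view):

kubectl config view --kubeconfig node-0.kubeconfig
# expect: server https://server.kubernetes.local:6443,
# user system:node:node-0, current-context default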

kube-proxy

# Generate a kubeconfig file for the kube-proxy service
kubectl config set-cluster kubernetes-the-hard-way \
  --certificate-authority=ca.crt \
  --embed-certs=true \
  --server=https://server.kubernetes.local:6443 \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-credentials system:kube-proxy \
  --client-certificate=kube-proxy.crt \
  --client-key=kube-proxy.key \
  --embed-certs=true \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes-the-hard-way \
  --user=system:kube-proxy \
  --kubeconfig=kube-proxy.kubeconfig

kubectl config use-context default \
  --kubeconfig=kube-proxy.kubeconfig

kube-controller-manager

kubectl config set-cluster kubernetes-the-hard-way \
  --certificate-authority=ca.crt \
  --embed-certs=true \
  --server=https://server.kubernetes.local:6443 \
  --kubeconfig=kube-controller-manager.kubeconfig

kubectl config set-credentials system:kube-controller-manager \
  --client-certificate=kube-controller-manager.crt \
  --client-key=kube-controller-manager.key \
  --embed-certs=true \
  --kubeconfig=kube-controller-manager.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes-the-hard-way \
  --user=system:kube-controller-manager \
  --kubeconfig=kube-controller-manager.kubeconfig

kubectl config use-context default \
  --kubeconfig=kube-controller-manager.kubeconfig

kube-scheduler

# Generate a kubeconfig file for the kube-scheduler service
kubectl config set-cluster kubernetes-the-hard-way \
  --certificate-authority=ca.crt \
  --embed-certs=true \
  --server=https://server.kubernetes.local:6443 \
  --kubeconfig=kube-scheduler.kubeconfig

kubectl config set-credentials system:kube-scheduler \
  --client-certificate=kube-scheduler.crt \
  --client-key=kube-scheduler.key \
  --embed-certs=true \
  --kubeconfig=kube-scheduler.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes-the-hard-way \
  --user=system:kube-scheduler \
  --kubeconfig=kube-scheduler.kubeconfig

kubectl config use-context default \
  --kubeconfig=kube-scheduler.kubeconfig

admin

kubectl config set-cluster kubernetes-the-hard-way \
  --certificate-authority=ca.crt \
  --embed-certs=true \
  --server=https://127.0.0.1:6443 \
  --kubeconfig=admin.kubeconfig

kubectl config set-credentials admin \
  --client-certificate=admin.crt \
  --client-key=admin.key \
  --embed-certs=true \
  --kubeconfig=admin.kubeconfig

kubectl config set-context default \
  --cluster=kubernetes-the-hard-way \
  --user=admin \
  --kubeconfig=admin.kubeconfig

kubectl config use-context default \
  --kubeconfig=admin.kubeconfig

Distributing and Applying the Config Files

Distribute the generated config files to each node.

# Copy the kubelet and kube-proxy kubeconfig files to the node-0 and node-1 machines
for host in node-0 node-1; do
  ssh root@${host} "mkdir -p /var/lib/{kube-proxy,kubelet}"

  scp kube-proxy.kubeconfig \
    root@${host}:/var/lib/kube-proxy/kubeconfig

  scp ${host}.kubeconfig \
    root@${host}:/var/lib/kubelet/kubeconfig
done


scp admin.kubeconfig \
  kube-controller-manager.kubeconfig \
  kube-scheduler.kubeconfig \
  root@server:~/

Next, let's configure encryption at rest so that Secrets are stored encrypted in etcd.

# Generate an encryption key
export ENCRYPTION_KEY=$(head -c 32 /dev/urandom | base64)

# Check the encryption config template
cat configs/encryption-config.yaml

envsubst < configs/encryption-config.yaml > encryption-config.yaml


# Copy the encryption-config.yaml encryption config file to each controller instance:
scp encryption-config.yaml root@server:~/
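
For reference, the rendered encryption-config.yaml should look roughly like this (the shape is an assumption based on the upstream template; the repo's configs/encryption-config.yaml is authoritative, and envsubst fills in ${ENCRYPTION_KEY}):

kind: EncryptionConfiguration
apiVersion: apiserver.config.k8s.io/v1
resources:
  - resources:
      - secrets
    providers:
      - aescbc:
          keys:
            - name: key1
              secret: <value of ENCRYPTION_KEY>
      - identity: {}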

5. Installing the Kubernetes Components

Installing and Running etcd

Let's start the etcd service on the Server node.


# Define the settings used when running under systemd
ETCD_NAME=server
cat > units/etcd.service <<EOF
[Unit]
Description=etcd
Documentation=https://github.com/etcd-io/etcd

[Service]
Type=notify
ExecStart=/usr/local/bin/etcd \\
  --name ${ETCD_NAME} \\
  --initial-advertise-peer-urls http://127.0.0.1:2380 \\
  --listen-peer-urls http://127.0.0.1:2380 \\
  --listen-client-urls http://127.0.0.1:2379 \\
  --advertise-client-urls http://127.0.0.1:2379 \\
  --initial-cluster-token etcd-cluster-0 \\
  --initial-cluster ${ETCD_NAME}=http://127.0.0.1:2380 \\
  --initial-cluster-state new \\
  --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

# Copy etcd binaries and systemd unit files to the server machine
scp \
  downloads/controller/etcd \
  downloads/client/etcdctl \
  units/etcd.service \
  root@server:~/

# Below: connect to the server VM
ssh root@server

# Install etcd
mv etcd etcdctl /usr/local/bin/

# Configure the etcd Server
mkdir -p /etc/etcd /var/lib/etcd
chmod 700 /var/lib/etcd
cp ca.crt kube-api-server.key kube-api-server.crt /etc/etcd/
mv etcd.service /etc/systemd/system/

# Start the etcd Server
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd

# Verify etcd is running
systemctl status etcd --no-pager
# ● etcd.service - etcd
#      Loaded: loaded (/etc/systemd/system/etcd.service; enabled; preset: enabled)
#      Active: active (running) since Sat 2026-01-10 20:55:20 KST; 4s ago

# Check the etcd members
etcdctl member list
# 6702b0a34e2cfd39, started, server, http://127.0.0.1:2380, http://127.0.0.1:2379, false
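
# Optional health check (assumes the default client endpoint 127.0.0.1:2379)
etcdctl endpoint health
# 127.0.0.1:2379 is healthy: ...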

# Return to the jump server
exit

Installing and Running the API Server and Other Components

# Create the systemd unit settings
cat << EOF > units/kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes

[Service]
ExecStart=/usr/local/bin/kube-apiserver \\
  --allow-privileged=true \\
  --apiserver-count=1 \\
  --audit-log-maxage=30 \\
  --audit-log-maxbackup=3 \\
  --audit-log-maxsize=100 \\
  --audit-log-path=/var/log/audit.log \\
  --authorization-mode=Node,RBAC \\
  --bind-address=0.0.0.0 \\
  --client-ca-file=/var/lib/kubernetes/ca.crt \\
  --enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \\
  --etcd-servers=http://127.0.0.1:2379 \\
  --event-ttl=1h \\
  --encryption-provider-config=/var/lib/kubernetes/encryption-config.yaml \\
  --kubelet-certificate-authority=/var/lib/kubernetes/ca.crt \\
  --kubelet-client-certificate=/var/lib/kubernetes/kube-api-server.crt \\
  --kubelet-client-key=/var/lib/kubernetes/kube-api-server.key \\
  --runtime-config='api/all=true' \\
  --service-account-key-file=/var/lib/kubernetes/service-accounts.crt \\
  --service-account-signing-key-file=/var/lib/kubernetes/service-accounts.key \\
  --service-account-issuer=https://server.kubernetes.local:6443 \\
  --service-cluster-ip-range=10.32.0.0/24 \\
  --service-node-port-range=30000-32767 \\
  --tls-cert-file=/var/lib/kubernetes/kube-api-server.crt \\
  --tls-private-key-file=/var/lib/kubernetes/kube-api-server.key \\
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF

# Connect to the jumpbox and copy Kubernetes binaries and systemd unit files to the server machine
scp \
  downloads/controller/kube-apiserver \
  downloads/controller/kube-controller-manager \
  downloads/controller/kube-scheduler \
  downloads/client/kubectl \
  units/kube-apiserver.service \
  units/kube-controller-manager.service \
  units/kube-scheduler.service \
  configs/kube-scheduler.yaml \
  configs/kube-apiserver-to-kubelet.yaml \
  root@server:~/


# Connect to the server node
ssh root@server

# Create the config directory
mkdir -p /etc/kubernetes/config


# Install the Kubernetes binaries
mv kube-apiserver \
  kube-controller-manager \
  kube-scheduler kubectl \
  /usr/local/bin/

# Configure the Kubernetes API Server
mkdir -p /var/lib/kubernetes/
mv ca.crt ca.key \
  kube-api-server.key kube-api-server.crt \
  service-accounts.key service-accounts.crt \
  encryption-config.yaml \
  /var/lib/kubernetes/

## Create the kube-apiserver.service systemd unit file:
mv kube-apiserver.service \
  /etc/systemd/system/kube-apiserver.service

## Move the kube-controller-manager kubeconfig into place:
mv kube-controller-manager.kubeconfig /var/lib/kubernetes/

## Create the kube-controller-manager.service systemd unit file:
mv kube-controller-manager.service /etc/systemd/system/


# Configure the Kubernetes Scheduler

## Move the kube-scheduler kubeconfig into place:
mv kube-scheduler.kubeconfig /var/lib/kubernetes/

## Create the kube-scheduler.yaml configuration file:
mv kube-scheduler.yaml /etc/kubernetes/config/

## Create the kube-scheduler.service systemd unit file:
mv kube-scheduler.service /etc/systemd/system/


# Start the Controller Services : Allow up to 10 seconds for the Kubernetes API Server to fully initialize.
systemctl daemon-reload
systemctl enable kube-apiserver kube-controller-manager kube-scheduler
systemctl start  kube-apiserver kube-controller-manager kube-scheduler

# Verify
ss -tlp | grep kube
# LISTEN 0      4096               *:6443              *:*    users:(("kube-apiserver",pid=3041,fd=3))
# LISTEN 0      4096               *:10257             *:*    users:(("kube-controller",pid=3042,fd=3))
# LISTEN 0      4096               *:10259             *:*    users:(("kube-scheduler",pid=3043,fd=3))

kubectl get service,ep --kubeconfig admin.kubeconfig
# NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
# service/kubernetes   ClusterIP   10.32.0.1    <none>        443/TCP   82s

# NAME                   ENDPOINTS        AGE
# endpoints/kubernetes   10.0.2.15:6443   82s
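
# Optional: detailed readiness of the API server (still on the server node,
# using the admin kubeconfig copied earlier)
kubectl get --raw='/readyz?verbose' --kubeconfig admin.kubeconfig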

# RBAC so the API server can reach the kubelets
kubectl apply -f kube-apiserver-to-kubelet.yaml --kubeconfig admin.kubeconfig

# Return to the jump server
exit

# Check the control plane from the jump server
curl -s --cacert ca.crt https://server.kubernetes.local:6443/version | jq
# {
#   "major": "1",
#   "minor": "32",
#   "gitVersion": "v1.32.3",
#   "gitCommit": "32cc146f75aad04beaaa245a7157eb35063a9f99",
#   "gitTreeState": "clean",
#   "buildDate": "2025-03-11T19:52:21Z",
#   "goVersion": "go1.23.6",
#   "compiler": "gc",
#   "platform": "linux/amd64"
# }

6. Setting Up the Worker Nodes

Let's go through the information the worker nodes need and install their components.

Worker Node Setup

# Check the CNI (bridge) config and the kubelet-config file
cat configs/10-bridge.conf | jq
cat configs/kubelet-config.yaml | yq

# Render the per-node config files and copy them to each target node

for HOST in node-0 node-1; do
  SUBNET=$(grep ${HOST} machines.txt | cut -d " " -f 4)
  sed "s|SUBNET|$SUBNET|g" \
    configs/10-bridge.conf > 10-bridge.conf

  sed "s|SUBNET|$SUBNET|g" \
    configs/kubelet-config.yaml > kubelet-config.yaml

  scp 10-bridge.conf kubelet-config.yaml \
  root@${HOST}:~/
done


# Copy the containerd and kube-proxy files
for HOST in node-0 node-1; do
  scp \
    downloads/worker/* \
    downloads/client/kubectl \
    configs/99-loopback.conf \
    configs/containerd-config.toml \
    configs/kube-proxy-config.yaml \
    units/containerd.service \
    units/kubelet.service \
    units/kube-proxy.service \
    root@${HOST}:~/
done

for HOST in node-0 node-1; do
  # create the target directory first so the multi-file scp succeeds
  ssh root@${HOST} mkdir -p cni-plugins
  scp \
    downloads/cni-plugins/* \
    root@${HOST}:~/cni-plugins/
done

# Connect to node-0, run the steps below, exit, then repeat on node-1
ssh root@node-0
ssh root@node-1

# Install the OS dependencies : The socat binary enables support for the kubectl port-forward command.
apt-get -y install socat conntrack ipset kmod psmisc bridge-utils

# Disable Swap : Verify if swap is disabled:
swapon --show

# Create the installation directories
mkdir -p \
  /etc/cni/net.d \
  /opt/cni/bin \
  /var/lib/kubelet \
  /var/lib/kube-proxy \
  /var/lib/kubernetes \
  /var/run/kubernetes

# Install the worker binaries:
mv crictl kube-proxy kubelet runc /usr/local/bin/
mv containerd containerd-shim-runc-v2 containerd-stress /bin/
mv cni-plugins/* /opt/cni/bin/


# Configure CNI Networking

## Create the bridge network configuration file:
mv 10-bridge.conf 99-loopback.conf /etc/cni/net.d/

## To ensure network traffic crossing the CNI bridge network is processed by iptables, load and configure the br-netfilter kernel module:
lsmod | grep netfilter
modprobe br-netfilter
echo "br-netfilter" >> /etc/modules-load.d/modules.conf
lsmod | grep netfilter

echo "net.bridge.bridge-nf-call-iptables = 1"  >> /etc/sysctl.d/kubernetes.conf
echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.d/kubernetes.conf
sysctl -p /etc/sysctl.d/kubernetes.conf


# Configure containerd : Install the containerd configuration files:
mkdir -p /etc/containerd/
mv containerd-config.toml /etc/containerd/config.toml
mv containerd.service /etc/systemd/system/


# Configure the Kubelet : Create the kubelet-config.yaml configuration file:
mv kubelet-config.yaml /var/lib/kubelet/
mv kubelet.service /etc/systemd/system/


# Configure the Kubernetes Proxy
mv kube-proxy-config.yaml /var/lib/kube-proxy/
mv kube-proxy.service /etc/systemd/system/


# Start the Worker Services
systemctl daemon-reload
systemctl enable containerd kubelet kube-proxy
systemctl start containerd kubelet kube-proxy


# Verify
systemctl status kubelet --no-pager
systemctl status containerd --no-pager
systemctl status kube-proxy --no-pager


# Return to the jump server
exit

# From the jumpbox, connect to the server and check node status with kubectl
ssh server "kubectl get nodes -owide --kubeconfig admin.kubeconfig"


# NAME     STATUS   ROLES    AGE    VERSION   INTERNAL-IP      EXTERNAL-IP   OS-IMAGE                         KERNEL-VERSION   CONTAINER-RUNTIME
# node-0   Ready    <none>   106s   v1.32.3   192.168.10.101   <none>        Debian GNU/Linux 12 (bookworm)   6.1.0-23-amd64   containerd://2.1.0-beta.0
# node-1   Ready    <none>   21s    v1.32.3   192.168.10.102   <none>        Debian GNU/Linux 12 (bookworm)   6.1.0-23-amd64   containerd://2.1.0-beta.0

7. Cluster Configuration and Verification

Applying Pod Network Routing

# Use kubectl on the jumpbox with the admin credentials
# Generate a kubeconfig file suitable for authenticating as the admin user:
kubectl config set-cluster kubernetes-the-hard-way \
  --certificate-authority=ca.crt \
  --embed-certs=true \
  --server=https://server.kubernetes.local:6443

kubectl config set-credentials admin \
  --client-certificate=admin.crt \
  --client-key=admin.key

kubectl config set-context kubernetes-the-hard-way \
  --cluster=kubernetes-the-hard-way \
  --user=admin

kubectl config use-context kubernetes-the-hard-way


# Print the internal IP address and Pod CIDR range for each worker instance:
SERVER_IP=$(grep server machines.txt | cut -d " " -f 1)
NODE_0_IP=$(grep node-0 machines.txt | cut -d " " -f 1)
NODE_0_SUBNET=$(grep node-0 machines.txt | cut -d " " -f 4)
NODE_1_IP=$(grep node-1 machines.txt | cut -d " " -f 1)
NODE_1_SUBNET=$(grep node-1 machines.txt | cut -d " " -f 4)
echo $SERVER_IP $NODE_0_IP $NODE_0_SUBNET $NODE_1_IP $NODE_1_SUBNET

ssh server ip -c route
ssh root@server <<EOF
  ip route add ${NODE_0_SUBNET} via ${NODE_0_IP}
  ip route add ${NODE_1_SUBNET} via ${NODE_1_IP}
EOF
ssh root@node-0 <<EOF
  ip route add ${NODE_1_SUBNET} via ${NODE_1_IP}
EOF
ssh root@node-1 <<EOF
  ip route add ${NODE_0_SUBNET} via ${NODE_0_IP}
EOF
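
A quick check that the static routes are in place (illustrative; addresses come from machines.txt):

ssh root@server ip route
# ...
# 10.200.0.0/24 via 192.168.10.101 ...
# 10.200.1.0/24 via 192.168.10.102 ...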

Verifying Deployments and NodePorts

# Deploy nginx
kubectl create deployment nginx --image=nginx:latest
kubectl scale deployment nginx --replicas=2
kubectl get pod -owide
# NAME                     READY   STATUS    RESTARTS   AGE   IP           NODE     NOMINATED NODE   READINESS GATES
# nginx-54c98b4f84-gcb8p   1/1     Running   0          33s   10.200.0.2   node-0   <none>           <none>
# nginx-54c98b4f84-whwqd   1/1     Running   0          33s   10.200.1.2   node-1   <none>           <none>


# From the server node, curl the pod IPs directly
ssh server curl -s 10.200.1.2 | grep title
ssh server curl -s 10.200.0.2 | grep title

# <title>Welcome to nginx!</title>
# <title>Welcome to nginx!</title>


# Expose the Deployment as a NodePort Service
kubectl expose deploy nginx --port 80 --type NodePort


# Check the Service and endpoints
kubectl get service,ep nginx
# NAME            TYPE       CLUSTER-IP    EXTERNAL-IP   PORT(S)        AGE
# service/nginx   NodePort   10.32.0.244   <none>        80:32583/TCP   58s

# NAME              ENDPOINTS                     AGE
# endpoints/nginx   10.200.0.2:80,10.200.1.2:80   58s

# Retrieve the node port assigned to the nginx service:
NODE_PORT=$(kubectl get svc nginx --output=jsonpath='{range .spec.ports[0]}{.nodePort}')
echo $NODE_PORT


# Make an HTTP request using the IP address and the nginx node port:
curl -s -I http://node-0:${NODE_PORT}
curl -s -I http://node-1:${NODE_PORT}
# HTTP/1.1 200 OK
# Server: nginx/1.29.4
# Date: Sat, 10 Jan 2026 14:32:17 GMT
# Content-Type: text/html
# Content-Length: 615
# Last-Modified: Tue, 09 Dec 2025 18:28:10 GMT
# Connection: keep-alive
# ETag: "69386a3a-267"
# Accept-Ranges: bytes

# HTTP/1.1 200 OK
# Server: nginx/1.29.4
# Date: Sat, 10 Jan 2026 14:32:17 GMT
# Content-Type: text/html
# Content-Length: 615
# Last-Modified: Tue, 09 Dec 2025 18:28:10 GMT
# Connection: keep-alive
# ETag: "69386a3a-267"
# Accept-Ranges: bytes
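
Before cleaning up, the upstream guide's smoke test is a nice way to confirm that Secrets really are stored encrypted in etcd (run from the jumpbox; hexdump must be available on the server):

kubectl create secret generic kubernetes-the-hard-way \
  --from-literal="mykey=mydata"

ssh root@server \
    'etcdctl get /registry/secrets/default/kubernetes-the-hard-way | hexdump -C'
# the stored value should begin with the k8s:enc:aescbc:v1:key1 prefix,
# not plaintext, confirming encryption at rest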

Cleaning Up the Lab Resources

# (after exiting the virtual machines) destroy the VMs
exit
exit

## Windows
vagrant destroy -f 
rmdir .vagrant


# Delete the kind cluster
kind delete cluster --name myk8s
