Difference between revisions of "R-Car/k8s-draft"

From eLinux.org
Jump to: navigation, search
(master2設定)
(4 intermediate revisions by the same user not shown)
Line 157: Line 157:
 
Podネットワークプラグインをインストールする。本手順では Calico を使用する。
 
Podネットワークプラグインをインストールする。本手順では Calico を使用する。
  
  # kubectl apply -f https://docs.projectcalico.org/v3.17/manifests/calico.yaml
+
  # kubectl apply -f https://docs.projectcalico.org/v3.15/manifests/calico.yaml
 
  configmap/calico-config created
 
  configmap/calico-config created
 
  customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
 
  customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
Line 196: Line 196:
 
2つめのコントロールプレーンノードを初期化する。
 
2つめのコントロールプレーンノードを初期化する。
 
master1 のノードを初期化したときに表示されるコントロールプレーンノード用のコマンドを実行する。(ハッシュ値は環境によって異なるので、環境に合わせて変更する)
 
master1 のノードを初期化したときに表示されるコントロールプレーンノード用のコマンドを実行する。(ハッシュ値は環境によって異なるので、環境に合わせて変更する)
 +
--node-name master2 を追加
  
 
   kubeadm join 192.168.179.52:6443 --token 21sx8f.t536gdy7uzhk5o2o \
 
   kubeadm join 192.168.179.52:6443 --token 21sx8f.t536gdy7uzhk5o2o \
 
         --discovery-token-ca-cert-hash sha256:232e02ecc69e4ba4bf5806d6ae7cba591be6b67e4de3973597c069c0a9fc1be1 \
 
         --discovery-token-ca-cert-hash sha256:232e02ecc69e4ba4bf5806d6ae7cba591be6b67e4de3973597c069c0a9fc1be1 \
         --control-plane --certificate-key a7dbea8c50522416fc30be35a8cfd2b72c60d2540c74e6bad5832e3dcf3ff9c9
+
         --control-plane --certificate-key a7dbea8c50522416fc30be35a8cfd2b72c60d2540c74e6bad5832e3dcf3ff9c9 --node-name master2
  
 
   
 
   
Line 209: Line 210:
 
  cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
 
  cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
 
  chown $(id -u):$(id -g) $HOME/.kube/config
 
  chown $(id -u):$(id -g) $HOME/.kube/config
 +
 +
 +
=== worker1設定 ===
 +
 +
 +
=== worker2設定 ===
 +
 +
 +
 +
=== k8sクラスタの状態確認 ===
 +
 +
# kubectl get pods -A -o wide
 +
NAMESPACE    NAME                                      READY  STATUS    RESTARTS  AGE  IP              NODE      NOMINATED NODE  READINESS GATES
 +
kube-system  calico-kube-controllers-7d66c56c96-hh5tx  1/1    Running  0          40m  172.16.137.66    master1  <none>          <none>
 +
kube-system  calico-node-55ndb                          0/1    Running  0          25m  192.168.179.49  master2  <none>          <none>
 +
kube-system  calico-node-8lrcm                          1/1    Running  0          14m  192.168.179.50  work1    <none>          <none>
 +
kube-system  calico-node-9nsv5                          0/1    Running  0          40m  192.168.179.48  master1  <none>          <none>
 +
kube-system  calico-node-s2tnv                          0/1    Running  0          13m  192.168.179.51  work2    <none>          <none>
 +
kube-system  coredns-f9fd979d6-5f4pj                    1/1    Running  0          40m  172.16.137.65    master1  <none>          <none>
 +
kube-system  coredns-f9fd979d6-pd7lm                    1/1    Running  0          40m  172.16.137.67    master1  <none>          <none>
 +
kube-system  etcd-master1                              1/1    Running  1          40m  192.168.179.48  master1  <none>          <none>
 +
kube-system  etcd-master2                              1/1    Running  0          22m  192.168.179.49  master2  <none>          <none>
 +
kube-system  kube-apiserver-master1                    1/1    Running  4          40m  192.168.179.48  master1  <none>          <none>
 +
kube-system  kube-apiserver-master2                    1/1    Running  0          22m  192.168.179.49  master2  <none>          <none>
 +
kube-system  kube-controller-manager-master1            1/1    Running  1          40m  192.168.179.48  master1  <none>          <none>
 +
kube-system  kube-controller-manager-master2            1/1    Running  0          22m  192.168.179.49  master2  <none>          <none>
 +
kube-system  kube-proxy-d8gm5                          1/1    Running  0          14m  192.168.179.50  work1    <none>          <none>
 +
kube-system  kube-proxy-gq6l4                          1/1    Running  0          40m  192.168.179.48  master1  <none>          <none>
 +
kube-system  kube-proxy-klhmd                          1/1    Running  1          13m  192.168.179.51  work2    <none>          <none>
 +
kube-system  kube-proxy-l8w5k                          1/1    Running  0          25m  192.168.179.49  master2  <none>          <none>
 +
kube-system  kube-scheduler-master1                    1/1    Running  1          40m  192.168.179.48  master1  <none>          <none>
 +
kube-system  kube-scheduler-master2                    1/1    Running  0          22m  192.168.179.49  master2  <none>          <none>
 +
 +
 +
== Pod展開手順 ==
 +
 +
# kubectl apply -f app.yml
 +
deployment.apps/app created
 +
service/web-service created
 +
 +
# kubectl get pods -o wide
 +
NAME                  READY  STATUS    RESTARTS  AGE  IP            NODE    NOMINATED NODE  READINESS GATES
 +
app-849858b7fd-2cd6d  1/1    Running  0          23m  172.16.215.2  work1  <none>          <none>
 +
app-849858b7fd-hc82t  1/1    Running  0          23m  172.16.123.5  work2  <none>          <none>
 +
app-849858b7fd-k9twv  1/1    Running  0          23m  172.16.215.5  work1  <none>          <none>
 +
app-849858b7fd-kpwq8  1/1    Running  0          23m  172.16.215.4  work1  <none>          <none>
 +
app-849858b7fd-mt77h  1/1    Running  0          23m  172.16.123.1  work2  <none>          <none>
 +
app-849858b7fd-q4pz7  1/1    Running  0          23m  172.16.123.2  work2  <none>          <none>
 +
app-849858b7fd-q7w6m  1/1    Running  0          23m  172.16.123.3  work2  <none>          <none>
 +
app-849858b7fd-qv4t6  1/1    Running  0          23m  172.16.123.4  work2  <none>          <none>
 +
app-849858b7fd-sncpg  1/1    Running  0          23m  172.16.215.1  work1  <none>          <none>
 +
app-849858b7fd-xvhkz  1/1    Running  0          23m  172.16.215.3  work1  <none>          <none>

Revision as of 06:20, 16 April 2021

Heading text

3.1. k8sクラスタ構成手順

HAProxy用1台、master node 用 2台、worker node 用 2台 をそれぞれ設定し、k8sクラスタを構成する手順を説明する。 事前にすべてのR-Carを起動し、同一ネットワークに接続しておく。 (本手順は環境にもよるが20~30分以上かかる。)

HAProxy設定

R-Car (HAProxy) の設定手順を説明する。本手順はR-Car (HAProxy)上で実施する。

/etc/haproxy/haproxy.cfg を開いて、最後尾に以下の設定を追記する。 master1, master2 のIPアドレスは環境に合わせて変更する。

・・・
#---------------------------------------------------------------------
# main frontend which proxys to the backends
#---------------------------------------------------------------------
frontend kubernetes
    bind *:6443
    option tcplog
    mode tcp
    default_backend kubernetes-master-nodes
 
#---------------------------------------------------------------------
# round robin balancing between the various backends
#---------------------------------------------------------------------
backend kubernetes-master-nodes
    mode tcp
    balance     roundrobin
    option tcp-check
    server  master1 192.168.179.48:6443 check
    server  master2 192.168.179.49:6443 check

HAProxy を起動する

systemctl start haproxy

master1設定

R-Car (master1) の設定手順を説明する。本手順はR-Car (master1)上で実施する。


WARNING!
デフォルトではブロックデバイス上(/var/lib/etcd) にデータストアが作成されるが、データストアへアクセス時のタイムアウトエラーが多く発生して動作が安定しない。本手順では、アクセス速度を速くするために、データストアの場所をRAM上に変更する(tmpfsでマウントする)。kubeadm の --config オプションを使ってデータストアのパス変更もできるが、--config オプションは他のオプションと併用できない為、今回は次の方法を取る。

Kubernetes のデータストア(etcd)をRAM上にマウントする。

mkdir /var/lib/etcd
mount -t tmpfs tmpfs /var/lib/etcd

HAProxy サーバのIPアドレスを設定する。(IPアドレスは環境に合わせて変更する。)

LOAD_BALANCER_DNS=192.168.179.52
LOAD_BALANCER_PORT=6443

1つめのコントロールプレーンノードを初期化する。

# kubeadm init --control-plane-endpoint "${LOAD_BALANCER_DNS}:${LOAD_BALANCER_PORT}" --upload-certs --node-name master1
[init] Using Kubernetes version: v1.21.0
[preflight] Running pre-flight checks
        [WARNING Hostname]: hostname "master1" could not be reached
        [WARNING Hostname]: hostname "master1": lookup master1 on 192.168.179.1:53: no such host
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
^[[A^[[A^[[A^[[A^[[B^[[B^[[B^[[B

[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local master1] and IPs [10.96.0.1 192.168.179.48 192.168.179.52]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost master1] and IPs [192.168.179.48 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost master1] and IPs [192.168.179.48 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[kubelet-check] Initial timeout of 40s passed.
[apiclient] All control plane components are healthy after 92.558093 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.21" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
a7dbea8c50522416fc30be35a8cfd2b72c60d2540c74e6bad5832e3dcf3ff9c9
[mark-control-plane] Marking the node master1 as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node master1 as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 21sx8f.t536gdy7uzhk5o2o
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join 192.168.179.52:6443 --token 21sx8f.t536gdy7uzhk5o2o \
        --discovery-token-ca-cert-hash sha256:232e02ecc69e4ba4bf5806d6ae7cba591be6b67e4de3973597c069c0a9fc1be1 \
        --control-plane --certificate-key a7dbea8c50522416fc30be35a8cfd2b72c60d2540c74e6bad5832e3dcf3ff9c9

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.179.52:6443 --token 21sx8f.t536gdy7uzhk5o2o \
        --discovery-token-ca-cert-hash sha256:232e02ecc69e4ba4bf5806d6ae7cba591be6b67e4de3973597c069c0a9fc1be1

上記表示にも説明があるように、以下の設定を行う。これでmaster1 で kubectl が使えるようになる。

mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

Podネットワークプラグインをインストールする。本手順では Calico を使用する。

# kubectl apply -f https://docs.projectcalico.org/v3.15/manifests/calico.yaml
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
serviceaccount/calico-node created
deployment.apps/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
Warning: policy/v1beta1 PodDisruptionBudget is deprecated in v1.21+, unavailable in v1.25+; use policy/v1 PodDisruptionBudget
poddisruptionbudget.policy/calico-kube-controllers created

master2設定

R-Car (master2) の設定手順を説明する。本手順はR-Car (master2)上で実施する。


Kubernetes のデータストア(etcd)をRAM上にマウントする。

mkdir /var/lib/etcd
mount -t tmpfs tmpfs /var/lib/etcd

2つめのコントロールプレーンノードを初期化する。 master1 のノードを初期化したときに表示されるコントロールプレーンノード用のコマンドを実行する。(ハッシュ値は環境によって異なるので、環境に合わせて変更する)

--node-name master2 を追加
 kubeadm join 192.168.179.52:6443 --token 21sx8f.t536gdy7uzhk5o2o \
       --discovery-token-ca-cert-hash sha256:232e02ecc69e4ba4bf5806d6ae7cba591be6b67e4de3973597c069c0a9fc1be1 \
       --control-plane --certificate-key a7dbea8c50522416fc30be35a8cfd2b72c60d2540c74e6bad5832e3dcf3ff9c9 --node-name master2



以下の設定を行う。これでmaster2 で kubectl が使えるようになる。

mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config


worker1設定

worker2設定

k8sクラスタの状態確認

# kubectl get pods -A -o wide
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE   IP               NODE      NOMINATED NODE   READINESS GATES
kube-system   calico-kube-controllers-7d66c56c96-hh5tx   1/1     Running   0          40m   172.16.137.66    master1   <none>           <none>
kube-system   calico-node-55ndb                          0/1     Running   0          25m   192.168.179.49   master2   <none>           <none>
kube-system   calico-node-8lrcm                          1/1     Running   0          14m   192.168.179.50   work1     <none>           <none>
kube-system   calico-node-9nsv5                          0/1     Running   0          40m   192.168.179.48   master1   <none>           <none>
kube-system   calico-node-s2tnv                          0/1     Running   0          13m   192.168.179.51   work2     <none>           <none>
kube-system   coredns-f9fd979d6-5f4pj                    1/1     Running   0          40m   172.16.137.65    master1   <none>           <none>
kube-system   coredns-f9fd979d6-pd7lm                    1/1     Running   0          40m   172.16.137.67    master1   <none>           <none>
kube-system   etcd-master1                               1/1     Running   1          40m   192.168.179.48   master1   <none>           <none>
kube-system   etcd-master2                               1/1     Running   0          22m   192.168.179.49   master2   <none>           <none>
kube-system   kube-apiserver-master1                     1/1     Running   4          40m   192.168.179.48   master1   <none>           <none>
kube-system   kube-apiserver-master2                     1/1     Running   0          22m   192.168.179.49   master2   <none>           <none>
kube-system   kube-controller-manager-master1            1/1     Running   1          40m   192.168.179.48   master1   <none>           <none>
kube-system   kube-controller-manager-master2            1/1     Running   0          22m   192.168.179.49   master2   <none>           <none>
kube-system   kube-proxy-d8gm5                           1/1     Running   0          14m   192.168.179.50   work1     <none>           <none>
kube-system   kube-proxy-gq6l4                           1/1     Running   0          40m   192.168.179.48   master1   <none>           <none>
kube-system   kube-proxy-klhmd                           1/1     Running   1          13m   192.168.179.51   work2     <none>           <none>
kube-system   kube-proxy-l8w5k                           1/1     Running   0          25m   192.168.179.49   master2   <none>           <none>
kube-system   kube-scheduler-master1                     1/1     Running   1          40m   192.168.179.48   master1   <none>           <none>
kube-system   kube-scheduler-master2                     1/1     Running   0          22m   192.168.179.49   master2   <none>           <none>


Pod展開手順

# kubectl apply -f app.yml
deployment.apps/app created
service/web-service created
# kubectl get pods -o wide
NAME                   READY   STATUS    RESTARTS   AGE   IP             NODE    NOMINATED NODE   READINESS GATES
app-849858b7fd-2cd6d   1/1     Running   0          23m   172.16.215.2   work1   <none>           <none>
app-849858b7fd-hc82t   1/1     Running   0          23m   172.16.123.5   work2   <none>           <none>
app-849858b7fd-k9twv   1/1     Running   0          23m   172.16.215.5   work1   <none>           <none>
app-849858b7fd-kpwq8   1/1     Running   0          23m   172.16.215.4   work1   <none>           <none>
app-849858b7fd-mt77h   1/1     Running   0          23m   172.16.123.1   work2   <none>           <none>
app-849858b7fd-q4pz7   1/1     Running   0          23m   172.16.123.2   work2   <none>           <none>
app-849858b7fd-q7w6m   1/1     Running   0          23m   172.16.123.3   work2   <none>           <none>
app-849858b7fd-qv4t6   1/1     Running   0          23m   172.16.123.4   work2   <none>           <none>
app-849858b7fd-sncpg   1/1     Running   0          23m   172.16.215.1   work1   <none>           <none>
app-849858b7fd-xvhkz   1/1     Running   0          23m   172.16.215.3   work1   <none>           <none>