k8s

Installing k8s on CentOS 7

Posted by qijun on January 7, 2022
# Use the Aliyun mirror
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Install with yum
yum install -y kubelet kubeadm kubectl
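If you want the three packages pinned to one specific release (the cluster in this walkthrough ends up on v1.23.1), a hedged example of pinning the versions (the exact version string is an assumption):

yum install -y kubelet-1.23.1 kubeadm-1.23.1 kubectl-1.23.1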

# Disable swap
swapoff -a # temporary, until reboot
sed -ri 's/.*swap.*/#&/' /etc/fstab # permanent: comment out the swap entry in fstab
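A quick sanity check that swap really is off:

free -h # the Swap line should read 0B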

# Disable SELinux
[root@localhost ~]# sestatus
SELinux root directory:         /etc/selinux
## the book uses
sed -i 's/SELINUX=permissive/SELINUX=disabled/' /etc/sysconfig/selinux
## on my machine it is
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0 # temporarily disable
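To verify the SELinux state after these changes:

getenforce # should print Permissive now, and Disabled after a reboot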

# Disable the firewall
systemctl stop firewalld.service
systemctl disable firewalld.service
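If you would rather keep firewalld running, an alternative sketch is to open the control-plane ports instead (port list taken from the upstream documentation):

firewall-cmd --permanent --add-port=6443/tcp      # Kubernetes API server
firewall-cmd --permanent --add-port=2379-2380/tcp # etcd client/peer
firewall-cmd --permanent --add-port=10250/tcp     # kubelet API
firewall-cmd --permanent --add-port=10257/tcp     # kube-controller-manager
firewall-cmd --permanent --add-port=10259/tcp     # kube-scheduler
firewall-cmd --reload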

# Enable the docker and kubelet services
systemctl enable docker.service
systemctl enable kubelet.service
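Note that enable only arranges for the services to start at boot; Docker also needs to be running before kubeadm init, so start it now if it is not already:

systemctl start docker.service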

# Set kernel parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
EOF

sysctl --system
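If sysctl --system complains that the net.bridge.bridge-nf-call-* keys do not exist, the br_netfilter module is not loaded yet; load it, make it persistent, and re-run sysctl:

modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/k8s.conf # load on every boot
sysctl --system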

Run the initialization command:

kubeadm init --image-repository=registry.aliyuncs.com/google_containers

It errored out. Reset kubeadm:

kubeadm reset

Check the error log:

tail /var/log/messages
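The kubelet's own journal is often more direct than /var/log/messages; an alternative place to look:

journalctl -xeu kubelet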

The fix: make Docker use the systemd cgroup driver so it matches the kubelet (a driver mismatch here is a common cause of this init failure):

vi /usr/lib/systemd/system/docker.service

Change

ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock

to

ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --exec-opt native.cgroupdriver=systemd

systemctl daemon-reload && systemctl restart docker
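To confirm the change took effect:

docker info | grep -i cgroup # should now show "Cgroup Driver: systemd"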

Run the init again:

kubeadm init --image-repository=registry.aliyuncs.com/google_containers

Success message:

Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 10.10.0.150:6443 --token 3lv5c7.ktx6y7afyhkbrqgg \
	--discovery-token-ca-cert-hash sha256:b12b41ab0eeea4c144fbf3ef1a61bf5783775395a25a4302269f575fd538d1a6
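If you need the join command again later (the bootstrap token expires after 24 hours by default), it can be regenerated on the control-plane node:

kubeadm token create --print-join-command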

Following the instructions:

export KUBECONFIG=/etc/kubernetes/admin.conf
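That export only lasts for the current shell; one way to make it permanent for root:

echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >> ~/.bash_profile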

Check the nodes:

[root@localhost ~]# kubectl get nodes
NAME                    STATUS     ROLES                  AGE     VERSION
localhost.localdomain   NotReady   control-plane,master   7m35s   v1.23.1

The node status is NotReady. Don't panic; this is because no network plugin has been deployed yet.

[root@localhost ~]# kubectl get pods -n kube-system
NAME                                            READY   STATUS    RESTARTS   AGE
coredns-6d8c4cb4d-hkvdm                         0/1     Pending   0          10m
coredns-6d8c4cb4d-w7dxf                         0/1     Pending   0          10m
etcd-localhost.localdomain                      1/1     Running   0          10m
kube-apiserver-localhost.localdomain            1/1     Running   0          10m
kube-controller-manager-localhost.localdomain   1/1     Running   0          10m
kube-proxy-mbt5h                                1/1     Running   0          10m
kube-scheduler-localhost.localdomain            1/1     Running   0          10m

Deploy one (Weave Net in this case):

kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"
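Rather than polling by hand, you can watch the Weave Net DaemonSet and the pods come up (the DaemonSet name weave-net matches the pod shown below):

kubectl -n kube-system rollout status daemonset/weave-net
kubectl -n kube-system get pods -w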

After waiting a few minutes, it finally succeeded:

[root@localhost ~]# kubectl get pods -n kube-system
NAME                                            READY   STATUS    RESTARTS        AGE
coredns-6d8c4cb4d-hkvdm                         1/1     Running   0               23m
coredns-6d8c4cb4d-w7dxf                         1/1     Running   0               23m
etcd-localhost.localdomain                      1/1     Running   0               24m
kube-apiserver-localhost.localdomain            1/1     Running   0               24m
kube-controller-manager-localhost.localdomain   1/1     Running   0               24m
kube-proxy-mbt5h                                1/1     Running   0               23m
kube-scheduler-localhost.localdomain            1/1     Running   0               24m
weave-net-6dr5p                                 2/2     Running   1 (4m11s ago)   10m

Write your first application:

kubectl create deployment web --image=nginx -o yaml --dry-run=client > nginx.yaml
[root@localhost k8s]# cat nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: web
  name: web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: web
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: web
    spec:
      containers:
      - image: nginx
        name: nginx
        resources: {}
status: {}

Create it:

kubectl create -f nginx.yaml

It did not succeed:

[root@localhost k8s]# kubectl get pods
NAME                   READY   STATUS    RESTARTS   AGE
web-76b56fd968-tmmk2   0/1     Pending   0          15s

Check the pod events:

[root@localhost k8s]# kubectl describe pod web-76b56fd968-tmmk2
Name:           web-76b56fd968-tmmk2
Namespace:      default
Priority:       0
Node:           <none>
Labels:         app=web
                pod-template-hash=76b56fd968
Annotations:    <none>
Status:         Pending
IP:             
IPs:            <none>
Controlled By:  ReplicaSet/web-76b56fd968
Containers:
  nginx:
    Image:        nginx
    Port:         <none>
    Host Port:    <none>
    Environment:  <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-lqszm (ro)
Conditions:
  Type           Status
  PodScheduled   False 
Volumes:
  kube-api-access-lqszm:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason            Age   From               Message
  ----     ------            ----  ----               -------
  Warning  FailedScheduling  85s   default-scheduler  0/1 nodes are available: 1 node(s) had taint {node-role.kubernetes.io/master: }, that the pod didn't tolerate.

By default, the master node does not allow regular workload pods to be scheduled on it (the master taint). Remove the taint:

kubectl taint nodes --all node-role.kubernetes.io/master-
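To confirm the taint is gone and the pod gets scheduled:

kubectl describe node localhost.localdomain | grep -i taints # should now show <none>
kubectl get pods -w # watch the web pod go from Pending to Running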

The pod started successfully. Exec into it to take a look:

kubectl exec -it web-76b56fd968-tmmk2 -- /bin/bash
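To check that nginx is actually serving, one option is to curl the pod IP from the host (replace <pod-ip> with the IP reported by kubectl; it is just an illustrative placeholder here):

kubectl get pod web-76b56fd968-tmmk2 -o wide # note the pod IP
curl http://<pod-ip> # should return the nginx welcome page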