# 关闭 firewalld 服务。
[root@localhost ~]# systemctl stop firewalld
[root@localhost ~]# systemctl disable firewalld
Removed symlink /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed symlink /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@localhost ~]# systemctl stop iptables
Failed to stop iptables.service: Unit iptables.service not loaded.
[root@localhost ~]# systemctl disable iptables
Failed to execute operation: No such file or directory
2.6.5 禁用 selinux。
selinux 是 linux 系统下的一个安全服务,如果不关闭它,在安装集群中会产生各种各样的奇葩问题。
[root@localhost ~]# getenforce
Enforcing
vim /etc/selinux/config
# 编辑 /etc/selinux/config 文件,修改 SELINUX 的值为 disabled。
# 注意修改完毕之后需要重启 linux 服务。
SELINUX=disabled
# This file controls the state of SELinux on the system.
# SELINUX= can take one of these three values:
# enforcing - SELinux security policy is enforced.
# permissive - SELinux prints warnings instead of enforcing.
# disabled - No SELinux policy is loaded.
# SELINUX=enforcing
SELINUX=disabled
# SELINUXTYPE= can take one of three values:
# targeted - Targeted processes are protected,
# minimum - Modification of targeted policy. Only selected processes are protected.
# mls - Multi Level Security protection.
SELINUXTYPE=targeted
# 编辑分区配置文件 /etc/fstab,注释掉 swap 分区一行。
# 注意修改完毕之后需要重启 linux 服务。
vim /etc/fstab
# 注释掉 /dev/mapper/centos-swap swap。
# /dev/mapper/centos-swap swap
[root@localhost ~]# vim /etc/fstab
#
#
# /etc/fstab
# Created by anaconda on Thu Dec 1 01:25:13 2022
#
# /etc/fstab
# Created by anaconda on Thu Dec 1 01:25:13 2022
#
# Accessible filesystems, by reference, are maintained under '/dev/disk'
# See man pages fstab(5), findfs(8), mount(8) and/or blkid(8) for more info
#
/dev/mapper/centos_localhost-root / xfs defaults 0 0
UUID=c393aaa9-2e37-4fa2-8b32-e8b88af1e576 /boot xfs defaults 0 0
/dev/mapper/centos_localhost-home /home xfs defaults 0 0
# /dev/mapper/centos_localhost-swap swap swap defaults 0 0
# 在安装 kubernetes 集群之前,必须要提前准备好集群需要的镜像,所需镜像可以通过下面命令查看。
[root@localhost ~]# kubeadm config images list
W1201 23:46:05.541002 55376 version.go:101] could not fetch a Kubernetes version from the internet: unable to get URL "https://dl.k8s.io/release/stable-1.txt": Get https://storage.googleapis.com/kubernetes-release/release/stable-1.txt: net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
W1201 23:46:05.541156 55376 version.go:102] falling back to the local client version: v1.17.4
W1201 23:46:05.541308 55376 validation.go:28] Cannot validate kube-proxy config - no validator is available
W1201 23:46:05.541316 55376 validation.go:28] Cannot validate kubelet config - no validator is available
k8s.gcr.io/kube-apiserver:v1.17.4
k8s.gcr.io/kube-controller-manager:v1.17.4
k8s.gcr.io/kube-scheduler:v1.17.4
k8s.gcr.io/kube-proxy:v1.17.4
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.4.3-0
k8s.gcr.io/coredns:1.6.5
[root@localhost ~]#
# 下载镜像。
# 此镜像在 kubernetes 的仓库中,由于网络原因,无法连接,下面提供了一种替换方案。
# Images required by kubeadm v1.17.4 (name:tag, without registry prefix).
images=(
  kube-apiserver:v1.17.4
  kube-controller-manager:v1.17.4
  kube-scheduler:v1.17.4
  kube-proxy:v1.17.4
  pause:3.1
  etcd:3.4.3-0
  coredns:1.6.5
)

# Aliyun mirror of google_containers, reachable from mainland China.
mirror="registry.cn-hangzhou.aliyuncs.com/google_containers"

# Pull each image from the mirror, retag it to the k8s.gcr.io name that
# kubeadm expects, then remove the mirror-prefixed tag.
# Expansions are quoted to survive any unusual characters (SC2068/SC2086).
for imageName in "${images[@]}"; do
  # If the pull fails there is nothing to tag or remove — skip this image
  # instead of cascading two more error messages.
  docker pull "${mirror}/${imageName}" || continue
  docker tag "${mirror}/${imageName}" "k8s.gcr.io/${imageName}"
  docker rmi "${mirror}/${imageName}"
done
[root@localhost ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
k8s.gcr.io/kube-proxy v1.17.4 6dec7cfde1e5 2 years ago 116MB
k8s.gcr.io/kube-apiserver v1.17.4 2e1ba57fe95a 2 years ago 171MB
k8s.gcr.io/kube-controller-manager v1.17.4 7f997fcf3e94 2 years ago 161MB
k8s.gcr.io/kube-scheduler v1.17.4 5db16c1c7aff 2 years ago 94.4MB
k8s.gcr.io/coredns 1.6.5 70f311871ae1 3 years ago 41.6MB
k8s.gcr.io/etcd 3.4.3-0 303ce5db0e90 3 years ago 288MB
k8s.gcr.io/pause 3.1 da86e6ba6ca1 4 years ago 742kB
# 创建集群。
[root@master ~]# kubeadm init \
--apiserver-advertise-address=192.168.142.150 \
--image-repository=registry.aliyuncs.com/google_containers \
--kubernetes-version=v1.17.4 \
--pod-network-cidr=10.244.0.0/16 \
--service-cidr=10.96.0.0/12
# To start using your cluster, you need to run the following as a regular user:
[root@master ~]# mkdir -p $HOME/.kube
[root@master ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@master ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
[root@localhost ~]# kubeadm init
> --apiserver-advertise-address=192.168.142.150
> --image-repository=registry.aliyuncs.com/google_containers
> --kubernetes-version=v1.17.4
> --pod-network-cidr=10.244.0.0/16
> --service-cidr=10.96.0.0/12
W1202 00:11:31.437970 57290 validation.go:28] Cannot validate kubelet config - no validator is available
W1202 00:11:31.438028 57290 validation.go:28] Cannot validate kube-proxy config - no validator is available
[init] Using Kubernetes version: v1.17.4
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [localhost.localdomain.k8s.master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.142.150]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost.localdomain.k8s.master localhost] and IPs [192.168.142.150 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost.localdomain.k8s.master localhost] and IPs [192.168.142.150 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
W1202 00:19:51.720839 57290 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[control-plane] Creating static Pod manifest for "kube-scheduler"
W1202 00:19:51.721669 57290 manifests.go:214] the default kube-apiserver authorization-mode is "Node,RBAC"; using "Node,RBAC"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 23.003244 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.17" in namespace kube-system with the configuration for the kubelets in the cluster
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node localhost.localdomain.k8s.master as control-plane by adding the label "node-role.kubernetes.io/master=''"
[mark-control-plane] Marking the node localhost.localdomain.k8s.master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: xs70mz.1j3eaj8unj3g11cp
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.142.150:6443 --token xs70mz.1j3eaj8unj3g11cp \
--discovery-token-ca-cert-hash sha256:8cb8adbc0147bc1c15fc689f98ab49e8442d16d28e85062d2db9b8f47225c6cc
[root@localhost ~]#
[root@localhost ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
localhost.localdomain.k8s.master NotReady master 4m11s v1.17.4
下面的操作只需要在 node 节点上执行即可。
[root@localhost ~]# kubeadm join 192.168.142.150:6443 --token xs70mz.1j3eaj8unj3g11cp
> --discovery-token-ca-cert-hash sha256:8cb8adbc0147bc1c15fc689f98ab49e8442d16d28e85062d2db9b8f47225c6cc
W1202 00:25:46.290777 86902 join.go:346] [preflight] WARNING: JoinControlPane.controlPlane settings will be ignored when control-plane flag is not set.
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
[kubelet-start] Downloading configuration for the kubelet from the "kubelet-config-1.17" ConfigMap in the kube-system namespace
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...
This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
在 master 上查看节点信息。
[root@localhost ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
localhost.localdomain.k8s.master NotReady master 6m31s v1.17.4
localhost.localdomain.k8s.node1 NotReady 56s v1.17.4
localhost.localdomain.k8s.node2 NotReady 52s v1.17.4
[root@localhost k8s]# kubectl apply -f kube-flannel.yml
namespace/kube-flannel created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
[root@localhost k8s]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
localhost.localdomain.k8s.master Ready master 62m v1.17.4
localhost.localdomain.k8s.node1 Ready 57m v1.17.4
localhost.localdomain.k8s.node2 Ready 57m v1.17.4
[root@localhost k8s]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-6867cdf567-5p8k4 0/1 ContainerCreating 0 83s
[root@localhost k8s]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-6867cdf567-5p8k4 1/1 Running 0 2m47s
[root@localhost k8s]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 443/TCP 71m
nginx NodePort 10.107.6.137 80:31567/TCP 67s
kubernetes 的本质上就是一个集群系统,用户可以在集群中部署各种服务,所谓的部署服务,其实就是在 kubernetes 集群中运行一个个的容器,并将指定的程序跑在容器中。
kubernetes 的最小管理单元是 pod 而不是容器,所以只能将容器放在Pod中,而 kubernetes 一般也不会直接管理 Pod,而是通过Pod 控制器来管理 Pod 的。
Pod 可以提供服务之后,就要考虑如何访问 Pod 中服务,kubernetes 提供了 Service 资源实现这个功能。
当然,如果 Pod 中程序的数据需要持久化,kubernetes 还提供了各种存储系统。
# 查看所有 pod。
kubectl get pod
# 查看某个 pod。
kubectl get pod pod_name
# 查看某个 pod,以 yaml / json 格式展示结果。
kubectl get pod pod_name -o yaml
[root@localhost ~]# kubectl version
Client Version: version.Info{Major:"1", Minor:"17", GitVersion:"v1.17.4", GitCommit:"8d8aa39598534325ad77120c120a22b3a990b5ea", GitTreeState:"clean", BuildDate:"2020-03-12T21:03:42Z", GoVersion:"go1.13.8", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"17", GitVersion:"v1.17.4", GitCommit:"8d8aa39598534325ad77120c120a22b3a990b5ea", GitTreeState:"clean", BuildDate:"2020-03-12T20:55:23Z", GoVersion:"go1.13.8", Compiler:"gc", Platform:"linux/amd64"}
[root@localhost ~]# kubectl cluster-info
Kubernetes master is running at https://192.168.142.150:6443
KubeDNS is running at https://192.168.142.150:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
资源类型。
kubernetes 中所有的内容都抽象为资源,可以通过下面的命令进行查看。
kubectl api-resources
经常使用的资源有下面这些。
资源分类
资源名称
缩写
资源作用
集群级别资源
nodes
no
集群组成部分
namespaces
ns
隔离 Pod
pod 资源
pods
po
装载容器
pod 资源控制器
replicationcontrollers
rc
控制 pod 资源
replicasets
rs
控制 pod 资源
deployments
deploy
控制 pod 资源
daemonsets
ds
控制 pod 资源
jobs
控制 pod 资源
cronjobs
cj
控制 pod 资源
horizontalpodautoscalers
hpa
控制 pod 资源
statefulsets
sts
控制 pod 资源
服务发现资源
services
svc
统一 pod 对外接口
ingress
ing
统一 pod 对外接口
存储资源
volumeattachments
存储
persistentvolumes
pv
存储
persistentvolumeclaims
pvc
存储
配置资源
configmaps
cm
配置
secrets
配置
操作。
kubernetes 允许对资源进行多种操作,可以通过 --help 查看详细的操作命令。
kubectl --help
经常使用的操作有下面这些。
命令分类
命令
翻译
命令作用
基本命令
create
创建
创建一个资源
edit
编辑
编辑一个资源
get
获取
获取一个资源
patch
更新
更新一个资源
delete
删除
删除一个资源
explain
解释
展示资源文档
运行和调试
run
运行
在集群中运行一个指定的镜像
expose
暴露
暴露资源为 Service
describe
描述
显示资源内部信息
logs
日志输出容器在 pod 中的日志
输出容器在 pod 中的日志
attach
缠绕进入运行中的容器
进入运行中的容器
exec
执行容器中的一个命令
执行容器中的一个命令
cp
复制
在 Pod 内外复制文件
rollout
首次展示
管理资源的发布
scale
规模
扩(缩)容 Pod 的数量
autoscale
自动调整
自动调整 Pod 的数量
高级命令
apply
应用
通过文件对资源进行配置
label
标签
更新资源上的标签
其他命令
cluster-info
集群信息
显示集群
version
版本
显示当前 Server 和 Client 的版本
下面以一个 namespace / pod 的创建和删除简单演示下命令的使用。
# 创建一个 namespace。
[root@localhost ~]# kubectl create namespace dev
namespace/dev created
# 获取 namespace。
[root@localhost ~]# kubectl get ns
NAME STATUS AGE
default Active 13h
dev Active 17s
kube-flannel Active 12h
kube-node-lease Active 13h
kube-public Active 13h
kube-system Active 13h
# 在此 namespace 下创建并运行一个 nginx 的 Pod。
[root@localhost ~]# kubectl run pod --image=nginx:1.17.1 -n dev
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/pod created
# 查看新创建的 pod。不加 -n dev 默认查 default。
[root@localhost ~]# kubectl get pod -n dev
NAME READY STATUS RESTARTS AGE
pod-644584df94-5gx6f 1/1 Running 0 3m33s
[root@localhost ~]# kubectl get pod
NAME READY STATUS RESTARTS AGE
nginx-6867cdf567-tlmw5 1/1 Running 0 25m
[root@localhost ~]# kubectl get pod -n default
NAME READY STATUS RESTARTS AGE
nginx-6867cdf567-tlmw5 1/1 Running 0 26m
# 删除指定的 pod。
[root@localhost ~]# kubectl delete pods pod-644584df94-5gx6f -n dev
pod "pod-644584df94-5gx6f" deleted
[root@localhost ~]# kubectl get pod -n dev
NAME READY STATUS RESTARTS AGE
pod-644584df94-wzzcv 0/1 ContainerCreating 0 40s
# 删除后又生成了一个。控制器,后面讲解。
# 删除指定的 namespace。
[root@localhost ~]# kubectl delete ns dev
namespace "dev" deleted
[root@localhost ~]# kubectl get ns
NAME STATUS AGE
default Active 13h
kube-flannel Active 12h
kube-node-lease Active 13h
kube-public Active 13h
kube-system Active 13h
[root@localhost ~]# kubectl get pod -n dev
No resources found in dev namespace.
[root@localhost ~]# kubectl get pod -n dev
[root@localhost ~]# kubectl describe pods pod-644584df94-5gx6f -n dev
Name: pod-644584df94-5gx6f
Namespace: dev
Priority: 0
Node: localhost.localdomain.k8s.node2/192.168.142.152
Start Time: Fri, 02 Dec 2022 13:35:27 +0800
Labels: pod-template-hash=644584df94
run=pod
Annotations:
Status: Running
IP: 10.244.2.3
IPs:
IP: 10.244.2.3
Controlled By: ReplicaSet/pod-644584df94
Containers:
pod:
Container ID: docker://26a71073d6b9f116bd7411aacdf862f77c1d1f485844e33a55aa2590edeb8614
Image: nginx:1.17.1
Image ID: docker-pullable://nginx@sha256:0d17b565c37bcbd895e9d92315a05c1c3c9a29f762b011a10c54a66cd53c9b31
Port:
Host Port:
State: Running
Started: Fri, 02 Dec 2022 13:36:34 +0800
Ready: True
Restart Count: 0
Environment:
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from default-token-khzf8 (ro)
Conditions:
Type Status
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
default-token-khzf8:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-khzf8
Optional: false
QoS Class: BestEffort
Node-Selectors:
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 5m24s default-scheduler Successfully assigned dev/pod-644584df94-5gx6f to localhost.localdomain.k8s.node2
Normal Pulling 5m23s kubelet, localhost.localdomain.k8s.node2 Pulling image "nginx:1.17.1"
Normal Pulled 4m19s kubelet, localhost.localdomain.k8s.node2 Successfully pulled image "nginx:1.17.1"
Normal Created 4m18s kubelet, localhost.localdomain.k8s.node2 Created container pod
Normal Started 4m17s kubelet, localhost.localdomain.k8s.node2 Started container pod
3.3.2 命令式对象配置。
命令式对象配置就是使用命令配合配置文件一起来操作 kubernetes 资源。
1) 创建一个 nginxpod.yaml,内容如下。
# First document: a namespace to isolate the pod below.
apiVersion: v1
kind: Namespace
metadata:
  name: dev
---
# Second document: an nginx pod created inside the "dev" namespace.
apiVersion: v1
kind: Pod
metadata:
  name: nginxpod
  namespace: dev
spec:
  containers:
  - name: nginx-containers
    image: nginx:1.17.1
2)执行 create 命令,创建资源。
[root@localhost k8s]# kubectl create -f nginxpod.yaml
namespace/dev created
pod/nginxpod created
此时发现创建了两个资源对象,分别是 namespace 和 pod。
[root@localhost k8s]# kubectl get ns dev
NAME STATUS AGE
dev Active 37s
[root@localhost k8s]# kubectl get pod -n dev
NAME READY STATUS RESTARTS AGE
nginxpod 0/1 ContainerCreating 0 43s
[root@localhost k8s]# kubectl get pods -n dev
NAME READY STATUS RESTARTS AGE
nginxpod 1/1 Running 0 90s
3)执行 get 命令,查看资源。
[root@localhost k8s]# kubectl get -f nginxpod.yaml
NAME STATUS AGE
namespace/dev Active 2m15s
NAME READY STATUS RESTARTS AGE
pod/nginxpod 1/1 Running 0 2m14s
默认情况下,kubernetes 集群中的所有的 Pod 都是可以相互访问的。但是在实际中,可能不想让两个 Pod 之间进行互相的访问,那此时就可以将两个 Pod 划分到不同的 namespace 下。kubernetes 通过将集群内部的资源分配到不同的 Namespace 中,可以形成逻辑上的"组",以方便不同的组的资源进行隔离使用和管理。
可以通过 kubernetes 的授权机制,将不同的 namespace 交给不同租户进行管理,这样就实现了多租户的资源隔离。此时还能结合 kubernetes 的资源配额机制,限定不同租户能占用的资源,例如 CPU 使用量、内存使用量等等,来实现租户可用资源的管理。
kubernetes 在集群启动之后,会默认创建几个 namespace。
[root@localhost ~]# kubectl get ns
NAME STATUS AGE
default Active 13h # 所有未指定 Namespace 的对象都会被分配在 default 命名空间。
#dev Active 17m
#kube-flannel Active 12h
kube-node-lease Active 13h # 集群节点之间的心跳维护,v1.13 开始引入。
kube-public Active 13h # 此命名空间下的资源可以被所有人访问(包括未认证用户)。
kube-system Active 13h # 所有由 Kubernetes 系统创建的资源都处于这个命名空间。
下面来看 namespace 资源的具体操作。
4.1.1 查看。
# 查看所有的 ns。命令:kubectl get ns
[root@localhost ~]# kubectl get ns
NAME STATUS AGE
default Active 13h
dev Active 23m
kube-flannel Active 12h
kube-node-lease Active 13h
kube-public Active 13h
kube-system Active 13h
[root@localhost ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-9d85f5447-5959v 1/1 Running 0 13h
coredns-9d85f5447-gvqxh 1/1 Running 0 13h
etcd-localhost.localdomain.k8s.master 1/1 Running 0 13h
kube-apiserver-localhost.localdomain.k8s.master 1/1 Running 0 13h
kube-controller-manager-localhost.localdomain.k8s.master 1/1 Running 0 13h
kube-proxy-7dc95 1/1 Running 1 13h
kube-proxy-7hss2 1/1 Running 0 13h
kube-proxy-rpnvx 1/1 Running 0 13h
kube-scheduler-localhost.localdomain.k8s.master 1/1 Running 0 13h
# 查看指定的 ns。命令:kubectl get ns ns 名称
[root@localhost ~]# kubectl get ns default
NAME STATUS AGE
default Active 13h
# 指定输出格式。命令:kubectl get ns ns 名称 -o 格式参数
# kubernetes 支持的格式有很多,比较常见的是 wide、json、yaml
[root@localhost ~]# kubectl get ns default -o yaml
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: "2022-12-01T16:20:14Z"
name: default
resourceVersion: "146"
selfLink: /api/v1/namespaces/default
uid: 11fb6c7d-f67f-4d9e-8e2b-66621c0d5d08
spec:
finalizers:
- kubernetes
status:
phase: Active
# 查看 ns 详情。命令:kubectl describe ns ns 名称
[root@localhost ~]# kubectl describe ns default
Name: default
Labels:
Annotations:
Status: Active
No resource quota.
No LimitRange resource.
# status
# Active 命名空间正在使用中 Terminating 正在删除命名空间。
# ResourceQuota 针对 namespace 做的资源限制。
# LimitRange 针对 namespace 中的每个组件做的资源限制。
4.1.2 创建。
# 创建 namespace。
[root@master ~]# kubectl create ns dev
namespace/dev created
# 命令格式:kubectl run (pod 控制器名称) [参数]
# --image 指定 Pod 的镜像。
# --port 指定端口。
# --namespace 指定 namespace。
[root@localhost k8s]# kubectl run nginx --image=nginx:1.17.1 --port=80 --namespace dev
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created
[root@localhost k8s]# kubectl get pod -n dev
NAME READY STATUS RESTARTS AGE
nginx-64777cd554-kwbhk 1/1 Running 0 2m8s
[root@localhost k8s]# kubectl get pod -n dev -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-64777cd554-kwbhk 1/1 Running 0 2m13s 10.244.2.7 localhost.localdomain.k8s.node2
# READY ~ pod 中容器数量。
4.2.2 查看 pod 信息。
# 查看 Pod 基本信息。
[root@localhost k8s]# kubectl get pods -n dev
NAME READY STATUS RESTARTS AGE
nginx-64777cd554-kwbhk 1/1 Running 0 4m26s
# 查看 Pod 的详细信息。
[root@localhost k8s]# kubectl describe pod nginx -n dev
Name: nginx-64777cd554-kwbhk
Namespace: dev
Priority: 0
Node: localhost.localdomain.k8s.node2/192.168.142.152
Start Time: Fri, 02 Dec 2022 14:41:15 +0800
Labels: pod-template-hash=64777cd554
run=nginx
Annotations:
Status: Running
IP: 10.244.2.7
IPs:
IP: 10.244.2.7
Controlled By: ReplicaSet/nginx-64777cd554
Containers:
nginx:
Container ID: docker://f96e2bc540280474d7e6f7942ae42c08b485b86e18c090152d0bcdf6ea6fed21
Image: nginx:1.17.1
Image ID: docker-pullable://nginx@sha256:b4b9b3eee194703fc2fa8afa5b7510c77ae70cfba567af1376a573a967c03dbb
Port: 80/TCP
Host Port: 0/TCP
State: Running
Started: Fri, 02 Dec 2022 14:41:17 +0800
Ready: True
Restart Count: 0
Environment:
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from default-token-z9vht (ro)
Conditions:
Type Status
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
default-token-z9vht:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-z9vht
Optional: false
QoS Class: BestEffort
Node-Selectors:
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 4m37s default-scheduler Successfully assigned dev/nginx-64777cd554-kwbhk to localhost.localdomain.k8s.node2
Normal Pulled 4m36s kubelet, localhost.localdomain.k8s.node2 Container image "nginx:1.17.1" already present on machine
Normal Created 4m36s kubelet, localhost.localdomain.k8s.node2 Created container nginx
Normal Started 4m35s kubelet, localhost.localdomain.k8s.node2 Started container nginx
4.2.3 访问 Pod。
# 获取 pod IP。
[root@localhost k8s]# kubectl get pods -n dev -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-64777cd554-kwbhk 1/1 Running 0 8m32s 10.244.2.7 localhost.localdomain.k8s.node2
# 访问 POD。
[root@localhost k8s]# curl http://10.244.2.7:80
Welcome to nginx!
body {
width: 35em;
margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif;
}
Welcome to nginx!
If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.
For online documentation and support please refer to
nginx.org.
Commercial support is available at
nginx.com.
Thank you for using nginx.
4.2.4 删除指定 Pod。
# 删除指定 Pod。
[root@localhost k8s]# kubectl get pod -n dev
NAME READY STATUS RESTARTS AGE
nginx-64777cd554-kwbhk 1/1 Running 0 11m
# 此时,显示删除 Pod 成功,但是再查询,发现又新产生了一个。
[root@localhost k8s]# kubectl get pod -n dev
NAME READY STATUS RESTARTS AGE
nginx-64777cd554-xwknq 1/1 Running 0 26s
# kubectl run (pod 控制器名称) [参数]
# 这是因为当前 Pod 是由 Pod 控制器创建的,控制器会监控 Pod 状况,一旦发现 Pod 死亡,会立即重建。
# 此时要想删除 Pod,必须删除 Pod 控制器。
# 先来查询一下当前 namespace 下的 Pod 控制器。
[root@localhost k8s]# kubectl get deploy -n dev
NAME READY UP-TO-DATE AVAILABLE AGE
nginx 1/1 1 1 12m
# 接下来,删除此 Pod 控制器。
[root@localhost k8s]# kubectl delete deploy nginx -n dev
deployment.apps "nginx" deleted
# 稍等片刻,再查询 Pod,发现 Pod 被删除了。
[root@localhost k8s]# kubectl get pods -n dev
No resources found in dev namespace.
4.2.5 配置操作。
创建一个 pod-nginx.yaml,内容如下。
# Single nginx pod in the "dev" namespace, exposing container port 80/TCP.
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: dev
spec:
  containers:
  - image: nginx:1.17.1
    name: pod
    ports:
    - name: nginx-port
      containerPort: 80   # port the nginx container listens on
      protocol: TCP
然后就可以执行对应的创建和删除命令了。
[root@localhost k8s]# vim pod-nginx.yaml
[root@localhost k8s]# kubectl create -f pod-nginx.yaml
pod/nginx created
[root@localhost k8s]# kubectl delete -f pod-nginx.yaml
pod "nginx" deleted
name in (master, slave): 选择所有包含 Label 中的 key="name" 且 value="master" 或 "slave" 的对象。
name not in (frontend): 选择所有包含 Label 中的 key="name" 且 value 不等于 "frontend" 的对象。
name=slave,env!=production
name not in (frontend),env!=production
4.3.1 命令方式。
[root@localhost k8s]# kubectl create -f pod-nginx.yaml
pod/nginx created
# 查看标签。
[root@localhost k8s]# kubectl get pod -n dev --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx 1/1 Running 0 69s
# 为 pod 资源打标签。
[root@localhost k8s]# kubectl label pod nginx version=1.0 -n dev
pod/nginx labeled
# 查看标签。
[root@localhost k8s]# kubectl get pod -n dev --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx 1/1 Running 0 2m19s version=1.0
# 追加打标签。
[root@localhost k8s]# kubectl label pod nginx tier=back -n dev
pod/nginx labeled
# 查看标签。
[root@localhost k8s]# kubectl get pod -n dev --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx 1/1 Running 0 3m4s tier=back,version=1.0
# 为 pod 资源更新标签。
[root@localhost k8s]# kubectl label pod nginx version=2.0 -n dev
error: 'version' already has a value (1.0), and --overwrite is false
[root@localhost k8s]# kubectl label pod nginx version=2.0 -n dev --overwrite
pod/nginx labeled
[root@localhost k8s]# kubectl get pod -n dev --show-labels
NAME READY STATUS RESTARTS AGE LABELS
# 筛选标签。
# Second nginx pod ("nginx1") in the "dev" namespace, used below to
# demonstrate label selectors across multiple pods.
apiVersion: v1
kind: Pod
metadata:
  name: nginx1
  namespace: dev
spec:
  containers:
  - image: nginx:1.17.1
    name: pod
    ports:
    - name: nginx-port
      containerPort: 80   # port the nginx container listens on
      protocol: TCP
[root@localhost k8s]# vim pod-nginx1.yaml
[root@localhost k8s]# kubectl create -f pod-nginx1.yaml
pod/nginx1 created
[root@localhost k8s]# kubectl label pod nginx1 version=1.0 -n dev --overwrite
pod/nginx1 labeled
[root@localhost k8s]# kubectl get pods -n dev --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx 1/1 Running 0 10m tier=back,version=2.0
nginx1 1/1 Running 0 48s version=1.0
[root@localhost k8s]# kubectl get pod -n dev -l version=2.0 --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx 1/1 Running 0 11m tier=back,version=2.0
[root@localhost k8s]# kubectl get pod -n dev -l version!=2.0 --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx1 1/1 Running 0 117s version=1.0
[root@localhost k8s]# kubectl get pod -n dev -l version=3.0 --show-labels
No resources found in dev namespace.
# 删除标签。
[root@localhost k8s]# kubectl label pod nginx tier- -n dev
pod/nginx labeled
[root@localhost k8s]# kubectl get pods -n dev --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx 1/1 Running 0 14m version=2.0
nginx1 1/1 Running 0 4m32s version=1.0
4.3.2 配置方式。
# Declarative variant: labels are set in the manifest instead of via
# "kubectl label"; applied with "kubectl apply -f pod-nginx.yaml".
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  namespace: dev
  labels:
    version: "3.0"   # quoted so YAML keeps it a string, not a float
    env: "test"
spec:
  containers:
  - image: nginx:1.17.1
    name: pod
    ports:
    - name: nginx-port
      containerPort: 80
      protocol: TCP
然后就可以执行对应的更新命令了:kubectl apply -f pod-nginx.yaml。
4.4 Deployment。
在 kubernetes 中,Pod 是最小的控制单元,但是 kubernetes 很少直接控制 Pod,一般都是通过 Pod 控制器来完成的。Pod 控制器用于 pod 的管理,确保 pod 资源符合预期的状态,当 pod 的资源出现故障时,会尝试进行重启或重建 pod。
在 kubernetes 中 Pod 控制器的种类有很多,本章节只介绍一种:Deployment。
4.4.1 命令操作。
kubectl run 等命令的底层使用了 pod 控制器。
# 命令格式: kubectl create deployment 名称 [参数]
# --image 指定 pod 的镜像。
# --port 指定端口。
# --replicas 指定创建 pod 数量。
# --namespace 指定 namespace。
[root@localhost ~]# kubectl delete ns dev
namespace "dev" deleted
[root@localhost ~]# kubectl create ns dev
namespace/dev created
[root@localhost ~]# kubectl get deployment, pods -n dev
error: arguments in resource/name form must have a single resource and name
[root@localhost ~]# kubectl get deployment,pods -n dev
No resources found in dev namespace.
[root@localhost ~]# kubectl run nginx --image=nginx:1.17.1 --port=80 --replicas=3 -n dev
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created
# 查看创建的 Pod。
[root@localhost ~]# kubectl get deployment,pods -n dev
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nginx 2/3 3 2 44s
NAME READY STATUS RESTARTS AGE
pod/nginx-64777cd554-64kzc 0/1 ContainerCreating 0 44s
pod/nginx-64777cd554-6zmhj 1/1 Running 0 44s
pod/nginx-64777cd554-tdrjw 1/1 Running 0 44s
# 查看 deployment 的信息。
[root@localhost ~]# kubectl get deploy -n dev
NAME READY UP-TO-DATE AVAILABLE AGE
nginx 3/3 3 3 4m16s
[root@localhost ~]# kubectl get pods -n dev --show-labels
NAME READY STATUS RESTARTS AGE LABELS
nginx-64777cd554-64kzc 1/1 Running 0 3m38s pod-template-hash=64777cd554,run=nginx
nginx-64777cd554-6zmhj 1/1 Running 0 3m38s pod-template-hash=64777cd554,run=nginx
nginx-64777cd554-tdrjw 1/1 Running 0 3m38s pod-template-hash=64777cd554,run=nginx
# UP-TO-DATE:成功升级的副本数量
# AVAILABLE:可用副本的数量
[root@localhost ~]# kubectl get deploy -n dev -o wide
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
nginx 3/3 3 3 6m7s nginx nginx:1.17.1 run=nginx
# 查看 deployment 的详细信息。
[root@localhost ~]# kubectl describe deploy nginx -n dev
Name: nginx
Namespace: dev
CreationTimestamp: Fri, 02 Dec 2022 23:28:04 +0800
Labels: run=nginx
Annotations: deployment.kubernetes.io/revision: 1
Selector: run=nginx
Replicas: 3 desired | 3 updated | 3 total | 3 available | 0 unavailable
StrategyType: RollingUpdate
MinReadySeconds: 0
RollingUpdateStrategy: 25% max unavailable, 25% max surge
Pod Template:
Labels: run=nginx
Containers:
nginx:
Image: nginx:1.17.1
Port: 80/TCP
Host Port: 0/TCP
Environment:
Mounts:
Volumes:
Conditions:
Type Status Reason
---- ------ ------
Available True MinimumReplicasAvailable
Progressing True NewReplicaSetAvailable
OldReplicaSets:
NewReplicaSet: nginx-64777cd554 (3/3 replicas created)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal ScalingReplicaSet 6m33s deployment-controller Scaled up replica set nginx-64777cd554 to 3
# 删除。
[root@localhost ~]# kubectl delete deploy nginx -n dev
deployment.apps "nginx" deleted
[root@localhost ~]# kubectl get pods -n dev
No resources found in dev namespace.
[root@localhost k8s]# vim deploy-nginx.yaml
[root@localhost k8s]# kubectl create -f deploy-nginx.yaml
deployment.apps/nginx created
[root@localhost k8s]# kubectl get deployment,pods -n dev
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nginx 3/3 3 3 33s
NAME READY STATUS RESTARTS AGE
pod/nginx-64777cd554-25w54 1/1 Running 0 32s
pod/nginx-64777cd554-p862l 1/1 Running 0 32s
pod/nginx-64777cd554-ppsbl 1/1 Running 0 33s
[root@localhost k8s]# kubectl delete -f deploy-nginx.yaml
deployment.apps "nginx" deleted
[root@localhost k8s]# kubectl get deployment,pods -n dev
NAME READY STATUS RESTARTS AGE
pod/nginx-64777cd554-25w54 0/1 Terminating 0 59s
pod/nginx-64777cd554-p862l 0/1 Terminating 0 59s
pod/nginx-64777cd554-ppsbl 0/1 Terminating 0 60s
[root@localhost k8s]# kubectl get deployment,pods -n dev
No resources found in dev namespace.
[root@localhost k8s]#
4.5 Service。
通过上节课的学习,已经能够利用 Deployment 来创建一组 Pod 来提供具有高可用性的服务。
[root@localhost k8s]# kubectl create -f deploy-nginx.yaml
deployment.apps/nginx created
[root@localhost k8s]# kubectl get pods -n dev -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-64777cd554-8d26x 1/1 Running 0 19s 10.244.1.5 localhost.localdomain.k8s.node1
nginx-64777cd554-dtm27 1/1 Running 0 19s 10.244.2.17 localhost.localdomain.k8s.node2
nginx-64777cd554-t9x4n 1/1 Running 0 19s 10.244.2.16 localhost.localdomain.k8s.node2
[root@localhost k8s]# curl 10.244.1.5
curl: (7) Failed connect to 10.244.1.5:80; Connection refused
# 删除后,会重新创建一个新 pod,ip 会变。
[root@localhost k8s]# kubectl delete pod nginx-64777cd554-t9x4n -n dev
pod "nginx-64777cd554-t9x4n" deleted
[root@localhost k8s]# kubectl get pods -n dev -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
nginx-64777cd554-8d26x 1/1 Running 0 4m50s 10.244.1.5 localhost.localdomain.k8s.node1
nginx-64777cd554-ck66j 1/1 Running 0 10s 10.244.2.18 localhost.localdomain.k8s.node2
nginx-64777cd554-dtm27 1/1 Running 0 4m50s 10.244.2.17 localhost.localdomain.k8s.node2
虽然每个 Pod 都会分配一个单独的 Pod IP,然而却存在如下两问题:
Pod IP 会随着 Pod 的重建产生变化。
Pod IP 仅仅是集群内可见的虚拟 IP,外部无法访问。
这样对于访问这个服务带来了难度。因此,kubernetes 设计了 Service 来解决这个问题。
Service 可以看作是一组同类 Pod 对外的访问接口。借助 Service,应用可以方便地实现服务发现和负载均衡。
4.5.1 创建集群内部可访问的 Service。
# 暴露 Service。
[root@localhost k8s]# kubectl expose deploy nginx --name=svc-nginx1 --type=ClusterIP --port=80 --target-port=80 -n dev
service/svc-nginx1 exposed
# 查看 service。
[root@localhost k8s]# kubectl get service -n dev
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
svc-nginx1 ClusterIP 10.104.13.248 <none> 80/TCP 17s
# 这里产生了一个 CLUSTER-IP,这就是 service 的 IP,在 Service 的生命周期中,这个地址是不会变动的。
# 可以通过这个 IP 访问当前 service 对应的 POD。
[root@master ~]# curl 10.104.13.248:80
Welcome to nginx!
Welcome to nginx!
.......
4.5.2 创建集群外部也可访问的 Service。
# 上面创建的 Service 的 type 类型为 ClusterIP,这个 ip 地址只有在集群内部才可以访问。
# 如果需要创建外部也可以访问的 Service,需要修改 type 为 NodePort。
[root@localhost k8s]# kubectl expose deploy nginx --name=svc-nginx2 --type=NodePort --port=80 --target-port=80 -n dev
service/svc-nginx2 exposed
# 此时查看,会发现出现了 NodePort 类型的 Service,而且有一对 Port(80:30228/TCP)。
[root@localhost k8s]# kubectl get svc -n dev
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
svc-nginx1 ClusterIP 10.104.13.248 <none> 80/TCP 22m
svc-nginx2 NodePort 10.97.109.91 <none> 80:30228/TCP 2m17s
# 接下来就可以通过集群外的主机访问节点 IP:30228 访问服务了。
# 例如在自己的电脑主机上通过浏览器访问下面的地址。
http://192.168.142.150:30228
4.5.3 删除 Service。
[root@localhost k8s]# kubectl delete service svc-nginx1 -n dev
service "svc-nginx1" deleted
[root@localhost k8s]# kubectl delete svc svc-nginx2 -n dev
service "svc-nginx2" deleted
[root@localhost k8s]# vim svc-nginx.yaml
[root@localhost k8s]# kubectl create -f svc-nginx.yaml
service/svc-nginx created
[root@localhost k8s]# kubectl get service -n dev
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
svc-nginx ClusterIP 10.109.179.231 <none> 80/TCP 13s
[root@localhost k8s]# curl 10.109.179.231:80
curl: (7) Failed connect to 10.109.179.231:80; Connection refused
[root@localhost k8s]# kubectl delete -f svc-nginx.yaml
service "svc-nginx" deleted
[root@localhost k8s]# kubectl get service -n dev
No resources found in dev namespace.
# 小提示:
# 在这里,可通过一个命令来查看每种资源的可配置项。
# kubectl explain 资源类型 查看某种资源可以配置的一级属性。
# kubectl explain 资源类型.属性 查看属性的子属性。
[root@localhost k8s]# kubectl explain pod
KIND: Pod
VERSION: v1
DESCRIPTION:
Pod is a collection of containers that can run on a host. This resource is
created by clients and scheduled onto hosts.
FIELDS:
apiVersion
APIVersion defines the versioned schema of this representation of an
object. Servers should convert recognized schemas to the latest internal
value, and may reject unrecognized values. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
kind
Kind is a string value representing the REST resource this object
represents. Servers may infer this from the endpoint the client submits
requests to. Cannot be updated. In CamelCase. More info:
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
metadata
[root@localhost k8s]# kubectl explain pod.spec.containers.ports
KIND: Pod
VERSION: v1
RESOURCE: ports
DESCRIPTION:
List of ports to expose from the container. Exposing a port here gives the
system additional information about the network connections a container
uses, but is primarily informational. Not specifying a port here DOES NOT
prevent that port from being exposed. Any port which is listening on the
default "0.0.0.0" address inside a container will be accessible from the
network. Cannot be updated.
ContainerPort represents a network port in a single container.
FIELDS:
# 容器要监听的端口(0 < x < 65536)。
containerPort <integer> -required-
Number of port to expose on the pod's IP address. This must be a valid port
number, 0 < x < 65536.
# 要绑定的主机 IP(一般省略)。
hostIP <string>
What host IP to bind the external port to.
# 容器要在主机上公开的端口,如果设置,主机上只能运行容器的一个副本(一般省略)。
hostPort
Number of port to expose on the host. If specified, this must be a valid
port number, 0 < x < 65536.
# 端口名称,如果指定,必须在 pod 中唯一。
name <string>
If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
named port in a pod must have a unique name. Name for the port that can be
referred to by services.
# 端口协议。必须是 UDP、TCP 或 SCTP。默认为“TCP”。
protocol
Protocol for port. Must be UDP, TCP, or SCTP. Defaults to "TCP".
[root@localhost k8s]# kubectl explain pod.spec.containers.livenessProbe
KIND: Pod
VERSION: v1
RESOURCE: livenessProbe
DESCRIPTION:
Periodic probe of container liveness. Container will be restarted if the
probe fails. Cannot be updated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
Probe describes a health check to be performed against a container to
determine whether it is alive or ready to receive traffic.
FIELDS:
exec
One and only one of the following should be specified. Exec specifies the
action to take.
# 连续探测失败多少次才被认定为失败。默认是 3。最小值是 1。
failureThreshold
Minimum consecutive failures for the probe to be considered failed after
having succeeded. Defaults to 3. Minimum value is 1.
httpGet
HTTPGet specifies the http request to perform.
# 容器启动后等待多少秒执行第一次探测。
initialDelaySeconds
Number of seconds after the container has started before liveness probes
are initiated. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
# 执行探测的频率。默认是 10 秒,最小 1 秒。
periodSeconds
How often (in seconds) to perform the probe. Default to 10 seconds. Minimum
value is 1.
# 连续探测成功多少次才被认定为成功。默认是 1。
successThreshold
Minimum consecutive successes for the probe to be considered successful
after having failed. Defaults to 1. Must be 1 for liveness and startup.
Minimum value is 1.
tcpSocket
TCPSocket specifies an action involving a TCP port. TCP hooks not yet
supported
# 探测超时时间。默认 1 秒,最小 1 秒。
timeoutSeconds
Number of seconds after which the probe times out. Defaults to 1 second.
Minimum value is 1. More info:
https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
# 创建 pod。
[root@localhost k8s]# vim pod-restartpolicy.yaml
[root@localhost k8s]# kubectl create -f pod-restartpolicy.yaml
pod/pod-restartpolicy created
# 查看 Pod 详情,发现 nginx 容器失败。
[root@localhost k8s]# kubectl describe pods pod-restartpolicy -n dev
Name: pod-restartpolicy
Namespace: dev
Priority: 0
Node: localhost.localdomain.k8s.node2/192.168.142.152
Start Time: Sat, 03 Dec 2022 15:50:00 +0800
Labels:
Annotations:
Status: Running
IP: 10.244.2.29
IPs:
IP: 10.244.2.29
Containers:
nginx:
Container ID: docker://5e3a86bc04bf5eff2a861c0db69fb2c87ae730fe680b633def25d7a18c08ed05
Image: nginx:1.17.1
Image ID: docker-pullable://nginx@sha256:b4b9b3eee194703fc2fa8afa5b7510c77ae70cfba567af1376a573a967c03dbb
Port: 80/TCP
Host Port: 0/TCP
State: Running
Started: Sat, 03 Dec 2022 15:50:01 +0800
Ready: True
Restart Count: 0
Liveness: http-get http://:80/hello delay=0s timeout=1s period=10s #success=1 #failure=3
Environment:
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from default-token-n8qxp (ro)
Conditions:
Type Status
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
default-token-n8qxp:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-n8qxp
Optional: false
QoS Class: BestEffort
Node-Selectors:
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 17s default-scheduler Successfully assigned dev/pod-restartpolicy to localhost.localdomain.k8s.node2
Normal Pulled 16s kubelet, localhost.localdomain.k8s.node2 Container image "nginx:1.17.1" already present on machine
Normal Created 16s kubelet, localhost.localdomain.k8s.node2 Created container nginx
Normal Started 16s kubelet, localhost.localdomain.k8s.node2 Started container nginx
Warning Unhealthy 3s (x2 over 13s) kubelet, localhost.localdomain.k8s.node2 Liveness probe failed: HTTP probe failed with statuscode: 404
# 多等一会,再观察 pod 的重启次数,发现一直是 0,并未重启。
[root@localhost k8s]# kubectl get pods pod-restartpolicy -n dev
NAME READY STATUS RESTARTS AGE
pod-restartpolicy 0/1 Completed 0 78s
5.4 Pod 调度。
在默认情况下,一个 Pod 在哪个 Node 节点上运行,是由 Scheduler 组件采用相应的算法计算出来的,这个过程是不受人工控制的。但是在实际使用中,这并不能满足我们的需求,因为很多情况下,我们想控制某些 Pod 到达某些节点上,那么应该怎么做呢?这就要求了解 kubernetes 对 Pod 的调度规则,kubernetes 提供了四大类调度方式。
自动调度:运行在哪个节点上完全由 Scheduler 经过一系列的算法计算得出。
定向调度:NodeName、NodeSelector。
亲和性调度:NodeAffinity、PodAffinity、PodAntiAffinity。
污点(容忍)调度:Taints、Toleration。
5.4.1 定向调度。
定向调度,指的是利用在 pod 上声明 nodeName 或者 nodeSelector,以此将 Pod 调度到期望的 node 节点上。注意,这里的调度是强制的,这就意味着即使要调度的目标 Node 不存在,也会向上面进行调度,只不过 pod 运行失败而已。
– NodeName。
NodeName 用于强制约束将 Pod 调度到指定的 Name 的 Node 节点上。这种方式,其实是直接跳过 Scheduler 的调度逻辑,直接将 Pod 调度到指定名称的节点。
[root@localhost k8s]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
localhost.localdomain.k8s.master Ready master 39h v1.17.4
localhost.localdomain.k8s.node1 Ready 39h v1.17.4
localhost.localdomain.k8s.node2 Ready 39h v1.17.4
[root@localhost k8s]# kubectl explain pod.spec.affinity
KIND: Pod
VERSION: v1
RESOURCE: affinity
DESCRIPTION:
If specified, the pod's scheduling constraints
Affinity is a group of affinity scheduling rules.
FIELDS:
nodeAffinity
Describes node affinity scheduling rules for the pod.
podAffinity
Describes pod affinity scheduling rules (e.g. co-locate this pod in the
same node, zone, etc. as some other pod(s)).
podAntiAffinity
Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod
in the same node, zone, etc. as some other pod(s)).
[root@localhost k8s]# kubectl explain pod.spec.affinity.nodeAffinity
KIND: Pod
VERSION: v1
RESOURCE: nodeAffinity
DESCRIPTION:
Describes node affinity scheduling rules for the pod.
Node affinity is a group of node affinity scheduling rules.
FIELDS:
preferredDuringSchedulingIgnoredDuringExecution
The scheduler will prefer to schedule pods to nodes that satisfy the
affinity expressions specified by this field, but it may choose a node that
violates one or more of the expressions. The node that is most preferred is
the one with the greatest sum of weights, i.e. for each node that meets all
of the scheduling requirements (resource request, requiredDuringScheduling
affinity expressions, etc.), compute a sum by iterating through the
elements of this field and adding "weight" to the sum if the node matches
the corresponding matchExpressions; the node(s) with the highest sum are
the most preferred.
requiredDuringSchedulingIgnoredDuringExecution
If the affinity requirements specified by this field are not met at
scheduling time, the pod will not be scheduled onto the node. If the
affinity requirements specified by this field cease to be met at some point
during pod execution (e.g. due to an update), the system may or may not try
to eventually evict the pod from its node.
# 创建 pod。
[root@localhost k8s]# vim pod-nodeaffinity-preferred.yaml
[root@localhost k8s]# kubectl create -f pod-nodeaffinity-preferred.yaml
pod/pod-nodeaffinity-preferred created
# 查看 pod 状态 (运行成功)。
[root@localhost k8s]# kubectl get pod pod-nodeaffinity-preferred -n dev -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
pod-nodeaffinity-preferred 1/1 Running 0 21s 10.244.2.33 localhost.localdomain.k8s.node2
strategy:指定新的 Pod 替换旧的 Pod 的策略,支持两个属性。
type:指定策略类型,支持两种策略。
Recreate:在创建出新的 Pod 之前会先杀掉所有已存在的 Pod。
RollingUpdate:滚动更新,就是杀死一部分,就启动一部分,在更新过程中,存在两个版本 Pod。
rollingUpdate:当 type 为 RollingUpdate 时生效,用于为 RollingUpdate 设置参数,支持两个属性。
maxUnavailable:用来指定在升级过程中不可用 Pod 的最大数量,默认为 25%。
maxSurge:用来指定在升级过程中可以超过期望的 Pod 的最大数量,默认为 25%。
# 查看当前升级版本的状态。
[root@localhost k8s]# kubectl rollout status deploy pc-deployment -n dev
deployment "pc-deployment" successfully rolled out
[root@localhost k8s]# kubectl set image deploy pc-deployment nginx=nginx:1.17.1 -n dev
deployment.apps/pc-deployment image updated
[root@localhost k8s]# kubectl rollout status deploy pc-deployment -n dev
Waiting for deployment "pc-deployment" rollout to finish: 1 out of 3 new replicas have been updated...
Waiting for deployment "pc-deployment" rollout to finish: 1 out of 3 new replicas have been updated...
Waiting for deployment "pc-deployment" rollout to finish: 1 out of 3 new replicas have been updated...
Waiting for deployment "pc-deployment" rollout to finish: 2 out of 3 new replicas have been updated...
Waiting for deployment "pc-deployment" rollout to finish: 2 out of 3 new replicas have been updated...
Waiting for deployment "pc-deployment" rollout to finish: 2 old replicas are pending termination...
Waiting for deployment "pc-deployment" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "pc-deployment" rollout to finish: 1 old replicas are pending termination...
deployment "pc-deployment" successfully rolled out
# 查看升级历史记录。
[root@localhost k8s]# kubectl rollout history deploy pc-deployment -n dev
deployment.apps/pc-deployment
REVISION CHANGE-CAUSE
4 kubectl create --filename=pc-deployment.yaml --record=true
5 kubectl create --filename=pc-deployment.yaml --record=true
# 可以发现有 5 次版本记录,说明完成过 4 次升级。
# 版本回滚。
# 这里直接使用 --to-revision=1 回滚到了 1 版本,如果省略这个选项,就是回退到上个版本,就是 2 版本。
[root@localhost k8s]# kubectl rollout undo deployment pc-deployment --to-revision=1 -n dev
error: unable to find specified revision 1 in history
[root@localhost k8s]# kubectl rollout undo deployment pc-deployment --to-revision=4 -n dev
deployment.apps/pc-deployment rolled back
# 查看发现,通过 nginx 镜像版本可以发现到了第一版。
[root@localhost k8s]# kubectl get deploy -n dev -o wide
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
pc-deployment 3/3 3 3 19m nginx nginx:1.17.2 app=nginx-pod
# 查看 rs,发现第一个 rs 中有 3 个 pod 运行,后面两个版本的 rs 中 pod 未运行。
# 其实 deployment 之所以可是实现版本的回滚,就是通过记录下历史 rs 来实现的,一旦想回滚到哪个版本,只需要将当前版本 pod 数量降为 0,然后将回滚版本的 pod 提升为目标数量就可以了。
[root@localhost k8s]# kubectl get rs -n dev
NAME DESIRED CURRENT READY AGE
pc-deployment-5d89bdfbf9 0 0 0 18m
pc-deployment-675d469f8b 3 3 3 14m
[root@localhost k8s]# kubectl rollout history deploy pc-deployment -n dev
deployment.apps/pc-deployment
REVISION CHANGE-CAUSE
5 kubectl create --filename=pc-deployment.yaml --record=true
6 kubectl create --filename=pc-deployment.yaml --record=true
比如有一批新的 Pod 资源创建完成后立即暂停更新过程,此时,仅存在一部分新版本的应用,主体部分还是旧的版本。然后,再筛选一小部分的用户请求路由到新版本的 Pod 应用,继续观察能否稳定地按期望的方式运行。确定没问题之后再继续完成余下的 Pod 资源滚动更新,否则立即回滚更新操作。这就是所谓的金丝雀发布。
# 更新 deployment 的版本,并配置暂停 deployment。
[root@localhost k8s]# kubectl set image deploy pc-deployment nginx=nginx:1.17.1 -n dev && kubectl rollout pause deployment pc-deployment -n dev
deployment.apps/pc-deployment image updated
deployment.apps/pc-deployment paused
# 观察更新状态。
[root@localhost k8s]# kubectl rollout status deploy pc-deployment -n dev
Waiting for deployment "pc-deployment" rollout to finish: 1 out of 3 new replicas have been updated...
# 监控更新的过程,可以看到已经新增了一个资源,但是并未按照预期的状态去删除一个旧的资源,就是因为使用了 pause 暂停命令。
[root@localhost k8s]# kubectl get rs -n dev -o wide
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
pc-deployment-5d89bdfbf9 1 1 1 24m nginx nginx:1.17.1 app=nginx-pod,pod-template-hash=5d89bdfbf9
pc-deployment-675d469f8b 3 3 3 20m nginx nginx:1.17.2 app=nginx-pod,pod-template-hash=675d469f8b
[root@localhost k8s]# kubectl get pods -n dev
NAME READY STATUS RESTARTS AGE
pc-deployment-5d89bdfbf9-qnnvk 1/1 Running 0 94s
pc-deployment-675d469f8b-bk7zd 1/1 Running 0 8m10s
pc-deployment-675d469f8b-jjwkr 1/1 Running 0 8m13s
pc-deployment-675d469f8b-prbfd 1/1 Running 0 8m9s
# 确保更新的 pod 没问题了,继续更新。
[root@localhost k8s]# kubectl rollout resume deploy pc-deployment -n dev
deployment.apps/pc-deployment resumed
# 查看最后的更新情况
[root@localhost k8s]# kubectl get rs -n dev -o wide
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
pc-deployment-5d89bdfbf9 3 3 3 26m nginx nginx:1.17.1 app=nginx-pod,pod-template-hash=5d89bdfbf9
pc-deployment-675d469f8b 0 0 0 22m nginx nginx:1.17.2 app=nginx-pod,pod-template-hash=675d469f8b
[root@localhost k8s]# kubectl get pods -n dev
NAME READY STATUS RESTARTS AGE
pc-deployment-5d89bdfbf9-2vccz 1/1 Running 0 74s
pc-deployment-5d89bdfbf9-7j727 1/1 Running 0 73s
pc-deployment-5d89bdfbf9-qnnvk 1/1 Running 0 3m21s
在前面的课程中,我们已经可以实现通过手工执行 kubectl scale 命令实现 Pod 扩容或缩容,但是这显然不符合 Kubernetes 的定位目标–自动化、智能化。Kubernetes 期望可以实现通过监测 Pod 的使用情况,实现 pod 数量的自动调整,于是就产生了 Horizontal Pod Autoscaler(HPA)这种控制器。
HPA 可以获取每个 Pod 利用率,然后和 HPA 中定义的指标进行对比,同时计算出需要伸缩的具体值,最后实现 Pod 的数量的调整。其实 HPA 与之前的 Deployment 一样,也属于一种 Kubernetes 资源对象,它通过追踪分析 RC 控制的所有目标 Pod 的负载变化情况,来确定是否需要针对性地调整目标 Pod 的副本数,这是 HPA 的实现原理。
# 安装 git
[root@k8s-master01 ~]# yum install git -y
# 获取 metrics-server,注意使用的版本。
[root@k8s-master01 ~]# git clone -b v0.3.6 https://github.com/kubernetes-incubator/metrics-server
geek@LAPTOP-0GJSKR6T MINGW64 /d/lyfGeek.download/metrics_server
$ git clone -b v0.3.6 https://github.com/kubernetes-incubator/metrics-server
Cloning into 'metrics-server'...
remote: Enumerating objects: 14982, done.
remote: Counting objects: 100% (92/92), done.
remote: Compressing objects: 100% (59/59), done.
remote: Total 14982 (delta 35), reused 72 (delta 30), pack-reused 14890
Receiving objects: 100% (14982/14982), 13.45 MiB | 3.24 MiB/s, done.
Resolving deltas: 100% (7951/7951), done.
Note: switching to 'd1f4f6fc09cd3134e8ea5ba4e0bd2db4e8002ed8'.
You are in 'detached HEAD' state. You can look around, make experimental
changes and commit them, and you can discard any commits you make in this
state without impacting any branches by switching back to a branch.
If you want to create a new branch to retain commits you create, you may
do so (now or later) by using -c with the switch command. Example:
git switch -c <new-branch-name>
Or undo this operation with:
git switch -
Turn off this advice by setting config variable advice.detachedHead to false
Updating files: 100% (2971/2971), done.
# 修改 deployment,注意修改的是镜像和初始化参数。
[root@k8s-master01 ~]# cd /root/metrics-server/deploy/1.8+/
[root@k8s-master01 1.8+]# vim metrics-server-deployment.yaml
按图中添加下面选项
hostNetwork: true
image: registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server-amd64:v0.3.6
args:
- --kubelet-insecure-tls
- --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP
# 安装 metrics-server。
[root@localhost 1.8+]# cp metrics-server-deployment.yaml metrics-server-deployment.yaml.bak
[root@localhost 1.8+]# vim metrics-server-deployment.yaml
[root@localhost 1.8+]# kubectl apply -f ./
clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created
clusterrolebinding.rbac.authorization.k8s.io/metrics-server:system:auth-delegator created
rolebinding.rbac.authorization.k8s.io/metrics-server-auth-reader created
apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created
serviceaccount/metrics-server created
deployment.apps/metrics-server created
service/metrics-server created
clusterrole.rbac.authorization.k8s.io/system:metrics-server created
clusterrolebinding.rbac.authorization.k8s.io/system:metrics-server created
# 查看 pod 运行情况。
[root@localhost 1.8+]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-9d85f5447-5959v 0/1 Running 155 3d18h
coredns-9d85f5447-gvqxh 0/1 CrashLoopBackOff 155 3d18h
etcd-localhost.localdomain.k8s.master 1/1 Running 1 3d18h
kube-apiserver-localhost.localdomain.k8s.master 1/1 Running 9 3d18h
kube-controller-manager-localhost.localdomain.k8s.master 1/1 Running 2 3d18h
kube-proxy-7dc95 1/1 Running 2 3d17h
kube-proxy-7hss2 1/1 Running 1 3d17h
kube-proxy-rpnvx 1/1 Running 1 3d18h
kube-scheduler-localhost.localdomain.k8s.master 1/1 Running 2 3d18h
metrics-server-6b976979db-j5g6s 1/1 Running 0 74s
# 使用 kubectl top node 查看资源使用情况。
[root@localhost 1.8+]# kubectl top pod -n kube-system
NAME CPU(cores) MEMORY(bytes)
coredns-9d85f5447-gvqxh 0m 0Mi
etcd-localhost.localdomain.k8s.master 27m 83Mi
kube-apiserver-localhost.localdomain.k8s.master 71m 352Mi
kube-controller-manager-localhost.localdomain.k8s.master 39m 53Mi
kube-proxy-7dc95 1m 16Mi
kube-proxy-7hss2 2m 16Mi
kube-proxy-rpnvx 1m 20Mi
kube-scheduler-localhost.localdomain.k8s.master 11m 25Mi
metrics-server-6b976979db-j5g6s 2m 10Mi
# 至此,metrics-server 安装完成。
# 创建 deployment。
[root@localhost ~]# kubectl run nginx --image=nginx:1.17.1 --requests=cpu=100m -n dev
kubectl run --generator=deployment/apps.v1 is DEPRECATED and will be removed in a future version. Use kubectl run --generator=run-pod/v1 or kubectl create instead.
deployment.apps/nginx created
[root@localhost ~]# kubectl get deploy,pod -n dev
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nginx 1/1 1 1 4s
NAME READY STATUS RESTARTS AGE
pod/nginx-778cb5fb7b-dgntb 1/1 Running 0 4s
# 创建 service。
[root@localhost ~]# kubectl expose deployment nginx --type=NodePort --port=80 -n dev
service/nginx exposed
# 查看。
[root@localhost ~]# kubectl get deploy,pods,services -n dev
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nginx 1/1 1 1 2m3s
NAME READY STATUS RESTARTS AGE
pod/nginx-778cb5fb7b-dgntb 1/1 Running 0 2m3s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/nginx NodePort 10.107.80.13 <none> 80:30994/TCP 73s
6.4.3 部署 HPA。
创建 pc-hpa.yaml 文件,内容如下。
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
name: pc-hpa
namespace: dev
spec:
minReplicas: 1 # 最小 pod 数量。
maxReplicas: 10 # 最大 pod 数量。
targetCPUUtilizationPercentage: 3 # CPU 使用率指标。
scaleTargetRef: # 指定要控制的 nginx 信息。
apiVersion: apps/v1
kind: Deployment
name: nginx
name: nginx 指的是上面创建的 deployment.apps/nginx(kind: Deployment),而不是某个具体的 pod。
[root@localhost k8s]# kubectl get deploy,pods,services -n dev
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/nginx 1/1 1 1 8m24s
NAME READY STATUS RESTARTS AGE
pod/nginx-778cb5fb7b-c4fv5 1/1 Running 0 8m24s
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
service/nginx NodePort 10.97.197.88 <none> 80:32496/TCP 7m52s
# 创建 hpa。
[root@localhost k8s]# kubectl create -f pc-hpa.yaml
horizontalpodautoscaler.autoscaling/pc-hpa created
# 查看 hpa。
[root@localhost k8s]# kubectl get hpa -n dev
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
pc-hpa Deployment/nginx <unknown>/3% 1 10 0 24s
[root@localhost k8s]# kubectl get deploy -n dev -w
NAME READY UP-TO-DATE AVAILABLE AGE
nginx 1/1 1 1 9m56s
[root@localhost k8s]# kubectl get hpa -n dev -w
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
pc-hpa Deployment/nginx <unknown>/3% 1 10 0 3m15s
6.4.4 测试。
使用压测工具对 service 地址 192.168.142.150:32496 进行压测,然后通过控制台查看 hpa 和 pod 的变化。
[root@localhost k8s]# kubectl get hpa -n dev -w
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
pc-hpa Deployment/nginx <unknown>/3% 1 10 0 2m37s
[root@localhost k8s]# kubectl get deployment -n dev -w
NAME READY UP-TO-DATE AVAILABLE AGE
nginx 1/1 1 1 4m
nginx-deployment 3/3 3 3 145m
tomcat-deployment 3/3 3 3 145m
[root@localhost k8s]# kubectl get pods -n dev -w
NAME READY STATUS RESTARTS AGE
nginx-778cb5fb7b-c4fv5 1/1 Running 0 4m25s
hpa 变化。
[root@k8s-master01 ~]# kubectl get hpa -n dev -w
NAME REFERENCE TARGETS MINPODS MAXPODS REPLICAS AGE
deployment 变化。
[root@k8s-master01 ~]# kubectl get deployment -n dev -w
NAME READY UP-TO-DATE AVAILABLE AGE
pod 变化。
[root@k8s-master01 ~]# kubectl get pods -n dev -w
NAME READY STATUS RESTARTS AGE
6.5 DaemonSet(DS)。
DaemonSet 类型的控制器可以保证在集群中的每一台(或指定)节点上都运行一个副本。一般适用于日志收集、节点监控等场景。也就是说,如果一个 Pod 提供的功能是节点级别的(每个节点都需要且只需要一个),那么这类 Pod 就适合使用 DaemonSet 类型的控制器创建。
在 kubernetes 中,pod 是应用程序的载体,我们可以通过 pod 的 ip 来访问应用程序,但是 pod 的 ip 地址不是固定的,这也就意味着不方便直接采用 pod 的 ip 对服务进行访问。
为了解决这个问题,kubernetes 提供了 Service 资源,Service 会对提供同一个服务的多个 pod 进行聚合,并且提供一个统一的入口地址。通过访问 Service 的入口地址就能访问到后面的 pod 服务。
Service 在很多情况下只是一个概念,真正起作用的其实是 kube-proxy 服务进程,每个 Node 节点上都运行着一个 kube-proxy 服务进程。当创建 Service 的时候会通过 api-server 向 etcd 写入创建的 service 的信息,而 kube-proxy 会基于监听的机制发现这种 Service 的变动,然后它会将最新的 Service 信息转换成对应的访问规则。
# 10.97.97.97:80 是 service 提供的访问入口。
# 当访问这个入口的时候,可以发现后面有三个 pod 的服务在等待调用,
# kube-proxy 会基于 rr(轮询)的策略,将请求分发到其中一个 pod 上去。
# 这个规则会同时在集群内的所有节点上都生成,所以在任何一个节点上,都可以访问。
[root@localhost k8s]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
# 没有对应规则,未开启。
userspace 模式下,kube-proxy 会为每一个 Service 创建一个监听端口,发向 Cluster IP 的请求被 Iptables 规则重定向到 kube-proxy 监听的端口上,kube-proxy 根据 LB 算法选择一个提供服务的 Pod 并和其建立链接,以将请求转发到 Pod 上。该模式下,kube-proxy 充当了一个四层负载均衡器的角色。由于 kube-proxy 运行在 userspace 中,在进行转发处理时会增加内核和用户空间之间的数据拷贝,虽然比较稳定,但是效率比较低。
7.1.2 iptables 模式。
iptables 模式下,kube-proxy 为 service 后端的每个 Pod 创建对应的 iptables 规则,直接将发向 Cluster IP 的请求重定向到一个 Pod IP。该模式下 kube-proxy 不承担四层负载均衡器的角色,只负责创建 iptables 规则。该模式的优点是较 userspace 模式效率更高,但不能提供灵活的 LB 策略,当后端 Pod 不可用时也无法进行重试。
在某些场景中,开发人员可能不想使用 Service 提供的负载均衡功能,而希望自己来控制负载均衡策略,针对这种情况,kubernetes 提供了 Headless Service(无头服务),这类 Service 不会分配 Cluster IP,如果想要访问 service,只能通过 service 的域名进行查询。
# 创建 service。
[root@localhost k8s]# vim service-headliness.yaml
[root@localhost k8s]# kubectl create -f service-headliness.yaml
service/service-headliness created
# 获取 service,发现 CLUSTER-IP 未分配。
[root@localhost k8s]# kubectl get svc service-headliness -n dev -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service-headliness ClusterIP None <none> 80/TCP 14s app=nginx-pod
# 查看 service 详情。
[root@localhost k8s]# kubectl describe svc service-headliness -n dev
Name: service-headliness
Namespace: dev
Labels:
Annotations:
Selector: app=nginx-pod
Type: ClusterIP
IP: None
Port: 80/TCP
TargetPort: 80/TCP
Endpoints: 10.244.1.62:80,10.244.1.63:80,10.244.2.64:80
Session Affinity: None
Events:
# 查看域名的解析情况。进入其中一台 pod。
[root@localhost k8s]# kubectl get pods -n dev
NAME READY STATUS RESTARTS AGE
nginx-778cb5fb7b-dgntb 1/1 Running 1 3d4h
pc-deployment-6696798b78-9s6pb 1/1 Running 1 7h25m
pc-deployment-6696798b78-fz6xg 1/1 Running 1 7h25m
pc-deployment-6696798b78-gp6fs 1/1 Running 1 7h25m
[root@localhost k8s]# kubectl exec -it pc-deployment-6696798b78-9s6pb -n dev /bin/bash
root@pc-deployment-6696798b78-9s6pb:/# cat /etc/resolv.conf
nameserver 10.96.0.10
search dev.svc.cluster.local svc.cluster.local cluster.local localdomain.k8s.node1
options ndots:5
# @nameserver(域名服务器) + 服务名称(service 名称 + namespace(dev)【dev.svc.cluster.local】)
[root@localhost k8s]# dig @10.96.0.10 service-headliness.dev.svc.cluster.local
; <<>> DiG 9.11.4-P2-RedHat-9.11.4-26.P2.el7_9.10 <<>> @10.96.0.10 service-headliness.dev.svc.cluster.local
; (1 server found)
;; global options: +cmd
;; Got answer:
;; WARNING: .local is reserved for Multicast DNS
;; You are currently testing what happens when an mDNS query is leaked to DNS
;; ->>HEADER<<- opcode: QUERY, status: NOERROR
7.3.5 NodePort 类型的 Service。
在之前的样例中,创建的 Service 的 ip 地址只有集群内部才可以访问,如果希望将 Service 暴露给集群外部使用,那么就要使用到另外一种类型的 Service,称为 NodePort 类型。NodePort 的工作原理其实就是将 service 的端口映射到 Node 的一个端口上,然后就可以通过 NodeIp:NodePort 来访问 service 了。
创建 service-nodeport.yaml。
apiVersion: v1
kind: Service
metadata:
name: service-nodeport
namespace: dev
spec:
selector:
app: nginx-pod
type: NodePort # service 类型。
ports:
- port: 80
nodePort: 30002 # 指定绑定的 node 的端口(默认的取值范围是:30000-32767),如果不指定,会默认分配。
targetPort: 80
# 创建 service。
[root@localhost k8s]# vim service-nodeport.yaml
[root@localhost k8s]# kubectl create -f service-nodeport.yaml
service/service-nodeport created
# 查看 service。
[root@localhost k8s]# kubectl get svc -n dev -o wide
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR
service-nodeport NodePort 10.110.23.187 <none> 80:30002/TCP 28s app=nginx-pod
# 接下来可以通过电脑主机的浏览器去访问集群中任意一个 nodeip 的 30002 端口,即可访问到 pod。
192.168.142.150:30002
ExternalName 类型的 Service 用于引入集群外部的服务,它通过 externalName 属性指定外部一个服务的地址,然后在集群内部访问此 service 就可以访问到外部的服务了。
apiVersion: v1
kind: Service
metadata:
name: service-externalname
namespace: dev
spec:
type: ExternalName # service 类型。
externalName: www.baidu.com # 改成 ip 地址也可以。
# 创建 service。
[root@localhost k8s]# kubectl create -f service-externalname.yaml
service/service-externalname created
# 域名解析。
[root@localhost k8s]# dig @10.96.0.10 service-externalname.dev.svc.cluster.local
; <<>> DiG 9.11.4-P2-RedHat-9.11.4-26.P2.el7_9.10 <<>> @10.96.0.10 service-externalname.dev.svc.cluster.local
; (1 server found)
;; global options: +cmd
;; Got answer:
;; WARNING: .local is reserved for Multicast DNS
;; You are currently testing what happens when an mDNS query is leaked to DNS
;; ->>HEADER<<- opcode: QUERY, status: NOERROR
# 创建。
[root@localhost ingress_controller]# kubectl create -f tomcat-nginx.yaml
deployment.apps/nginx-deployment created
deployment.apps/tomcat-deployment created
service/nginx-service created
service/tomcat-service created
# 查看。
[root@localhost ingress_controller]# kubectl get svc -n dev
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
nginx-service ClusterIP None <none> 80/TCP 28s
tomcat-service ClusterIP None <none> 8080/TCP 28s
[root@localhost ingress_controller]# kubectl get deployment -n dev
NAME READY UP-TO-DATE AVAILABLE AGE
nginx-deployment 3/3 3 3 55s
tomcat-deployment 3/3 3 3 55s