Kubernetes Installation Scripts
Offline Deployment
1. Disable certain system features
# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Disable SELinux
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
# Disable swap
swapoff -a
yes | cp /etc/fstab /etc/fstab_bak
cat /etc/fstab_bak |grep -v swap > /etc/fstab
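Quick sanity check before installing (a minimal sketch; run it on every node):
systemctl is-active firewalld    # expect: inactive
getenforce                       # expect: Permissive now, Disabled after reboot
swapon --show                    # expect: no output
free -m | grep -i swap           # expect: all zeros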
Download sealos v3.3.8: https://github.com/fanux/sealos/releases/download/v3.3.8/sealos
Install sealos: chmod +x sealos && mv sealos /usr/bin
Create the first master node:
sealos init --user root --passwd 1234567 \
--master 10.172.186.1 \
--pkg-url /root/K8S/kube1.18.0.tar.gz \
--version v1.18.0
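If the cluster should start with several masters and worker nodes, sealos v3 accepts repeated --master and --node flags; a sketch (every IP except 10.172.186.1 is a placeholder):
sealos init --user root --passwd 1234567 \
--master 10.172.186.1 --master 10.172.186.2 --master 10.172.186.3 \
--node 10.172.186.11 --node 10.172.186.12 \
--pkg-url /root/K8S/kube1.18.0.tar.gz \
--version v1.18.0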
Add a master node:
sealos join --master 10.172.186.2
Remove a master node:
sealos clean --master 10.172.186.2
Add a master node:
sealos join --master 10.172.186.3
Remove a master node:
sealos clean --master 10.172.186.3
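Worker nodes are handled the same way with the --node flag (the .11 address is a placeholder):
sealos join --node 10.172.186.11
sealos clean --node 10.172.186.11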
Install the Kubernetes dashboard:
sealos install --pkg-url dashboard.tar
Get the login token:
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')
https://10.172.186.1:32000 (open in Firefox)
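The token command above greps for an admin-user secret, so it assumes the standard admin-user ServiceAccount and ClusterRoleBinding from the dashboard documentation exist (the dashboard.tar package may already create them); if the secret is missing, a minimal sketch to create them by hand:
kubectl apply -f - <<EOF
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kubernetes-dashboard
EOF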
Install Kuboard (similar to the dashboard), then access it at:
http://10.172.186.1:32567/dashboard
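The install command itself is not recorded here; Kuboard v2 is normally installed by applying the manifest published at kuboard.cn (verify the URL against the current Kuboard docs), e.g.:
kubectl apply -f https://kuboard.cn/install-script/kuboard.yaml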
# If you installed Kubernetes following the documentation at www.kuboard.cn, run this command on the first master node
Get the login token:
echo $(kubectl -n kube-system get secret $(kubectl -n kube-system get secret | grep kuboard-user | awk '{print $1}') -o go-template='{{.data.token}}' | base64 -d)
--------- Run the following commands on any master node ---------
Check node status:
kubectl get nodes
List all pods (services), their status, and the worker node each one runs on:
kubectl get po -o wide
Inspect a pod's startup status and logs:
kubectl describe pod kubernetes-dashboard-7d9ddf9f8f-69skd -n default    # kubectl describe pod <pod-name> -n <namespace>
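describe shows the pod's events and state but not its application output; logs come from kubectl logs (same example pod and namespace):
kubectl logs kubernetes-dashboard-7d9ddf9f8f-69skd -n default                 # dump current logs
kubectl logs -f --tail=100 kubernetes-dashboard-7d9ddf9f8f-69skd -n default   # follow the last 100 lines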
Delete a pod:
kubectl delete pod kuboard-57659779b8-sz6ns -n kube-system
Deploy a service:
kubectl apply -f nginx-ingress-service-nodeport.yaml
Remove a service:
kubectl delete -f nginx-ingress-service-nodeport.yaml
Replace a config file and republish the image
Load the image: docker load < testservice.tar
Start a container: docker run testservice:latest
Open a shell in the container: docker exec -it 8d8ed1be2f4b bash
Copy the new config file into the container: docker cp /testservice/appsettings.cof 8d8ed1be2f4b:/app/
Commit the container as a new image: docker commit 2bc9c96258fe testservice:latest
Load the new image onto every worker node (see the sketch below).
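One way to get the committed image onto every worker node is to save it to a tarball and load it over SSH; a sketch assuming worker hostnames node1/node2 (placeholders) and root SSH access:
docker save testservice:latest -o /tmp/testservice.tar
for n in node1 node2; do
  scp /tmp/testservice.tar root@$n:/tmp/
  ssh root@$n "docker load -i /tmp/testservice.tar"
done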
testservice-clusterip
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "testservice"
},
"spec": {
"type": "ClusterIP",
"selector": {
"component": "testservice"
},
"ports": [
{
"protocol": "TCP",
"port": 80,
"targetPort": 80
}
]
}
}
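Assuming the Service above is saved as testservice-clusterip.json (filename illustrative), apply and verify it like any other resource:
kubectl apply -f testservice-clusterip.json
kubectl get svc testservice         # ClusterIP, port 80
kubectl get endpoints testservice   # lists pod IPs once the deployment below is running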
testservice-deployment
{
"kind": "Deployment",
"apiVersion": "apps/v1",
"metadata": {
"name": "testservice",
"namespace": "default",
"labels": {
"component": "testservice"
}
},
"spec": {
"replicas": 2,
"selector": {
"matchLabels": {
"component": "testservice"
}
},
"template": {
"metadata": {
"labels": {
"component": "testservice"
}
},
"spec": {
"containers": [
{
"name": "testservice",
"image": "testservice:latest",
"imagePullPolicy": "IfNotPresent",
"livenessProbe": {
"httpGet": {
"path": "/api/health/healthcheck",
"port": 80
},
"initialDelaySeconds": 30,
"timeoutSeconds": 5,
"periodSeconds": 30
},
"readinessProbe": {
"httpGet": {
"path": "api/health/healthcheck",
"port": 80
},
"initialDelaySeconds": 30,
"timeoutSeconds": 5,
"periodSeconds": 120
},
"resources": {
"requests": {
"memory": "400Mi"
},
"limits": {
"memory": "500Mi"
}
},
"ports": [
{
"containerPort": 80
}
],
"securityContext": {
"privileged": false
}
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30
}
},
"strategy": {
"type": "RollingUpdate",
"rollingUpdate": {
"maxUnavailable": "25%",
"maxSurge": "25%"
}
},
"revisionHistoryLimit": 10,
"progressDeadlineSeconds": 600
}
}
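After applying the Deployment (e.g. kubectl apply -f testservice-deployment.json, filename illustrative), the rollout and the probes can be watched with:
kubectl rollout status deployment/testservice
kubectl get pods -l component=testservice -o wide   # both replicas and their worker nodes
kubectl describe deployment testservice             # rolling-update progress and probe events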
testservice-hpa
{
"apiVersion": "autoscaling/v2beta1",
"kind": "HorizontalPodAutoscaler",
"metadata": {
"name": "testservice"
},
"spec": {
"scaleTargetRef": {
"apiVersion": "apps/v1",
"kind": "Deployment",
"name": "testservice"
},
"minReplicas": 5,
"maxReplicas": 10,
"metrics": [
{
"type": "Resource",
"resource": {
"name": "cpu",
"targetAverageUtilization": 80
}
},
{
"type": "Resource",
"resource": {
"name": "memory",
"targetAverageUtilization": 80
}
}
]
}
}
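This HPA only works when metrics-server is installed, because CPU and memory utilization are read from the metrics API; note also that minReplicas: 5 immediately scales the Deployment above its declared replicas: 2. A quick check:
kubectl get hpa testservice                   # TARGETS should show percentages, not <unknown>
kubectl top pods -l component=testservice     # requires metrics-server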
Ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: nginx-ingress-service
namespace: default
annotations:
kubernetes.io/ingress.class: "nginx"
nginx.ingress.kubernetes.io/cors-allow-methods: "PUT, GET, POST, OPTIONS"
nginx.ingress.kubernetes.io/rewrite-target: "/$1"
nginx.ingress.kubernetes.io/ssl-redirect: "false"
nginx.ingress.kubernetes.io/proxy-body-size: "50m"
nginx.org/client-max-body-size: "50m"
nginx.ingress.kubernetes.io/proxy-connect-timeout: "1800"
nginx.ingress.kubernetes.io/proxy-send-timeout: "1800"
nginx.ingress.kubernetes.io/proxy-read-timeout: "1800"
nginx.ingress.kubernetes.io/proxy-buffering: "on"
nginx.ingress.kubernetes.io/proxy-buffer-size: "8k"
nginx.ingress.kubernetes.io/proxy-buffers-number: "5"
spec:
rules:
- host: tpdigitaldoxmicroservices-pre.deloitte.com.cn
http:
paths:
- path: /test/?(.*)
backend:
serviceName: testservice
servicePort: 80
- path: /ingresstest/?(.*)
backend:
serviceName: ingress-demo
servicePort: 80
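With the NodePort service below exposing the ingress controller on 30080, the rule can be exercised from outside the cluster; because of rewrite-target "/$1", a request to /test/api/health/healthcheck reaches the backend as /api/health/healthcheck (health path taken from the Deployment above):
curl -H "Host: tpdigitaldoxmicroservices-pre.deloitte.com.cn" \
http://10.172.186.1:30080/test/api/health/healthcheck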
install_mandatory.yaml
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
data:
server-tokens: 'false'
metadata:
name: nginx-configuration
namespace: default
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
name: tcp-services
namespace: default
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
name: udp-services
namespace: default
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nginx-ingress-serviceaccount
namespace: default
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: nginx-ingress-clusterrole
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- "extensions"
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- "extensions"
resources:
- ingresses/status
verbs:
- update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: nginx-ingress-role
namespace: default
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
# Defaults to "<election-id>-<ingress-class>"
# Here: "<ingress-controller-leader>-<nginx>"
# This has to be adapted if you change either parameter
# when launching the nginx-ingress-controller.
- "ingress-controller-leader-nginx"
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: nginx-ingress-role-nisa-binding
namespace: default
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: nginx-ingress-role
subjects:
- kind: ServiceAccount
name: nginx-ingress-serviceaccount
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: nginx-ingress-clusterrole-nisa-binding
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nginx-ingress-clusterrole
subjects:
- kind: ServiceAccount
name: nginx-ingress-serviceaccount
namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-ingress-controller
namespace: default
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
template:
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
annotations:
prometheus.io/port: "10254"
prometheus.io/scrape: "true"
spec:
serviceAccountName: nginx-ingress-serviceaccount
containers:
- name: nginx-ingress-controller
image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.23.0
args:
- /nginx-ingress-controller
- --configmap=$(POD_NAMESPACE)/nginx-configuration
- --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
- --udp-services-configmap=$(POD_NAMESPACE)/udp-services
- --publish-service=$(POD_NAMESPACE)/ingress-nginx
- --annotations-prefix=nginx.ingress.kubernetes.io
securityContext:
allowPrivilegeEscalation: true
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
# www-data -> 33
runAsUser: 33
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- name: http
containerPort: 80
- name: https
containerPort: 443
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 10
nginx-ingress-service-nodeport.yaml
apiVersion: v1
kind: Service
metadata:
name: ingress-nginx
namespace: default
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
spec:
type: NodePort
ports:
- name: http
port: 80
targetPort: 80
protocol: TCP
nodePort: 30080
- name: https
port: 443
targetPort: 443
protocol: TCP
nodePort: 30443
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/part-of: ingress-nginx
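A typical order for bringing up the whole ingress stack with the manifests on this page, followed by a quick verification:
kubectl apply -f install_mandatory.yaml
kubectl apply -f nginx-ingress-service-nodeport.yaml
kubectl apply -f Ingress.yaml
kubectl get pods -n default -l app.kubernetes.io/name=ingress-nginx   # controller should be Running
kubectl get svc ingress-nginx -n default                              # NodePorts 30080/30443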