Installing Kuboard on K8s
Installation
Install the management tool kuboard-spray on a Mac (or any other machine):
```bash
docker run -d \
  --privileged \
  --restart=unless-stopped \
  --name=kuboard-spray \
  -p 80:80/tcp \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v ~/kuboard-spray-data:/data \
  eipwork/kuboard-spray:latest-amd64
```

1. Visit http://localhost/#/login and log in to the Kuboard-Spray UI with username admin and the default password Kuboard123.
2. Click Add Cluster Installation Plan, enter a cluster name, select spray-v2.21.0c_k8s-v1.26.4_v4.4-amd64, and click OK.
3. Click Add Node to add a node; check control plane, etcd node, and worker node, then click OK.
4. On the right, enter the target node's IP (172.16.3.165), port, and password; at the very bottom, enter the etcd member name etcd_exxk; then click Validate Connection.
5. Finally, click Save, then click the Install/Setup K8S Cluster button to start the installation.
6. Wait for the installation to complete; if it fails, the install step can be repeated.
7. Visit http://172.16.3.165 — default username: admin, default password: Kuboard123.
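Once the install reports success, a quick sanity check from the control-plane node confirms the cluster and the Kuboard pods are healthy (standard kubectl commands; pod names will vary):

```bash
# The node should be Ready and running v1.26.4
kubectl get nodes -o wide
# Kuboard's own components run in the kuboard namespace
kubectl get pods -n kuboard
```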
Configuration
Option 1: change the Kuboard port
Find Kuboard's deployment manifest and open it:
vi /etc/kubernetes/manifests/kuboard.yaml
Change:
```yaml
- env:
    - name: KUBOARD_ENDPOINT
      value: "http://172.16.3.165:14001" # change 80 to 14001
  name: kuboard
  ports:
    - containerPort: 80
      hostPort: 14001 # change hostPort to 14001
      name: web
      protocol: TCP
```
Save, then wait for the pod to restart automatically.
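Because this is a static pod, kubelet applies the edit on its own; a quick way to confirm the new port is serving (ordinary commands, using this document's node IP):

```bash
# Watch the kuboard pod restart after the manifest edit
kubectl get pods -n kuboard -w
# The UI should now answer on the new hostPort
curl -I http://172.16.3.165:14001
```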
Note — static pods: a static pod is managed directly by the kubelet daemon on its node, without the API server supervising it. Unlike pods managed by the control plane (for example, through a Deployment), the kubelet watches each static pod itself and restarts it when it fails.
Characteristics: editing the manifest restarts the pod automatically; the pod cannot be deleted through the API — it can only be removed by moving its manifest file out of the directory. The default static pod directory is /etc/kubernetes/manifests.
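For example, to stop and later restore a static pod (a minimal sketch of the move-the-manifest trick described above):

```bash
# kubectl delete won't stick: kubelet recreates the mirror pod immediately.
# Moving the manifest out of the watched directory is what actually stops it:
mv /etc/kubernetes/manifests/kuboard.yaml /root/kuboard.yaml.bak
# Moving it back starts the pod again:
mv /root/kuboard.yaml.bak /etc/kubernetes/manifests/kuboard.yaml
```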
Option 2: route Kuboard through ingress-nginx (failed — the UI is reachable, but the shell/terminal features don't work)
In the Kuboard namespace of the Kuboard admin UI, create a Service:
```yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s.kuboard.cn/name: kuboard-v3
  name: kuboard-v3
  namespace: kuboard
spec:
  ports:
    - protocol: TCP
      port: 80
      targetPort: 80
  selector:
    k8s.kuboard.cn/name: kuboard-v3
  type: ClusterIP
```
In the Kuboard namespace, create an Ingress:
```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  labels:
    k8s.kuboard.cn/name: kuboard-v3
  name: kuboard
  namespace: kuboard
spec:
  ingressClassName: myingresscontroller # requires IngressNginxController to be installed first; use the name chosen at install time
  rules:
    - host: kuboard.iexxk.io # the domain suffix chosen at install time
      http:
        paths:
          - backend:
              service:
                name: kuboard-v3
                port:
                  number: 80
            path: /
            pathType: Prefix
```
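At this point the Ingress can be smoke-tested from any machine that reaches a cluster node, before touching the Kuboard manifest (the IP and the 32211 HTTP nodePort are the values used elsewhere in this document — substitute your own):

```bash
# Send a request through the ingress-nginx nodePort with the Ingress host header
curl -H "Host: kuboard.iexxk.io" http://172.16.3.165:32211/
```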
Find Kuboard's deployment manifest and edit it:
vi /etc/kubernetes/manifests/kuboard.yaml
Change:
```yaml
# ...other config omitted
- env:
    - name: KUBOARD_ENDPOINT
      value: "http://kuboard.iexxk.io" # change 172.16.3.165:80 to kuboard.iexxk.io
  name: kuboard
  ports:
    - containerPort: 80
      # hostPort: 14001  <- delete the hostPort line
      name: web
      protocol: TCP
# ...omitted
```
Save, then wait for the pod to restart automatically.
Notes:
I originally wanted to create these resources via a static pod manifest as well, but additions to that file didn't take effect, so they had to be created through the UI instead.
IngressNginxController is, simply put, just an nginx: entering the pod's container shows the usual nginx configuration. Once a service has an Ingress configured, the corresponding nginx config is generated automatically inside the ingress pod, as in the following sample:
```nginx
## start server web.iexxk.io
server {
    server_name web.iexxk.io ;

    listen 80 ;
    listen [::]:80 ;
    listen 443 ssl http2 ;
    listen [::]:443 ssl http2 ;

    set $proxy_upstream_name "-";

    ssl_certificate_by_lua_block {
        certificate.call()
    }

    location / {
        set $namespace      "exxk";
        set $ingress_name   "web";
        set $service_name   "web";
        set $service_port   "80";
        set $location_path  "/";
        set $global_rate_limit_exceeding n;

        rewrite_by_lua_block {
            lua_ingress.rewrite({
                force_ssl_redirect = false,
                ssl_redirect = true,
                force_no_ssl_redirect = false,
                preserve_trailing_slash = false,
                use_port_in_redirects = false,
                global_throttle = { namespace = "", limit = 0, window_size = 0, key = { }, ignored_cidrs = { } },
            })
            balancer.rewrite()
            plugins.run()
        }

        # be careful with `access_by_lua_block` and `satisfy any` directives as satisfy any
        # will always succeed when there's `access_by_lua_block` that does not have any lua code doing `ngx.exit(ngx.DECLINED)`
        # other authentication method such as basic auth or external auth useless - all requests will be allowed.
        #access_by_lua_block {
        #}

        header_filter_by_lua_block {
            lua_ingress.header()
            plugins.run()
        }

        body_filter_by_lua_block {
            plugins.run()
        }

        log_by_lua_block {
            balancer.log()
            monitor.call()
            plugins.run()
        }

        port_in_redirect off;

        set $balancer_ewma_score -1;
        set $proxy_upstream_name "exxk-web-80";
        set $proxy_host          $proxy_upstream_name;
        set $pass_access_scheme  $scheme;
        set $pass_server_port    $server_port;
        set $best_http_host      $http_host;
        set $pass_port           $pass_server_port;

        set $proxy_alternative_upstream_name "";

        client_max_body_size 1m;

        proxy_set_header Host $best_http_host;

        # Pass the extracted client certificate to the backend

        # Allow websocket connections
        proxy_set_header Upgrade    $http_upgrade;
        proxy_set_header Connection $connection_upgrade;

        proxy_set_header X-Request-ID       $req_id;
        proxy_set_header X-Real-IP          $remote_addr;
        proxy_set_header X-Forwarded-For    $remote_addr;
        proxy_set_header X-Forwarded-Host   $best_http_host;
        proxy_set_header X-Forwarded-Port   $pass_port;
        proxy_set_header X-Forwarded-Proto  $pass_access_scheme;
        proxy_set_header X-Forwarded-Scheme $pass_access_scheme;
        proxy_set_header X-Scheme           $pass_access_scheme;

        # Pass the original X-Forwarded-For
        proxy_set_header X-Original-Forwarded-For $http_x_forwarded_for;

        # mitigate HTTPoxy Vulnerability
        # https://www.nginx.com/blog/mitigating-the-httpoxy-vulnerability-with-nginx/
        proxy_set_header Proxy "";

        # Custom headers to proxied server

        proxy_connect_timeout 5s;
        proxy_send_timeout    60s;
        proxy_read_timeout    60s;

        proxy_buffering          off;
        proxy_buffer_size        4k;
        proxy_buffers            4 4k;
        proxy_max_temp_file_size 1024m;

        proxy_request_buffering on;
        proxy_http_version      1.1;

        proxy_cookie_domain off;
        proxy_cookie_path   off;

        # In case of errors try the next upstream server before returning an error
        proxy_next_upstream         error timeout;
        proxy_next_upstream_timeout 0;
        proxy_next_upstream_tries   3;

        proxy_pass http://upstream_balancer;

        proxy_redirect off;
    }
}
## end server web.iexxk.io
```
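To inspect the generated config in your own cluster, something along these lines should work (the pod name is a placeholder — take it from the `kubectl get pods` output; the namespace may differ depending on how the controller was installed):

```bash
# Find the ingress controller pod
kubectl get pods -n ingress-nginx
# Dump the rendered nginx configuration from it
kubectl exec -n ingress-nginx <controller-pod-name> -- cat /etc/nginx/nginx.conf
```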
Installing IngressNginxController
On the cluster's Cluster Management –> Network –> IngressClass list page, click the Install IngressNginxController and create IngressClass button, and enter the name myingresscontroller. Then check the port hints shown in the UI:
```
Load balancer mapping
It is recommended to use a load balancer outside the Kubernetes cluster and configure L4 forwarding (the source address can then not be traced via X-FORWARDED-FOR) or L7 forwarding (L7 forwarding is tedious to configure on some load balancer products) for the following ports
(if you have already completed the forwarding setup, ignore this message).
Forward port 80 of the load balancer to port 32211 on any node of the Kubernetes cluster
Forward port 443 of the load balancer to port 31612 on any node of the Kubernetes cluster
```
Option 1 (saves resources, but port 80 is then occupied and unavailable for other uses): change the container's hostPort to 80, after which the domain name can be used directly. (You cannot simply change myingresscontroller's nodePort from 32211 to 80, because the cluster's nodePort range is 30000–40000.)
Option 2 (runs one extra nginx service, but is more flexible): configure an external nginx by creating a static-pod nginx service.
Create static-nginx.yaml in the /etc/kubernetes/manifests/ directory:
```yaml
apiVersion: v1
kind: Pod
metadata:
  annotations: {}
  labels:
    k8s.kuboard.cn/name: static-nginx
  name: static-nginx
  namespace: ingress-nginx
spec:
  containers:
    - name: web
      image: nginx:alpine
      ports:
        - name: web
          containerPort: 80
          hostPort: 80
          protocol: TCP
      volumeMounts:
        - mountPath: /etc/nginx/conf.d/default.conf
          name: nginx-conf
  volumes:
    - hostPath:
        path: "/root/static-nginx/nginx.conf"
      name: nginx-conf
```
In the /root/static-nginx directory, create nginx.conf:
```nginx
server {
    listen 80;
    server_name .iexxk.io;
    #access_log /var/log/nginx/hoddst.access.log main;
    location / {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_pass http://172.16.3.165:32211/;
    }
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }
}
```
Create a test web app whose Ingress host is set to web.iexxk.io and access it; remember to map the domain *.iexxk.io to the host 172.16.3.165.
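For a quick test without real DNS, a hosts-file entry on the client machine is enough (a hypothetical example; /etc/hosts has no wildcard support, so each hostname is listed explicitly):

```
172.16.3.165 web.iexxk.io kuboard.iexxk.io
```

The full manifests used for the test web (Deployment, Service, Ingress, as exported from Kuboard) follow.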
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  annotations: {}
  labels:
    k8s.kuboard.cn/name: web
  name: web
  namespace: exxk
  resourceVersion: '126840'
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s.kuboard.cn/name: web
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        k8s.kuboard.cn/name: web
    spec:
      containers:
        - image: 'nginx:alpine'
          imagePullPolicy: IfNotPresent
          name: web
          ports:
            - containerPort: 80
              protocol: TCP
          resources: {}
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status:
  availableReplicas: 1
  conditions:
    - lastTransitionTime: '2023-08-30T03:52:17Z'
      lastUpdateTime: '2023-08-30T03:52:17Z'
      message: Deployment has minimum availability.
      reason: MinimumReplicasAvailable
      status: 'True'
      type: Available
    - lastTransitionTime: '2023-08-30T03:52:16Z'
      lastUpdateTime: '2023-08-30T03:52:17Z'
      message: ReplicaSet "web-6f8fdd7f55" has successfully progressed.
      reason: NewReplicaSetAvailable
      status: 'True'
      type: Progressing
  observedGeneration: 1
  readyReplicas: 1
  replicas: 1
  updatedReplicas: 1
---
apiVersion: v1
kind: Service
metadata:
  annotations: {}
  labels:
    k8s.kuboard.cn/name: web
  name: web
  namespace: exxk
  resourceVersion: '126824'
spec:
  clusterIP: 10.233.80.181
  clusterIPs:
    - 10.233.80.181
  internalTrafficPolicy: Cluster
  ipFamilies:
    - IPv4
  ipFamilyPolicy: SingleStack
  ports:
    - name: gre3pw
      port: 80
      protocol: TCP
      targetPort: 80
  selector:
    k8s.kuboard.cn/name: web
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  annotations: {}
  labels:
    k8s.kuboard.cn/name: web
  name: web
  namespace: exxk
  resourceVersion: '128138'
spec:
  ingressClassName: myingresscontroller
  rules:
    - host: web.iexxk.io
      http:
        paths:
          - backend:
              service:
                name: web
                port:
                  number: 80
            path: /
            pathType: Prefix
status:
  loadBalancer:
    ingress:
      - ip: 172.16.3.165
```
Additionally, if you want different domains to route to different clusters, configure nginx as follows:
```nginx
server {
    listen 80;
    server_name .iexxk.io;
    #access_log /var/log/nginx/hoddst.access.log main;
    location / {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_pass http://172.16.3.165:32211/;
    }
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }
}
server {
    listen 80;
    server_name .test.io;
    #access_log /var/log/nginx/hoddst.access.log main;
    location / {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_pass http://172.16.3.160:32211/;
    }
    error_page 500 502 503 504 /50x.html;
    location = /50x.html {
        root /usr/share/nginx/html;
    }
}
```
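Since the mounted nginx.conf comes from a hostPath, the static pod will not reload it on its own after edits; one option is to trigger a reload in place (the pod name is an assumption — kubelet names mirror pods `<pod-name>-<node-name>`, so check `kubectl get pods -n ingress-nginx` first):

```bash
# Reload nginx inside the static pod after editing /root/static-nginx/nginx.conf
kubectl exec -n ingress-nginx <static-nginx-pod-name> -- nginx -s reload
```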
Common issues
The k8s node IP changed, and errors like the following appear:
```bash
[root@exxk gate3]# kubectl get pods
E0511 15:13:07.676420 13114 memcache.go:265] couldn't get current server API group list: Get "https://127.0.0.1:6443/api?timeout=32s": dial tcp 127.0.0.1:6443: connect: connection refused
E0511 15:13:07.676695 13114 memcache.go:265] couldn't get current server API group list: Get "https://127.0.0.1:6443/api?timeout=32s": dial tcp 127.0.0.1:6443: connect: connection refused
E0511 15:13:07.678306 13114 memcache.go:265] couldn't get current server API group list: Get "https://127.0.0.1:6443/api?timeout=32s": dial tcp 127.0.0.1:6443: connect: connection refused
E0511 15:13:07.679714 13114 memcache.go:265] couldn't get current server API group list: Get "https://127.0.0.1:6443/api?timeout=32s": dial tcp 127.0.0.1:6443: connect: connection refused
E0511 15:13:07.680858 13114 memcache.go:265] couldn't get current server API group list: Get "https://127.0.0.1:6443/api?timeout=32s": dial tcp 127.0.0.1:6443: connect: connection refused
The connection to the server 127.0.0.1:6443 was refused - did you specify the right host or port?
```
Fix: clean up and reinstall; see https://zhuanlan.zhihu.com/p/621412584
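Before a full reinstall, it may be worth confirming whether the control-plane components are running at all (generic diagnostics, assuming the containerd runtime used above):

```bash
# Is kubelet itself up?
systemctl status kubelet
# Did the kube-apiserver container start (and why did it exit, if not)?
crictl ps -a | grep kube-apiserver
```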
After reinstalling, this error appeared:
container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized
Fix: run systemctl restart containerd.service
Kuboard fails to start, with error output like the following:
```bash
kubectl logs kuboard-v3-master -n kuboard

28 | error | 认证模块初始化失败:Get "http://127.0.0.1:5556/sso/.well-known/openid-configuration": dial tcp 127.0.0.1:5556: connect: connection refused
panic: runtime error: invalid memory address or nil pointer dereference
[signal SIGSEGV: segmentation violation code=0x1 addr=0x48 pc=0xd73477]
goroutine 1 [running]:
github.com/coreos/go-oidc.(*Provider).Verifier(...)
	/usr/src/kuboard/third-party/go-oidc/verify.go:111
github.com/shaohq/kuboard/server/login.AddLoginRoutes(0xc000103ba0)
	/usr/src/kuboard/server/login/login.go:30 +0xf7
main.getRoutes()
	/usr/src/kuboard/server/kuboard-server.go:193 +0x345
main.main()
	/usr/src/kuboard/server/kuboard-server.go:65 +0x185
启动 kuboard-server 失败,此问题通常是因为 Etcd 未能及时启动或者连接不上,系统将在 15 秒后重新尝试:
1. 如果您使用 docker run 的方式运行 Kuboard,请耐心等候一会儿或者执行 docker restart kuboard;
2. 如果您将 Kuboard 安装在 Kubernetes 中,请检查 kuboard/kuboard-etcd 是否正常启动。
认证模块:使用本地用户库
...
[LOG] 2024/12/04 - 16:46:30.352 | /common/etcd.client_config 24 | info | KUBOARD_ETCD_ENDPOINTS=[127.0.0.1:2379]
[LOG] 2024/12/04 - 16:46:30.352 | /common/etcd.client_config 52 | info | {[127.0.0.1:2379] 0s 1s 0s 0s 0 0 <nil> false [] <nil> <nil> <nil> false}
[LOG] 2024/12/04 - 16:46:30.353 | /initializekuboard.InitializeEtcd 39 | info | 初始化 ./init-etcd-scripts/audit-policy-once.yaml
{"level":"warn","ts":"2024-12-04T16:46:32.313+0800","caller":"clientv3/retry_interceptor.go:61","msg":"retrying of unary invoker failed","target":"endpoint://client-ac1ea4fe-6b5e-43ba-8c9f-84931dbe782a/127.0.0.1:2379","attempt":0,"error":"rpc error: code = DeadlineExceeded desc = context deadline exceeded"}
failed to initialize server: server: failed to list connector objects from storage: context deadline exceeded
{"level":"info","ts":"2024-12-04T16:46:34.220+0800","caller":"etcdserver/server.go:469","msg":"recovered v3 backend from snapshot","backend-size-bytes":1955713024,"backend-size":"2.0 GB","backend-size-in-use-bytes":1955692544,"backend-size-in-use":"2.0 GB"}
{"level":"info","ts":"2024-12-04T16:46:34.293+0800","caller":"etcdserver/raft.go:536","msg":"restarting local member","cluster-id":"f9f44c4ba0e96dd8","local-member-id":"59a9c584ea2c3f35","commit-index":5529395}
{"level":"info","ts":"2024-12-04T16:46:34.293+0800","caller":"raft/raft.go:1530","msg":"59a9c584ea2c3f35 switched to configuration voters=(6460912315094810421)"}
{"level":"info","ts":"2024-12-04T16:46:34.293+0800","caller":"raft/raft.go:700","msg":"59a9c584ea2c3f35 became follower at term 124"}
{"level":"info","ts":"2024-12-04T16:46:34.294+0800","caller":"raft/raft.go:383","msg":"newRaft 59a9c584ea2c3f35 [peers: [59a9c584ea2c3f35], term: 124, commit: 5529395, applied: 5520552, lastindex: 5529395, lastterm: 124]"}
{"level":"info","ts":"2024-12-04T16:46:34.294+0800","caller":"api/capability.go:76","msg":"enabled capabilities for version","cluster-version":"3.4"}
{"level":"info","ts":"2024-12-04T16:46:34.294+0800","caller":"membership/cluster.go:256","msg":"recovered/added member from store","cluster-id":"f9f44c4ba0e96dd8","local-member-id":"59a9c584ea2c3f35","recovered-remote-peer-id":"59a9c584ea2c3f35","recovered-remote-peer-urls":["http://0.0.0.0:2380"]}
{"level":"info","ts":"2024-12-04T16:46:34.294+0800","caller":"membership/cluster.go:269","msg":"set cluster version from store","cluster-version":"3.4"}
{"level":"warn","ts":"2024-12-04T16:46:34.294+0800","caller":"auth/store.go:1366","msg":"simple token is not cryptographically signed"}
```
Fix/analysis: the authentication-module initialization failure (认证模块初始化失败, the first error above) can likely be ignored, since the local user store is used for authentication afterwards. What follows is the etcd connection, which can take long enough to exceed the startup probe window, at which point the pod is judged to have failed to start — so increasing the probe delay is sufficient.
```bash
# Increase the probe delay so the pod isn't terminated before it finishes starting
vim /etc/kubernetes/manifests/kuboard.yaml
    readinessProbe:
      failureThreshold: 3
      httpGet:
        path: /kuboard-resources/version.json
        port: 80
        scheme: HTTP
      initialDelaySeconds: 60 # originally 30s; change to 60s
# Enter the container
kubectl exec -it kuboard-v3-master -n kuboard -- /bin/sh
# Running this by hand just reports that etcd is already started, so it's probably
# not the important part; the key fix is the probe delay above
etcd &
```
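After the manifest change, the pod can be watched until it turns Ready (a standard check):

```bash
kubectl get pod kuboard-v3-master -n kuboard -w
```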