Installing GreptimeDB on Kubernetes

Environment

Type            Version / Spec
k8s (3 nodes)   1.26.6
Nodes           8c16g at install time; even with components limited to 1c1g, total resources were still insufficient, so the VMs were resized to 16c24g
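
Before installing, it helps to confirm that the nodes actually have the CPU and memory headroom the charts below request. The following check is a sketch and was not part of the original session; kubectl top additionally requires the metrics-server addon:

    # Show per-node allocatable capacity
    kubectl describe nodes | grep -A 5 Allocatable
    # Show current CPU/memory usage (needs metrics-server)
    kubectl top nodes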

Online installation

  1. Deploy etcd

    [root@node3 greptime]# helm upgrade \
    --install etcd oci://greptime-registry.cn-hangzhou.cr.aliyuncs.com/charts/etcd \
    --create-namespace \
    --version 11.3.4 \
    -n etcd-cluster --values etcd-values.yaml

    [root@node3 greptime]# kubectl get pod -n etcd-cluster
    NAME     READY   STATUS    RESTARTS   AGE
    etcd-0   1/1     Running   0          79s
    etcd-1   1/1     Running   0          79s
    etcd-2   1/1     Running   0          79s

    [root@node3 greptime]# kubectl -n etcd-cluster exec etcd-0 -- etcdctl \
    --endpoints=etcd-0.etcd-headless.etcd-cluster:2379,etcd-1.etcd-headless.etcd-cluster:2379,etcd-2.etcd-headless.etcd-cluster:2379 \
    endpoint status -w table
    +----------------+-----------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
    | ENDPOINT       | ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
    +----------------+-----------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
    | etcd-0...:2379 | 680910... | 3.5.21  | 20kB    | ✘         | ✘          | 2         | 38         | 38                 |        |
    | etcd-1...:2379 | d6980d... | 3.5.21  | 20kB    | ✔         | ✘          | 2         | 38         | 38                 |        |
    | etcd-2...:2379 | 12664f... | 3.5.21  | 20kB    | ✘         | ✘          | 2         | 38         | 38                 |        |
    +----------------+-----------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
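
    Besides endpoint status, member health can be probed with the same endpoint list. This is a sketch and was not part of the original session:

    # Optional health probe against all three members
    kubectl -n etcd-cluster exec etcd-0 -- etcdctl \
      --endpoints=etcd-0.etcd-headless.etcd-cluster:2379,etcd-1.etcd-headless.etcd-cluster:2379,etcd-2.etcd-headless.etcd-cluster:2379 \
      endpoint health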
  2. Deploy MinIO

    [root@node3 greptime]# helm upgrade \
    --install minio oci://greptime-registry.cn-hangzhou.cr.aliyuncs.com/charts/minio \
    --create-namespace \
    --version 16.0.10 \
    -n minio \
    --values minio-values.yaml

    [root@node3 greptime]# kubectl get pod -n minio
    NAME      READY   STATUS    RESTARTS   AGE
    minio-0   1/1     Running   0          44s
    minio-1   1/1     Running   0          44s
    minio-2   1/1     Running   0          44s
    minio-3   1/1     Running   0          44s
    # Expose the port
    [root@node3 greptime]# kubectl apply -f minio-NodePort.yaml
  3. Log in to the MinIO console at http://192.168.10.213:30393/login with username/password greptimedbadmin/greptimedbadmin (the console NodePort can be confirmed as sketched below)
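
    Note that minio-NodePort.yaml below only exposes the S3 API port (9000 as NodePort 30090), so the console URL above presumably comes from a separately exposed or auto-assigned NodePort. A quick way to confirm which NodePort serves the console (a sketch, not from the original session):

    kubectl get svc -n minio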

  4. Create a bucket named greptimedb-bucket

  5. Create Access Keys, which yields key=JffCiPPRg1CfcI4li582 and sec=IippU4XmqqIQBBPcROUi2paeABFbNwhfl6UXgOIM (a CLI alternative is sketched below)
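
    The bucket can also be created from the command line with the MinIO client instead of the web console. The alias name below and the reuse of the root credentials are assumptions for illustration; the access keys themselves still come from the console as in the step above:

    # Hypothetical mc session against the API NodePort
    mc alias set greptime-minio http://192.168.10.213:30090 greptimedbadmin greptimedbadmin
    mc mb greptime-minio/greptimedb-bucket
    mc ls greptime-minio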

  6. Deploy the GreptimeDB Operator

    [root@node3 greptime]# helm upgrade --install \
    greptimedb-operator oci://greptime-registry.cn-hangzhou.cr.aliyuncs.com/charts/greptimedb-operator \
    --version 0.2.21 \
    --namespace greptimedb-admin \
    --create-namespace \
    --values greptimedb-operator-values.yaml
    [root@node3 greptime]# kubectl get pod -n greptimedb-admin
    NAME                                 READY   STATUS    RESTARTS   AGE
    greptimedb-operator-d656cb5c-zkdq9   1/1     Running   0          43s
  7. Deploy the GreptimeDB cluster

    [root@node3 greptime]# kubectl create namespace greptimedb
    namespace/greptimedb created
    # Create an image pull Secret: greptimedb / image-pull-secret
    [root@node3 greptime]# kubectl create secret docker-registry image-pull-secret \
    -n greptimedb \
    --docker-server=https://greptime-registry.cn-hangzhou.cr.aliyuncs.com \
    --docker-username=fxkjtkjj@1826388469660986 \
    --docker-password=1P7a9v7M22
    # Install greptimedb. Mind the k8s resources: it would not run on 8c16g (not enough CPU), so the VMs were resized to 16c24g
    [root@node3 greptime]# helm upgrade --install greptimedb \
    --create-namespace \
    oci://greptime-registry.cn-hangzhou.cr.aliyuncs.com/charts/greptimedb-cluster \
    --version 0.3.18 \
    -n greptimedb \
    --values greptimedb-cluster-values.yaml
    [root@node3 greptime]# kubectl get po -n greptimedb
    NAME                                  READY   STATUS    RESTARTS        AGE
    greptimedb-datanode-0                 2/2     Running   0               2m18s
    greptimedb-datanode-1                 2/2     Running   0               2m18s
    greptimedb-datanode-2                 2/2     Running   0               2m18s
    greptimedb-flownode-0                 2/2     Running   0               2m1s
    greptimedb-frontend-5468844cd-br4sc   2/2     Running   0               2m7s
    greptimedb-frontend-5468844cd-nrhf6   2/2     Running   0               2m7s
    greptimedb-frontend-5468844cd-swbxv   2/2     Running   0               2m7s
    greptimedb-meta-5b74964-6m7nt         2/2     Running   3 (2m54s ago)   20m
    greptimedb-meta-5b74964-7sg9p         2/2     Running   2 (2m41s ago)   20m
    greptimedb-meta-5b74964-gxqn8         2/2     Running   4 (2m50s ago)   20m
    greptimedb-monitor-standalone-0       1/1     Running   1 (9m44s ago)   20m
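
    The operator also reports the overall cluster state through the GreptimeDBCluster custom resource, which is a quick way to confirm that all components have been reconciled. This check is a sketch and was not captured in the original session:

    kubectl get greptimedbclusters -n greptimedb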
  8. Deploy the Dashboard

    [root@node3 greptime]# helm upgrade --install enterprise-dashboard \
    --create-namespace \
    oci://greptime-registry.cn-hangzhou.cr.aliyuncs.com/charts/enterprise-dashboard \
    -n dashboard \
    --version 0.1.1 \
    --values dashboard-values.yaml
    [root@node3 greptime]# kubectl get pod -n dashboard
    NAME                                    READY   STATUS    RESTARTS   AGE
    enterprise-dashboard-7d75cbff97-q7t87   1/1     Running   0          104s
    # Expose the port
    [root@node3 greptime]# kubectl apply -f dashboard-NodePort.yaml
  9. Open http://192.168.10.213:31905/

  10. Verify the deployment (the same queries can also be run from the dashboard)

    # Forward port 4002 of the Service in the Kubernetes cluster to port 4002 on the local machine
    [root@node3 greptime]# kubectl port-forward svc/greptimedb-frontend 4002:4002 -n greptimedb > connections.out &
    [2] 38905
    [1] Exit 127 connections.out
    [root@node3 greptime]# cat connections.out
    Forwarding from 127.0.0.1:4002 -> 4002
    Forwarding from [::1]:4002 -> 4002
    # Install the MySQL client
    [root@node3 greptime]# yum install -y mysql
    # Test the connection
    [root@node3 greptime]# mysql -h 127.0.0.1 -P 4002
    mysql>
    CREATE TABLE monitor (
    host STRING,
    ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP() TIME INDEX,
    cpu FLOAT64 DEFAULT 0,
    memory FLOAT64,
    PRIMARY KEY(host)
    );
    Query OK, 0 rows affected (0.29 sec)

    mysql>
    INSERT INTO monitor
    VALUES
    ("127.0.0.1", 1702433141000, 0.5, 0.2),
    ("127.0.0.2", 1702433141000, 0.3, 0.1),
    ("127.0.0.1", 1702433146000, 0.3, 0.2),
    ("127.0.0.2", 1702433146000, 0.2, 0.4),
    ("127.0.0.1", 1702433151000, 0.4, 0.3),
    ("127.0.0.2", 1702433151000, 0.2, 0.4);
    6 rows in set (0.01 sec)

    mysql> show tables;
    +---------+
    | Tables  |
    +---------+
    | monitor |
    | numbers |
    +---------+
    2 rows in set (0.05 sec)
    mysql> select * from monitor;
    +-----------+---------------------+------+--------+
    | host      | ts                  | cpu  | memory |
    +-----------+---------------------+------+--------+
    | 127.0.0.1 | 2023-12-13 02:05:41 |  0.5 |    0.2 |
    | 127.0.0.1 | 2023-12-13 02:05:46 |  0.3 |    0.2 |
    | 127.0.0.1 | 2023-12-13 02:05:51 |  0.4 |    0.3 |
    | 127.0.0.2 | 2023-12-13 02:05:41 |  0.3 |    0.1 |
    | 127.0.0.2 | 2023-12-13 02:05:46 |  0.2 |    0.4 |
    | 127.0.0.2 | 2023-12-13 02:05:51 |  0.2 |    0.4 |
    +-----------+---------------------+------+--------+
    6 rows in set (0.04 sec)
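
    The frontend also serves an HTTP API, by default on port 4000, so the same data can be queried without a MySQL client. The port-forward and request below are a sketch under that assumption and were not part of the original session:

    # Forward the frontend HTTP port and run the same query over HTTP
    kubectl port-forward svc/greptimedb-frontend 4000:4000 -n greptimedb &
    curl -s -X POST 'http://127.0.0.1:4000/v1/sql?db=public' \
      --data-urlencode 'sql=SELECT * FROM monitor'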

Configuration files

etcd-values.yaml

global:
  security:
    allowInsecureImages: true

image:
  registry: greptime-registry.cn-hangzhou.cr.aliyuncs.com
  repository: bitnami/etcd
  tag: 3.5.21-debian-12-r5

replicaCount: 3

resources:
  requests:
    cpu: '2'
    memory: 2Gi
  limits:
    cpu: '4'
    memory: 4Gi

persistence:
  storageClass: nfscs
  size: 8Gi

auth:
  rbac:
    create: false
  token:
    enabled: false

autoCompactionMode: "periodic"
autoCompactionRetention: "1h"

extraEnvVars:
  - name: ETCD_QUOTA_BACKEND_BYTES
    value: "8589934592"
  - name: ETCD_ELECTION_TIMEOUT
    value: "2000"

minio-values.yaml

global:
  security:
    allowInsecureImages: true

image:
  registry: greptime-registry.cn-hangzhou.cr.aliyuncs.com
  repository: bitnami/minio
  tag: 2025.4.22-debian-12-r1

auth:
  rootUser: greptimedbadmin
  rootPassword: "greptimedbadmin"

resources:
  requests:
    cpu: "2"
    memory: 2Gi
  limits:
    cpu: "5"
    memory: 5Gi

affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 1
        podAffinityTerm:
          topologyKey: kubernetes.io/hostname
          labelSelector:
            matchLabels:
              app.kubernetes.io/instance: minio

extraEnvVars:
  - name: MINIO_REGION
    value: "ap-southeast-1"

statefulset:
  replicaCount: 4

mode: distributed

persistence:
  storageClass: nfssc
  size: 200Gi

minio-NodePort.yaml

apiVersion: v1
kind: Service
metadata:
  name: minio
  namespace: minio
spec:
  type: NodePort
  ports:
    - port: 9000        # Service port
      targetPort: 9000  # Pod port
      nodePort: 30090   # Port exposed on the Node
  selector:
    app.kubernetes.io/name: minio

greptimedb-operator-values.yaml

image:
  # Image registry
  registry: greptime-registry.cn-hangzhou.cr.aliyuncs.com
  # Image name
  repository: greptime/greptimedb-operator
  # Image pull policy
  imagePullPolicy: IfNotPresent
  # Image tag
  tag: v0.2.2
  # Image pull secrets (if authentication is required)
  pullSecrets: []

# Additional labels
additionalLabels: {}

serviceAccount:
  # Whether to create a service account
  create: true
  # Service account annotations
  annotations: {}
  # Service account name (auto-generated when empty)
  name: ""

crds:
  # Install the CRDs
  install: true
  # Keep the CRDs when uninstalling
  keep: true
  # CRD annotations
  annotations: {}
  # Additional CRD labels
  additionalLabels: {}

# Number of replicas
replicas: 1

resources:
  # Resource limits
  limits:
    cpu: "2"
    memory: 2Gi
  # Resource requests
  requests:
    cpu: 500m
    memory: 512Mi

rbac:
  # Enable RBAC
  create: true

apiServer:
  # Enable the API server
  enabled: true
  # API server port
  port: 8081
  # Enable fetching PodMetrics via metrics-server
  podMetrics:
    enabled: true

# Name overrides
nameOverride: ""
fullnameOverride: ""

# Scheduling options
nodeSelector: {}
tolerations: []
affinity: {}

greptimedb-cluster-values.yaml

Resource requests/limits were trimmed, and pod anti-affinity is used to spread the components across hosts.

image:
  registry: greptime-registry.cn-hangzhou.cr.aliyuncs.com
  repository: fxkjtkjj/greptimedb-enterprise
  tag: ent-20250427-1745751842-e45e60bf
  pullSecrets:
    - image-pull-secret  # this setting did not take effect
  additionalLabels: {}

initializer:
  registry: greptime-registry.cn-hangzhou.cr.aliyuncs.com
  repository: greptime/greptimedb-initializer
  tag: v0.2.2
  image:
    pullSecrets:
      - image-pull-secret  # added under each component below, otherwise image pulls are unauthorized

datanode:
  image:
    pullSecrets:
      - image-pull-secret  # added under each component, otherwise image pulls are unauthorized
  replicas: 3
  podTemplate:
    main:
      resources:
        requests:
          cpu: "1"
          memory: 1Gi
        limits:
          cpu: "1"
          memory: 1Gi
    affinity:
      podAntiAffinity:
        preferredDuringSchedulingIgnoredDuringExecution:
          - podAffinityTerm:
              labelSelector:
                matchLabels:
                  app.greptime.io/component: greptimedb-datanode
              topologyKey: kubernetes.io/hostname
            weight: 1
  storage:
    storageClassName: nfssc
    storageSize: 30Gi
    storageRetainPolicy: Retain

frontend:
  image:
    pullSecrets:
      - image-pull-secret  # added under each component, otherwise image pulls are unauthorized
  enabled: true
  replicas: 3
  podTemplate:
    main:
      resources:
        requests:
          cpu: "1"
          memory: 1Gi
        limits:
          cpu: "1"
          memory: 1Gi
    affinity:
      podAntiAffinity:
        preferredDuringSchedulingIgnoredDuringExecution:
          - podAffinityTerm:
              labelSelector:
                matchLabels:
                  app.greptime.io/component: greptimedb-frontend
              topologyKey: kubernetes.io/hostname
            weight: 1

meta:
  image:
    pullSecrets:
      - image-pull-secret  # added under each component, otherwise image pulls are unauthorized
  replicas: 3
  etcdEndpoints: "etcd.etcd-cluster.svc.cluster.local:2379"
  podTemplate:
    main:
      resources:
        requests:
          cpu: "1"
          memory: 1Gi
        limits:
          cpu: "1"
          memory: 1Gi
    affinity:
      podAntiAffinity:
        preferredDuringSchedulingIgnoredDuringExecution:
          - podAffinityTerm:
              labelSelector:
                matchLabels:
                  app.greptime.io/component: greptimedb-meta
              topologyKey: kubernetes.io/hostname
            weight: 1

objectStorage:
  credentials:
    accessKeyId: "JffCiPPRg1CfcI4li582"
    secretAccessKey: "IippU4XmqqIQBBPcROUi2paeABFbNwhfl6UXgOIM"
  s3:
    bucket: "greptimedb-bucket"
    region: "ap-southeast-1"
    root: "greptimedb-data"
    endpoint: "http://minio.minio:9000"

monitoring:
  image:
    pullSecrets:
      - image-pull-secret  # added under each component, otherwise image pulls are unauthorized
  enabled: true
  standalone:
    base:
      main:
        resources:
          requests:
            cpu: "1"
            memory: 1Gi
          limits:
            cpu: "1"
            memory: 1Gi
    datanodeStorage:
      fs:
        storageClassName: nfssc
        storageSize: 100Gi
  vector:
    registry: greptime-registry.cn-hangzhou.cr.aliyuncs.com
    repository: timberio/vector
    tag: 0.46.1-debian
    pullSecrets:
      - image-pull-secret
    resources:
      requests:
        cpu: "1"
        memory: 1Gi
      limits:
        cpu: "1"
        memory: "1Gi"
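
Since the top-level image.pullSecrets did not take effect, it is worth confirming that the per-component pull secrets actually landed on the pods. A hedged check, using one datanode pod as an example:

    kubectl -n greptimedb get pod greptimedb-datanode-0 \
      -o jsonpath='{.spec.imagePullSecrets}{"\n"}'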

dashboard-values.yaml

replicaCount: 1

image:
  repository: greptime-registry.cn-hangzhou.cr.aliyuncs.com/greptime/dashboard-apiserver
  pullPolicy: IfNotPresent
  tag: "770ed916-20250409110716"

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

logLevel: debug

operatorAddr: greptimedb-operator.greptimedb-admin.svc.cluster.local:8081
servicePort: 19095

serviceAccount:
  create: true
  annotations: {}
  name: ""

podAnnotations: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

service:
  type: ClusterIP
  port: 19095
  annotations: {}

resources:
  requests:
    cpu: "1"
    memory: 1Gi
  limits:
    cpu: "1"
    memory: 1Gi

nodeSelector: {}

tolerations: []

affinity: {}

dashboard-NodePort.yaml

apiVersion: v1
kind: Service
metadata:
  name: enterprise-dashboard
  namespace: dashboard  # replace with the actual namespace
spec:
  type: NodePort
  ports:
    - port: 19095        # Service port
      targetPort: 19095  # Pod port
      nodePort: 31905    # Port exposed on the Node (must be within 30000-32767)
  selector:
    app.kubernetes.io/name: enterprise-dashboard
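
After applying this Service, a quick reachability check against the NodePort used in step 9 (a sketch, not from the original session):

    curl -sI http://192.168.10.213:31905/ | head -n 1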