
Deploying a Simple Cortex Cluster with Docker

When using Docker, we often need to stand up a simple environment ourselves, and we want to manage the multiple containers involved in a unified way. Writing this from scratch is not obvious at first, so this post records the docker-compose file I use.

Docker Compose Example

version: "3"
services:
cortex-nginx:
image: nginx:latest
ports:
- 80:80
- 443:443
volumes:
- /etc/nginx/nginx.conf:/etc/nginx/nginx.conf
- /root/log/error.log:/etc/nginx/logs/error.log
- /root/log/access.log:/etc/nginx/logs/access.log
restart: on-failure

cortex-configs:
image: cortexproject/cortex:master-54f10c2b1
command: ["/bin/cortex" , "-config.file=/etc/cortex.yaml" , "-target=configs"]
volumes:
- /etc/cortex/configs.yaml:/etc/cortex.yaml
- /etc/cortex/migrations:/etc/migrations
expose:
- "9009"
ports:
- "9006:9009"
restart: on-failure

cortex-alertmanager:
image: cortexproject/cortex:master-54f10c2b1
command: ["/bin/cortex","-config.file=/etc/cortex.yaml" , "-target=alertmanager"]
volumes:
- /etc/cortex/alertmanager.yaml:/etc/cortex.yaml
expose:
- "9009"
ports:
- "9007:9009"
restart: on-failure

cortex-ruler:
image: cortexproject/cortex:master-54f10c2b1
command: ["/bin/cortex","-config.file=/etc/cortex.yaml" , "-target=ruler"]
volumes:
- /etc/cortex/rules.yaml:/etc/cortex.yaml
expose:
- "9009"
ports:
- "9008:9009"
restart: on-failure

cortex-all:
image: cortexproject/cortex:master-54f10c2b1
command: ["/bin/cortex","-config.file=/etc/cortex.yaml" , "-target=all"]
volumes:
- /etc/cortex/all.yaml:/etc/cortex.yaml
expose:
- "9009"
ports:
- "9009:9009"
restart: on-failure
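Once the stack is up, a quick sanity check is to ask each component whether it is ready. A minimal sketch, assuming the host-port mappings above and that Cortex's /ready endpoint answers on each of them:

docker-compose ps                     # all services should be "Up"
curl -s http://localhost:9006/ready   # configs
curl -s http://localhost:9007/ready   # alertmanager
curl -s http://localhost:9008/ready   # ruler
curl -s http://localhost:9009/ready   # all (distributor/ingester/querier)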

Cortex Configuration

all.yaml

auth_enabled: true
target: all
http_prefix: /api/prom

api:
  alertmanager_http_prefix: /alertmanager
  prometheus_http_prefix: /prometheus

server:
  http_listen_port: 9009
  grpc_listen_port: 9005
  grpc_server_max_recv_msg_size: 104857600
  grpc_server_max_send_msg_size: 104857600
  grpc_server_max_concurrent_streams: 1000

query_range:
  split_queries_by_interval: 24h
  align_queries_with_step: true
  cache_results: true
  results_cache:
    max_freshness: 5m
    cache:
      enable_fifocache: true
      fifocache:
        size: 2048
        validity: 1h
      redis:
        endpoint: 119.3.159.111:6379
        expiration: 168h

frontend:
  max_outstanding_per_tenant: 100
  log_queries_longer_than: 5s

chunk_store:
  chunk_cache_config:
    enable_fifocache: true
    fifocache:
      size: 2048
      validity: 1h
    redis:
      endpoint: 119.3.159.111:6379
      expiration: 7h

distributor:
  shard_by_all_labels: true
  pool:
    health_check_ingesters: true

ingester:
  spread_flushes: true
  chunk_age_jitter: 0
  max_chunk_age: 4h
  walconfig:
    wal_enabled: true
    recover_from_wal: true
    wal_dir: /tmp/cortex/wal
  lifecycler:
    join_after: 30s
    min_ready_duration: 0s
    final_sleep: 10s
    num_tokens: 512
    tokens_file_path: /tmp/cortex/wal/tokens
    ring:
      kvstore:
        store: inmemory
        # etcd:
        #   endpoints:
        #     - http://39.100.234.107:2379
      replication_factor: 1

ingester_client:
  grpc_client_config:
    max_recv_msg_size: 104857600
    max_send_msg_size: 104857600
    use_gzip_compression: true

ruler:
  enable_sharding: true
  enable_api: true
  alertmanager_url: http://cortex-alertmanager:9009/api/prom/alertmanager
  storage:
    type: configdb
    configdb:
      configs_api_url: http://cortex-configs:9009
  ring:
    kvstore:
      store: etcd
      prefix: ruler/
      etcd:
        endpoints:
          - http://39.100.234.117:2379

configs:
  database:
    # uri: postgres://postgres:1234@119.3.159.111:5432/cortex?sslmode=disable
    # migrations_dir: /etc/migrations
    uri: memory://
  api:
    notifications:
      disable_email: false
      disable_webhook: false

alertmanager:
  external_url: /api/prom/alertmanager
  storage:
    type: local
    local:
      path: /tmp

schema:
  configs:
    - from: 2020-07-06
      store: cassandra
      object_store: cassandra
      schema: v10
      index:
        prefix: index_
        period: 168h
      chunks:
        prefix: chunk_
        period: 24h

storage:
  cassandra:
    addresses: 39.100.234.107
    keyspace: cortex
    timeout: 60s

limits:
  ingestion_rate: 100000
  ingestion_burst_size: 200000

purger:
  object_store_type: filesystem

frontend_worker:
  match_max_concurrent: true

table_manager:
  retention_deletes_enabled: true
  retention_period: 504h
rules.yaml, alertmanager.yaml, configs.yaml

Each of these files keeps the same configuration as all.yaml; we only change target to the name of the corresponding component, as sketched below.
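For instance, a minimal sketch of the one line that differs per file (component names taken from the compose file above):

# rules.yaml
target: ruler

# alertmanager.yaml
target: alertmanager

# configs.yaml
target: configs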

Notes

Here, in the configs section, we can back the service with postgres (the commented-out uri line above); in that case we also set migrations_dir: /etc/migrations, the directory containing our database migration scripts. The scripts are as follows:

CREATE TABLE IF NOT EXISTS traceable (
    created_at timestamp with time zone not null default now(),
    updated_at timestamp with time zone not null default now(),
    deleted_at timestamp with time zone
);

CREATE TABLE IF NOT EXISTS configs (
    id text NOT NULL,
    type text NOT NULL,
    subsystem text NOT NULL,
    config jsonb NOT NULL,
    PRIMARY KEY (id, type, subsystem)
) INHERITS (traceable);
ALTER TABLE configs RENAME COLUMN id TO owner_id;
ALTER TABLE configs RENAME COLUMN type TO owner_type;

-- Add a new auto-incrementing id.
ALTER TABLE configs ADD COLUMN id SERIAL;

ALTER TABLE configs DROP CONSTRAINT configs_pkey;
ALTER TABLE configs ADD PRIMARY KEY (id, owner_id, owner_type, subsystem);

The files only need to be named following the postgres migration tool's naming convention.
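As a sketch of what the mounted /etc/migrations directory might look like, assuming the usual <version>_<title>.up.sql convention used by golang-migrate style tools (the file names here are hypothetical, not taken from the Cortex repo):

$ ls /etc/cortex/migrations
001_initial_schema.up.sql        # first script above: traceable + configs tables
002_rename_id_add_serial.up.sql  # second script above: rename columns, add serial id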

nginx.conf

In a cluster environment, to make access from the outside easier, we can use nginx to manage our APIs in a unified way: the n backend endpoints then look like a single endpoint to the outside world:

worker_processes 1;

error_log logs/error.log;
events {
    worker_connections 1024;
}
http {
    include mime.types;
    # map the extra MIME types Cortex speaks to file extensions
    types {
        application/yaml yaml yml;
        application/json json;
        application/x-protobuf proto;
    }
    underscores_in_headers on;

    log_format main '$remote_addr - $remote_user [$time_local] "$request" [$http_x_scope_orgid]'
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';

    access_log logs/access.log main;

    sendfile on;
    keepalive_timeout 65;
    gzip on;
    server {
        listen 80;

        # remote_write API, reverse-proxied to the cortex distributor
        location ~ ^/api/prom/push(.*)$ {
            proxy_pass http://cortex-all:9009;
        }

        # configs API, reverse-proxied to cortex configs
        location ~ ^/api/prom/configs(.*)$ {
            proxy_pass http://cortex-configs:9009;
        }

        # alertmanager, reverse-proxied to cortex alertmanager
        location ~ ^/api/prom/alertmanager(.*)$ {
            proxy_pass http://cortex-alertmanager:9009;
        }

        # prometheus query API, reverse-proxied to the cortex query frontend
        location ~ ^/api/prom/api/v1(.*)$ {
            proxy_pass http://cortex-all:9009;
        }
    }
}
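Because all.yaml sets auth_enabled: true, every request through this gateway must carry a tenant ID in the X-Scope-OrgID header (the log_format above records $http_x_scope_orgid for exactly this reason). A minimal sketch of exercising the single entry point with curl, assuming nginx is reachable on localhost and a hypothetical tenant named tenant1:

# query API, forwarded to cortex-all
curl -s -H 'X-Scope-OrgID: tenant1' \
  'http://localhost/api/prom/api/v1/query?query=up'

# alertmanager, forwarded to cortex-alertmanager
curl -s -H 'X-Scope-OrgID: tenant1' http://localhost/api/prom/alertmanager/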

Startup

docker-compose up

Once the command runs, we can see our Cortex cluster come up.
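To push data into the cluster, point a Prometheus instance at the gateway's push endpoint. A sketch of the relevant prometheus.yml fragment, assuming a Prometheus version recent enough to support per-remote-write headers, and using the same hypothetical tenant as above:

remote_write:
  - url: http://localhost/api/prom/push   # nginx forwards this to cortex-all
    headers:
      X-Scope-OrgID: tenant1              # must match the tenant used for queries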
