
References

node_exporter

mkdir /sdecloud && cd /sdecloud
wget https://mazqmdpic.oss-cn-beijing.aliyuncs.com/node_exporter
chmod +x node_exporter
nohup ./node_exporter &

Verify:
http://10.193.10.51:9100/metrics
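A quick sanity check from the shell (a minimal sketch; any node_* metric will do):

# Should print a non-zero count of node_* metrics if the exporter is up
curl -s http://10.193.10.51:9100/metrics | grep -c '^node_'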

elasticsearch_exporter

Only the master node is monitored:

nohup ./elasticsearch_exporter --es.uri http://10.193.10.51:9200 &

Verify:
http://10.193.10.51:9114/metrics
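To probe the exporter from the shell, the cluster health metric is a convenient check (a sketch; metric names as exposed by elasticsearch_exporter):

# elasticsearch_cluster_health_status{color="green"} should be 1 on a healthy cluster
curl -s http://10.193.10.51:9114/metrics | grep elasticsearch_cluster_health_status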

Prometheus

vi prometheus.yml

- job_name: 'ES集群'
  static_configs:
    - targets: ['10.193.10.51:9100','10.193.10.52:9100','10.193.10.53:9100']

vi prometheus.yml

- job_name: 'ES监控'
  static_configs:
    - targets: ['10.193.10.51:9114','10.193.10.52:9114','10.193.10.53:9114']

Start:

nohup /sdecloud/prometheus/prometheus   --web.enable-admin-api --web.enable-lifecycle --storage.tsdb.retention=190d &

Verify:
http://10.193.21.4:9090
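Because --web.enable-lifecycle is set above, configuration changes can be applied without restarting (a sketch):

# Hot-reload prometheus.yml after editing it
curl -X POST http://10.193.21.4:9090/-/reload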

Grafana

http://10.133.0.131:3000/d/Od_cKCwnk/elasticsearch?orgId=1

SSL support

References

https://blog.51cto.com/u_13363488/2350495

Install SSL dependencies

# Install the SSL-related devel dependencies directly with yum
yum -y install openssl openssl-devel pcre pcre-devel zlib zlib-devel

Configure arguments

--with-http_ssl_module \
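After rebuilding with this module, a server block along these lines enables HTTPS (a sketch only; the server_name and certificate paths are placeholders, not from this setup):

server {
    listen 443 ssl;
    server_name example.local;                       # placeholder
    ssl_certificate     /data/nginx/conf/server.crt; # placeholder path
    ssl_certificate_key /data/nginx/conf/server.key; # placeholder path
    ssl_protocols TLSv1.2;

    location / {
        root html;
        index index.html;
    }
}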

Prometheus monitoring

References

Download nginx-module-vts

git clone https://github.com/vozlt/nginx-module-vts.git

Configure arguments

--with-http_stub_status_module \
--add-module=/data/pkg/nginx-module-vts-master

nginx.conf configuration

# Add inside the http block
vhost_traffic_status_zone;

# Add inside the server block
# http_stub_status_module status
location = /basic_status {
    stub_status on;
}
# nginx-module-vts status
location /status {
    vhost_traffic_status_display;
    vhost_traffic_status_display_format html;
}

Verification
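The vts module also exposes a Prometheus text endpoint under the same location, which is what Prometheus scrapes later (a sketch):

# HTML dashboard
curl -s http://10.193.10.54/status
# Prometheus text format
curl -s http://10.193.10.54/status/format/prometheus | head
# stub_status
curl -s http://10.193.10.54/basic_status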

Prometheus

ssh root@10.193.21.4 # yourpass!

vi prometheus.yml

- job_name: 'ES集群'
  static_configs:
    - targets: ['10.193.10.51:9100','10.193.10.52:9100','10.193.10.53:9100']

- job_name: 'Nginx监控'
  metrics_path: '/status/format/prometheus'
  scheme: 'http'
  static_configs:
    - targets: ['10.193.10.54:80']
      labels:
        instance: dev-nginx
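Before reloading, the file can be validated with promtool (a sketch, assuming promtool sits next to the prometheus binary under /sdecloud/prometheus):

/sdecloud/prometheus/promtool check config /sdecloud/prometheus/prometheus.yml
curl -X POST http://10.193.21.4:9090/-/reload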

Grafana

Import the dashboard template: https://grafana.com/grafana/dashboards/13816


Geo-location access restriction

References

Install geoip-devel

yum -y install geoip-devel

Download the GeoIP databases

https://www.miyuru.lk/geoiplegacy
https://mirrors-cdn.liferay.com/geolite.maxmind.com/download/geoip/database/

Configure arguments

--with-http_geoip_module \

nginx.conf configuration

# Add inside the http block

geoip_country /data/geoip/GeoIP.dat;
geoip_city /data/geoip/GeoLiteCity.dat;

# Mark requests from the internal 10.193.0.0/16 network
# (nginx variable names cannot contain hyphens, so the variable is $lan_ip)
geo $lan_ip {
    default no;
    10.193.0.0/16 yes;
}

map $geoip_country_code $allowed_country {
    default no;
    CN yes;
}

map $geoip_city $allowed_region {
    default no;
    Jinan yes;
}

# Add inside the server block
if ($lan_ip = yes) {
    set $allowed_country yes;
}
if ($allowed_country = no) {
    return 405;
}
if ($allowed_region = no) {
    return 405;
}

Verification

curl http://58.59.15.8:45444
curl http://10.193.10.54:80
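To see only the status code (200 from the whitelisted LAN, 405 when the country/region rule rejects the request), a sketch:

curl -s -o /dev/null -w '%{http_code}\n' http://10.193.10.54:80/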


Nginx build

# Download and unpack the source tarball
wget https://nginx.org/download/nginx-1.20.2.tar.gz
tar zxf nginx-1.20.2.tar.gz && cd nginx-1.20.2

# Install gcc
yum install gcc -y

# Configure
./configure --prefix=/data/nginx \
--with-http_ssl_module \
--with-http_stub_status_module \
--add-module=/data/pkg/nginx-module-vts-master \
--with-http_geoip_module \
--with-stream \
--with-http_v2_module \
--with-http_realip_module

# Build and install
make
make install

Verify:
http://10.193.10.54:80
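The compiled-in modules can be confirmed from the binary itself (a sketch, assuming the --prefix above):

/data/nginx/sbin/nginx -V 2>&1 | tr ' ' '\n' | grep -E 'ssl|vts|geoip|stream|realip|v2'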

Official site and downloads

https://github.com/ginuerzh/gost

Jump host

  • Use 10.80.8.237
  • Reuse Nginx's port 8080

Server-side gost

./gost -L "ws://172.17.0.1:31314?path=/gost"

Server-side Nginx forwarding configuration

# Forward the WebSocket path used by gost (path=/gost) to the server-side gost listener
location /gost {
    proxy_redirect off;
    proxy_pass http://172.17.0.1:31314;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}

Local Docker EasyConnect

docker run --name ecweifang --device /dev/net/tun --cap-add NET_ADMIN -itd -p 127.0.0.1:7011:1080 -e EC_VER=7.6.3 -e CLI_OPTS="-d https://xx.xx.xx.xx -u username -p password" hagb/docker-easyconnect:cli

Local gost

./gost -L=:7013 -F="ws://10.80.8.237:8080?path=/gost"

Local Proxifier configuration

  • gost process + 10.80.8.237 subnet -> 127.0.0.1:7011
  • any process + 10.80.8.* subnet -> 127.0.0.1:7013 (see the check below)
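A rough end-to-end check of the local gost listener (a sketch; gost's plain -L :7013 listener auto-detects HTTP and SOCKS5 clients):

# Request the jump host's Nginx through the local gost tunnel
curl -x socks5h://127.0.0.1:7013 -s -o /dev/null -w '%{http_code}\n' http://10.80.8.237:8080/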

Summary

ES http://10.193.10.51:9200
Kibana http://10.193.10.51:5601
Version: 5.6.5

Servers

10.193.10.51
10.193.10.52
10.193.10.53

ES

Install

wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.6.4.rpm

rpm -ivh jdk-8u321-linux-x64.rpm
rpm -ivh elasticsearch-5.6.4.rpm

mkdir /data/elasticsearch/data -p
mkdir /data/elasticsearch/logs -p
chown -R elasticsearch:elasticsearch /data/elasticsearch

Configuration

vi /etc/elasticsearch/elasticsearch.yml

cluster.name: es-cluster-dev
node.name: node-01
path.data: /data/elasticsearch/data
path.logs: /data/elasticsearch/logs
network.host: 10.193.10.51
http.port: 9200
discovery.zen.ping.unicast.hosts: ["10.193.10.51", "10.193.10.52", "10.193.10.53"]
discovery.zen.minimum_master_nodes: 2
bootstrap.memory_lock: true
http.cors.enabled: true
http.cors.allow-origin: "*"
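bootstrap.memory_lock: true only takes effect if the elasticsearch user may lock memory; with the RPM/systemd install this usually means a unit override (a sketch, not part of the original notes):

mkdir -p /etc/systemd/system/elasticsearch.service.d
cat > /etc/systemd/system/elasticsearch.service.d/override.conf <<'EOF'
[Service]
LimitMEMLOCK=infinity
EOF
systemctl daemon-reload
systemctl restart elasticsearch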

IK analyzer plugin

# The plugin version must match the installed Elasticsearch version (5.6.4 here)
wget https://github.com/medcl/elasticsearch-analysis-ik/releases/download/v5.6.4/elasticsearch-analysis-ik-5.6.4.zip
cp elasticsearch-analysis-ik-5.6.4.zip /usr/share/elasticsearch/plugins/
cd /usr/share/elasticsearch/plugins/
unzip elasticsearch-analysis-ik-5.6.4.zip
mv elasticsearch ik
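After restarting Elasticsearch, the analyzer can be exercised directly (a sketch; the sample text is arbitrary):

systemctl restart elasticsearch
curl -XGET 'http://10.193.10.51:9200/_analyze?pretty' -H 'Content-Type: application/json' \
  -d '{"analyzer": "ik_max_word", "text": "中华人民共和国"}'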

Test

http://10.193.10.51:9200

Start/stop

systemctl enable elasticsearch
systemctl start elasticsearch
systemctl status elasticsearch
tail -200f /data/elasticsearch/logs/es-cluster-dev.log

systemctl daemon-reload
systemctl restart elasticsearch

Set max_result_window

GET /zyachievementindex0424/_settings

PUT /zyachievementindex0424/_settings
{"index.max_result_window": "1000000"}

curl -XPUT "http://localhost:9200/my_index/_settings" -d '{ "index" : { "max_result_window" : 500000 } }' -H "Content-Type: application/json"

Kibana

Install

wget https://artifacts.elastic.co/downloads/kibana/kibana-5.6.4-x86_64.rpm
rpm -ivh kibana-5.6.4-x86_64.rpm

Configuration

vi /etc/kibana/kibana.yml
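A minimal set of settings for this environment might look like the following (a sketch; Kibana 5.x uses elasticsearch.url, and the values simply mirror the ES node above):

cat >> /etc/kibana/kibana.yml <<'EOF'
server.port: 5601
server.host: "10.193.10.51"
elasticsearch.url: "http://10.193.10.51:9200"
EOF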

Start

systemctl enable kibana
systemctl start kibana
systemctl status kibana

systemctl restart kibana

Test

http://10.193.10.51:5601


Logstash

Install

wget https://artifacts.elastic.co/downloads/logstash/logstash-5.6.4.rpm
rpm -ivh logstash-5.6.4.rpm
chown -R logstash:logstash /var/log/logstash

Configuration

vi /etc/logstash/logstash.conf

input {
  elasticsearch {
    hosts => "10.193.10.21:9200"
    index => "hphzptidlegoods-test"
    docinfo => true
    size => 1000
    scroll => "1m"
  }
}

filter {
  mutate {
    remove_field => ["@timestamp", "@version"]
  }
}

output {
  elasticsearch {
    hosts => ["http://10.193.10.51:9200"]
    index => "%{[@metadata][_index]}"
    document_type => "%{[@metadata][_type]}"
    document_id => "%{[@metadata][_id]}"
  }
}

Data migration

/usr/share/logstash/bin/logstash "--path.settings" "/etc/logstash" -f /etc/logstash/logstash.conf
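The pipeline definition can be syntax-checked first (a sketch; --config.test_and_exit only parses the config and exits):

/usr/share/logstash/bin/logstash --path.settings /etc/logstash -f /etc/logstash/logstash.conf --config.test_and_exit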

dubbo-admin

Download

https://github.com/apache/dubbo-admin

Note: download version 0.3.0.
In 0.4.0 the mock-server is started twice, which fails with a duplicate-port error.

Web UI

Local test:
http://localhost:8080/
root/root

Production (zookeeper://10.193.10.55:2181):
http://10.193.21.3:8081
root/root
Monitors the production environment

Test (zookeeper://10.193.10.11:2181):
http://10.193.21.3:8082
root/root
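The registry address, port, and login for each instance are set in dubbo-admin-server's application.properties; roughly like this (a sketch based on the 0.3.x property names, adjust per environment):

server.port=8081
admin.registry.address=zookeeper://10.193.10.55:2181
admin.config-center=zookeeper://10.193.10.55:2181
admin.metadata-report.address=zookeeper://10.193.10.55:2181
admin.root.user.name=root
admin.root.user.password=root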

dubbo-keeper (alternative)

References

Test machines

10.193.10.201
10.193.10.202
10.193.10.203

Install Docker

yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install -y docker-ce-19.03.13 docker-ce-cli-19.03.13 containerd.io
docker --version
systemctl enable docker
systemctl start docker
systemctl daemon-reload
systemctl status docker

Deploy Swarm

# Initialize the manager node
docker swarm init --advertise-addr 10.193.10.201

# Show the worker join token again
docker swarm join-token worker

# Join the cluster (run on the worker nodes)
docker swarm join --token SWMTKN-1-5ijp838v2gjyq0v85bl2reiptw7qk8cj7verfgjrfb97nun8yd-1215mjt92g7ahl7jnuyoxmpq1 10.193.10.201:2377

# List nodes
docker node ls

Deploy an application

# Deploy an application
docker service create --replicas 1 --name helloworld alpine ping baidu.com

# List services
docker service ls

# Show detailed information about the service
docker service inspect --pretty helloworld

# Show which nodes the service is running on
docker service ps helloworld

# Remove the service
docker service rm helloworld

Update a service

# Deploy a redis service
docker service create \
  --replicas 3 \
  --name redis \
  --update-delay 10s \
  redis:3.0.6

# Update the service image
docker service update --image redis:3.0.7 redis
docker service update redis

Drain a node

# Drain the node
docker node update --availability drain playground03

# Reactivate the node
docker node update --availability active playground03

# Remove the node (use with caution)
docker node rm --force playground03

# Force a rebalance of the service
docker service update helloworld --force

Port mapping

https://holynull.gitbooks.io/docker-swarm/content/kai-shi-shi-yong-swarm/swarmmo-shi-duan-kou-lu-you.html

docker service create \
  --name my-web \
  --publish published=8080,target=80 \
  --replicas 2 \
  nginx

docker service inspect --format="{{json .Endpoint.Spec.Ports}}" my-web

Test:
http://10.193.10.201:8080
http://10.193.10.202:8080
http://10.193.10.203:8080

Stack

https://docs.docker.com/engine/reference/commandline/stack_deploy/
https://www.jianshu.com/p/25c529e3a3e6

# Deploy a stack
docker stack deploy -c docker-compose.yml wordpress

# Inspect stacks and their services
docker stack ls
docker stack ps swarmpit
docker stack services swarmpit

# Remove a stack
docker stack rm xxx
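The docker-compose.yml referenced above is not shown in the original notes; a minimal stack file could look like this (a sketch; image tags and the published port are illustrative):

version: "3.3"
services:
  db:
    image: mysql:5.7
    environment:
      MYSQL_ROOT_PASSWORD: wordpress
      MYSQL_DATABASE: wordpress
      MYSQL_USER: wordpress
      MYSQL_PASSWORD: wordpress
  wordpress:
    image: wordpress:latest
    ports:
      - "8088:80"
    environment:
      WORDPRESS_DB_HOST: db
      WORDPRESS_DB_USER: wordpress
      WORDPRESS_DB_PASSWORD: wordpress
    deploy:
      replicas: 2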

Networking

https://zhuanlan.zhihu.com/p/129258067

# List networks
docker network ls

# Create an overlay network
docker network create --driver overlay swarmnetwork

Bind to multiple nodes

# Add a label to multiple nodes
docker node update --label-add h5=true playground01
docker node update --label-add h5=true playground02

# Constrain the service to the labeled nodes
vi compose.yml
deploy:
  placement:
    constraints: [node.labels.h5 == true]

CPU/MEM limits

# View container stats on the local node
docker stats

# Check the service status
docker service inspect --pretty admin_admin-service

# Check stats on all nodes (set up SSH trust first)
docker node ls | cut -c 31-49 | grep -v HOSTNAME | xargs -I"SERVER" sh -c "echo SERVER; ssh SERVER docker stats --no-stream"

# Reserve CPU/MEM
docker service update --reserve-cpu 2 --reserve-memory 512m h5-test_web-service

# Limit CPU/MEM
docker service update --limit-cpu 2 --limit-memory 512m h5-test_web-service

# Remove reservations and limits
docker service update --limit-cpu 0 --limit-memory 0 --reserve-cpu 0 --reserve-memory 0 h5-test_web-service

Service scaling

# Scale the service
docker service scale admin_admin-service=5

# Check the service status
docker service ps admin_admin-service

View service logs

docker service logs -f --tail 0 hphz-es-service_bridge-service

Common operations

docker node ls

docker node inspect host101 --format '{{ .Status.Addr }}'
docker service inspect --format="{{json .Endpoint.Spec.Ports}}" my-web

docker node inspect --pretty artiServer

docker service ls
docker service inspect --pretty bridge-service_bridge-service
docker service ps bridge-service_bridge-service

docker service scale helloworld=5

docker service update bridge-service_bridge-service --force

docker service scale hphz-es-service_bridge-service=1

docker stack rm h5-test-mazq

docker service logs -f --tail 10 hphz-es-service-temp_bridge-service-temp

docker stack rm hphz-es-service-temp
export SERVICES_hphz_es_SERVICE_IMAGE=artifactory.sdecloud.com:8443/docker-local/hphz-es:RELEASE-1.197
docker stack deploy -c hphz-es-service-temp.yml hphz-es-service-temp --with-registry-auth

Official site

https://docs.rancher.cn/docs/k3s/quick-start/_index

Online install

# server
curl -sfL http://rancher-mirror.cnrancher.com/k3s/k3s-install.sh | INSTALL_K3S_MIRROR=cn sh -
cat /var/lib/rancher/k3s/server/node-token

# node
curl -sfL http://rancher-mirror.cnrancher.com/k3s/k3s-install.sh | INSTALL_K3S_MIRROR=cn K3S_URL=https://10.193.2.8:6443 K3S_TOKEN=K103d30bcbca9e48b6c642e0f92af0d656e465527b5929345537ae086b7fa8ea2e4::server:9bc179e5cfb5b8c09eac21c9ce145a58 sh -

vi /etc/rancher/k3s/k3s.yaml
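Once the agent has joined, the cluster can be checked from the server node (a sketch; k3s writes its kubeconfig to the path above):

export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
kubectl get nodes -o wide
kubectl get pods -A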

Registry install

yum install -y yum-utils
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum install docker-ce docker-ce-cli containerd.io
service docker start

# Quick start
docker run -d -p 5000:5000 --restart=always --name registry registry:2
# Alternative: keep image data on the host filesystem
docker run -d -p 5000:5000 -v /data/registry:/var/lib/registry registry:2

http://10.193.2.11:5000
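Pushing to a plain-HTTP registry from another host requires marking it as insecure in the Docker daemon config; a rough test (a sketch; writing daemon.json this way overwrites any existing settings, merge by hand if needed):

cat > /etc/docker/daemon.json <<'EOF'
{ "insecure-registries": ["10.193.2.11:5000"] }
EOF
systemctl restart docker

docker pull alpine
docker tag alpine 10.193.2.11:5000/alpine:test
docker push 10.193.2.11:5000/alpine:test
curl http://10.193.2.11:5000/v2/_catalog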

Harbor offline install

https://cloud.tencent.com/developer/article/1763874


Official site

Minimal configuration

vi mysql_sync.json

{
    "job": {
        "setting": {
            "speed": {
                "channel": 1
            }
        },
        "content": [
            {
                "reader": {
                    "name": "mysqlreader",
                    "parameter": {
                        "username": "root",
                        "password": "password",
                        "connection": [
                            {
                                "querySql": ["select * from info_site where id < 300;"],
                                "jdbcUrl": ["jdbc:mysql://10.194.99.2:3306/hydrabot_info_prd"]
                            }
                        ]
                    }
                },
                "writer": {
                    "name": "mysqlwriter",
                    "parameter": {
                        "writeMode": "insert",
                        "username": "root",
                        "password": "password",
                        "column": ["*"],
                        "session": ["set session sql_mode='ANSI'"],
                        "preSql": ["delete from test"],
                        "connection": [
                            {
                                "jdbcUrl": "jdbc:mysql://10.194.98.8:3306/hydrabot_info_prd?useUnicode=true&characterEncoding=gbk",
                                "table": ["test"]
                            }
                        ]
                    }
                }
            }
        ]
    }
}
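This is a DataX-style job (mysqlreader to mysqlwriter); assuming DataX is unpacked under /data/datax (a hypothetical path), it would be run roughly like this:

python /data/datax/bin/datax.py mysql_sync.json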