Docker Jaeger Agent Setup

Prerequisites

.env

cp .env-trunk .env
vi .env
JAEGER_ACCESS_TOKEN=[YOUR_ACCESS_TOKEN]

docker-compose.yml

version: '3.3'

services:
  jaeger-agent:
    image: jaegertracing/jaeger-agent
    command: ["--reporter.grpc.host-port=tracing-analysis-dc-sh.aliyuncs.com:1883", "--jaeger.tags=Authentication=$JAEGER_ACCESS_TOKEN"]
    ports:
      - "5775:5775/udp"
      - "6831:6831/udp"
      - "6832:6832/udp"
      - "5778:5778"
    restart: on-failure

deploy_jaeger_agent.sh

docker-compose up -d jaeger-agent
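
To confirm the agent is forwarding spans, you can report a single test span to the UDP port 6831 published above. This is a minimal sketch assuming the jaeger-client Python package (pip install jaeger-client); the file name and the service name trace-test are arbitrary.

verify_trace.py

import time
from jaeger_client import Config

# point the reporter at the agent's compact-thrift UDP port published above
config = Config(
    config={
        'sampler': {'type': 'const', 'param': 1},   # sample every span
        'local_agent': {
            'reporting_host': '127.0.0.1',
            'reporting_port': 6831,
        },
        'logging': True,
    },
    service_name='trace-test',
)
tracer = config.initialize_tracer()

with tracer.start_span('ping') as span:
    span.set_tag('test', True)

time.sleep(2)   # give the background reporter time to flush over UDP
tracer.close()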

Docker Etcd Setup

start a single-node etcd

Prerequisites

.env

cp .env-trunk .env
vi .env
ETCD_IP=127.0.0.1

create etcd-data

mkdir ./default.etcd

docker-compose.yml

version: '3.3'

services:
  etcd-127_0_0_1:
    container_name: etcd-127_0_0_1
    image: "quay.io/coreos/etcd:v3.3"
    environment:
      ETCD_NAME: etcd-127_0_0_1
      ETCD_ADVERTISE_CLIENT_URLS: "http://$ETCD_IP:2379,http://$ETCD_IP:4001"
      ETCD_LISTEN_CLIENT_URLS: "http://0.0.0.0:2379,http://0.0.0.0:4001"
      ETCD_INITIAL_ADVERTISE_PEER_URLS: "http://$ETCD_IP:2380"
      ETCD_LISTEN_PEER_URLS: "http://0.0.0.0:2380"
      ETCD_INITIAL_CLUSTER_TOKEN: txin2019
      ETCD_INITIAL_CLUSTER: etcd-127_0_0_1=http://$ETCD_IP:2380
      ETCD_INITIAL_CLUSTER_STATE: new
      ETCDCTL_API: "3"
    volumes:
      - ./default.etcd:/default.etcd
    ports:
      - 2379:2379
      - 2380:2380
      - 4001:4001

deploy_127.0.0.1.sh

docker-compose up -d etcd-127_0_0_1
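
To check that the node is serving clients, you can write and read a key through the published client port. A minimal sketch assuming the python-etcd package (the same client used in etcd_client_demo.py further below); etcd v3.3 still exposes the v2 HTTP API that this client speaks.

etcd_check.py

import etcd

# connect to the client port published by docker-compose above
client = etcd.Client(host='127.0.0.1', port=2379)

client.write('/healthcheck', 'ok', ttl=60)   # key expires after 60 seconds
print(client.read('/healthcheck').value)     # expected output: ok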

CentOS NFS Setup

Prerequisites

  • CentOS 7

install nfs

yum -y install rpcbind nfs-utils

create nfs_data

mkdir -p /mnt/data/nfs_data
# example for mysql: chown to the host user/group whose numeric UID/GID
# match the user inside the mysql container
mkdir -p /mnt/data/nfs_data/mysql
chown -R systemd-bus-proxy:ssh_keys /mnt/data/nfs_data/mysql

modify /etc/exports

/mnt/data/nfs_data/mysql 192.168.3.0/24(rw,no_root_squash)

reload nfs config

exportfs -r

start services

systemctl start rpcbind
systemctl start nfs
systemctl enable rpcbind
systemctl enable nfs

check script

rpcinfo -p
showmount -e

Docker Portainer and Agent Setup

Prerequisites

  • Docker Swarm mode enabled (the commands below use docker service)

maintenance (upgrade an existing deployment)

docker service update --image portainer/portainer portainer
docker service update --image portainer/agent portainer_agent

create portainer_data

mkdir -p /mnt/data/portainer_data

create_network.sh

docker network create -d overlay portainer_agent_network

create_portainer_agent.sh

docker service create \
--name portainer_agent \
--network portainer_agent_network \
-e AGENT_CLUSTER_ADDR=tasks.portainer_agent \
--mode global \
--constraint 'node.platform.os == linux' \
--mount type=bind,src=//var/run/docker.sock,dst=/var/run/docker.sock \
--mount type=bind,src=//var/lib/docker/volumes,dst=/var/lib/docker/volumes \
--mount type=bind,src=//etc/localtime,dst=/etc/localtime \
portainer/agent

create_portainer.sh

docker service create \
--name portainer \
--network portainer_agent_network \
--publish 9000:9000 \
--replicas=1 \
--constraint 'node.role == manager' \
--mount type=bind,source=/mnt/data/portainer_data,destination=/data \
--mount type=bind,src=//etc/localtime,dst=/etc/localtime \
portainer/portainer -H "tcp://tasks.portainer_agent:9001" --tlsskipverify
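
As a quick sanity check once both services are up, you can confirm the published UI port answers HTTP. A minimal sketch using only the standard library; 127.0.0.1 assumes you run it on a swarm node that publishes port 9000.

portainer_check.py

import urllib.request

# the Portainer UI is published on port 9000 by create_portainer.sh above
resp = urllib.request.urlopen('http://127.0.0.1:9000/', timeout=5)
print(resp.status)   # 200 means the Portainer UI is being served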

Etcd Registrator Docker Setup

Prerequisites

docker-compose.yml

version: '3.3'

# each etcd/registrator pair below is started on its own host by the deploy_*.sh scripts
services:
  etcd:
    container_name: etcd-170
    image: "quay.io/coreos/etcd:v3.3"
    environment:
      ETCD_NAME: etcd-170
      ETCD_ADVERTISE_CLIENT_URLS: "http://$ETCD_IP:2379,http://0.0.0.0:2379"
      ETCD_LISTEN_CLIENT_URLS: "http://0.0.0.0:2379"
      ETCD_INITIAL_ADVERTISE_PEER_URLS: "http://$ETCD_IP:2380"
      ETCD_LISTEN_PEER_URLS: "http://0.0.0.0:2380"
      ETCD_INITIAL_CLUSTER_TOKEN: txin2018
      ETCD_INITIAL_CLUSTER: etcd-33=http://$ETCD_NODE2_IP:2380,etcd-21=http://$ETCD_NODE1_IP:2380,etcd-170=http://$ETCD_IP:2380
      ETCD_INITIAL_CLUSTER_STATE: new
      ETCDCTL_API: "3"
      SERVICE_2379_NAME: "etcd-170"
    volumes:
      - ./default.etcd:/default.etcd
    ports:
      - 2379:2379
      - 2380:2380
      - 4001:4001

  etcd_node1:
    container_name: etcd-21
    image: "quay.io/coreos/etcd:v3.3"
    environment:
      ETCD_NAME: etcd-21
      ETCD_ADVERTISE_CLIENT_URLS: "http://$ETCD_NODE1_IP:2379,http://0.0.0.0:2379"
      ETCD_LISTEN_CLIENT_URLS: "http://0.0.0.0:2379"
      ETCD_INITIAL_ADVERTISE_PEER_URLS: "http://$ETCD_NODE1_IP:2380"
      ETCD_LISTEN_PEER_URLS: "http://0.0.0.0:2380"
      ETCD_INITIAL_CLUSTER_TOKEN: txin2018
      ETCD_INITIAL_CLUSTER: etcd-33=http://$ETCD_NODE2_IP:2380,etcd-21=http://$ETCD_NODE1_IP:2380,etcd-170=http://$ETCD_IP:2380
      ETCD_INITIAL_CLUSTER_STATE: new
      ETCDCTL_API: "3"
      SERVICE_2379_NAME: "etcd-21"
    volumes:
      - ./default.etcd:/default.etcd
    ports:
      - 2379:2379
      - 2380:2380
      - 4001:4001

  etcd_node2:
    container_name: etcd-33
    image: "quay.io/coreos/etcd:v3.3"
    environment:
      ETCD_NAME: etcd-33
      ETCD_ADVERTISE_CLIENT_URLS: "http://$ETCD_NODE2_IP:2379,http://0.0.0.0:2379"
      ETCD_LISTEN_CLIENT_URLS: "http://0.0.0.0:2379"
      ETCD_INITIAL_ADVERTISE_PEER_URLS: "http://$ETCD_NODE2_IP:2380"
      ETCD_LISTEN_PEER_URLS: "http://0.0.0.0:2380"
      ETCD_INITIAL_CLUSTER_TOKEN: txin2018
      ETCD_INITIAL_CLUSTER: etcd-33=http://$ETCD_NODE2_IP:2380,etcd-21=http://$ETCD_NODE1_IP:2380,etcd-170=http://$ETCD_IP:2380
      ETCD_INITIAL_CLUSTER_STATE: new
      ETCDCTL_API: "3"
      SERVICE_2379_NAME: "etcd-33"
    volumes:
      - ./default.etcd:/default.etcd
    ports:
      - 2379:2379
      - 2380:2380
      - 4001:4001

  registrator:
    container_name: registrator-170
    image: gliderlabs/registrator:latest
    # Tell registrator where the etcd HTTP API is and to use
    # the docker host's IP
    command: ["-ttl=60", "-ttl-refresh=30", "-ip", "$ETCD_IP", "etcd://$ETCD_IP:2379/trunk/services"]
    volumes:
      # So registrator can use the docker API to inspect containers
      - "/var/run/docker.sock:/tmp/docker.sock"

  registrator_node1:
    container_name: registrator-21
    image: gliderlabs/registrator:latest
    # Tell registrator where the etcd HTTP API is and to use
    # the docker host's IP
    command: ["-ttl=60", "-ttl-refresh=30", "-ip", "$ETCD_NODE1_IP", "etcd://$ETCD_NODE1_IP:2379/trunk/services"]
    volumes:
      # So registrator can use the docker API to inspect containers
      - "/var/run/docker.sock:/tmp/docker.sock"

  registrator_node2:
    container_name: registrator-33
    image: gliderlabs/registrator:latest
    # Tell registrator where the etcd HTTP API is and to use
    # the docker host's IP
    command: ["-ttl=60", "-ttl-refresh=30", "-ip", "$ETCD_NODE2_IP", "etcd://$ETCD_NODE2_IP:2379/trunk/services"]
    volumes:
      # So registrator can use the docker API to inspect containers
      - "/var/run/docker.sock:/tmp/docker.sock"

deploy_manager.sh

export ETCD_IP=192.168.3.170
export ETCD_NODE1_IP=192.168.3.21
export ETCD_NODE2_IP=192.168.3.33
docker-compose up -d etcd registrator

deploy_node1.sh

export ETCD_IP=192.168.3.170
export ETCD_NODE1_IP=192.168.3.21
export ETCD_NODE2_IP=192.168.3.33
docker-compose up -d etcd_node1 registrator_node1

deploy_node2.sh

export ETCD_IP=192.168.3.170
export ETCD_NODE1_IP=192.168.3.21
export ETCD_NODE2_IP=192.168.3.33
docker-compose up -d etcd_node2 registrator_node2
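
Once all three deploy scripts have run, you can verify cluster membership from any machine that can reach a node. A minimal sketch assuming the python-etcd package (the same client used in etcd_client_demo.py below).

cluster_check.py

import etcd

client = etcd.Client(host='192.168.3.170', port=2379)

# lists the client URLs of the reachable cluster members; with all three nodes up
# the list should cover 192.168.3.170, 192.168.3.21 and 192.168.3.33
print(client.machines)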

etcd_client_demo.py

import etcd

client = etcd.Client(host='192.168.3.170', port=2379)

SERVICE_PATH = '/trunk/services/'
SERVICE_NAME = 'test_service'

# build the request path up front so the error handler can report it
req_path = SERVICE_PATH + SERVICE_NAME

try:
    # make sure the services directory exists before querying the service
    client.read(SERVICE_PATH)

    directory = client.get(req_path)
    if directory and directory.children:
        for obj in directory.children:
            print(str(obj.key) + ': ' + str(obj.value))

except etcd.EtcdKeyNotFound:
    print('SERVICE_PATH: ' + req_path + ' read error!')

Go alisms Alibaba Cloud SMS Service Setup

Prerequisites

proto/alisms/alisms.proto

syntax = "proto3";

package alisms;

message SMSVerficationCodeData {
string sign_name = 1;
string phone_numbers = 2;
string template_code = 3;
string template_param = 4;
string sms_up_extend_code = 5;
string out_id = 6;
}

message SMSVerficationCodeCheckData {
string phone_numbers = 1;
string vcode = 2;
}

message SMSVerficationResponseData {
int64 return_code = 1;
string message = 2;
string data = 3;
}

message SMSVerficationQueryData {
string phone_numbers = 1;
string send_date = 2;
string page_size = 3;
string current_page = 4;
string biz_id = 5;
}

message SMSVerficationQueryResponseData {
int64 return_code = 1;
string message = 2;
string data = 3;
}

service AuthService {
rpc SMSVerficationCode(SMSVerficationCodeData) returns (SMSVerficationResponseData) {}
rpc SMSVerficationCodeCheck(SMSVerficationCodeCheckData) returns (SMSVerficationResponseData) {}
rpc SMSVerficationQuery(SMSVerficationQueryData) returns (SMSVerficationQueryResponseData) {}
}

cache/cache.go

package cache

import (
    "fmt"
    "log"
    "sync"

    "github.com/go-redis/redis"
    "github.com/noahzaozao/alisms_service/config"
)

type CacheManager struct {
    config config.CacheConfig
}

var instance *CacheManager
var once sync.Once

func CacheMgr() *CacheManager {
    once.Do(func() {
        instance = &CacheManager{}
    })
    return instance
}

//
// Initialize the cache configuration
//
func (cacheMgr *CacheManager) Init(cacheConfig config.CacheConfig) error {
    cacheMgr.config = cacheConfig
    if cacheMgr.config.Type == "redis" {
        // open and close one connection to verify the config is usable
        dbConn, err := cacheMgr.Conn()
        if err != nil {
            return err
        }
        defer dbConn.Close()
        log.Println("Cache connected")
    } else {
        log.Println("Cache Type is incorrect")
    }
    return nil
}

//
// Get a cache connection
//
func (cacheMgr *CacheManager) Conn() (*redis.Client, error) {
    connStr := fmt.Sprintf(
        "%s:%s",
        cacheMgr.config.Host,
        cacheMgr.config.Port)
    client := redis.NewClient(&redis.Options{
        Addr:     connStr,
        Password: cacheMgr.config.Password, // password from CacheConfig
        DB:       cacheMgr.config.DB,       // DB index from CacheConfig
    })
    _, err := client.Ping().Result()
    if err != nil {
        return nil, err
    }
    return client, nil
}

config/config.go

package config

type SMSConfig struct {
    ACCESS_KEY_ID     string `yaml:"ACCESS_KEY_ID"`
    ACCESS_KEY_SECRET string `yaml:"ACCESS_KEY_SECRET"`
}

type CacheConfig struct {
    Type     string `yaml:"type"`
    Host     string `yaml:"host"`
    Port     string `yaml:"port"`
    DB       int    `yaml:"db"`
    Password string `yaml:"password"`
}

type SettingConfig struct {
    SECRET_KEY      string        `yaml:"SECRET_KEY"`
    DEBUG           string        `yaml:"DEBUG"`
    DEFAULT_CHARSET string        `yaml:"DEFAULT_CHARSET"`
    SMSConfig       SMSConfig     `yaml:"SMSConfig"`
    CACHES          []CacheConfig `yaml:"CACHES"`
}

main.go

package main

import (
    "context"
    "fmt"
    "log"
    "time"

    "github.com/micro/go-grpc"
    "github.com/micro/go-micro"
    go_config "github.com/micro/go-config"

    "github.com/aliyun/alibaba-cloud-sdk-go/sdk"
    "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
    "github.com/noahzaozao/alisms_service/cache"
    "github.com/noahzaozao/alisms_service/config"
    "github.com/noahzaozao/alisms_service/proto/alisms"
)

type AliSMSService struct {
    Config config.SettingConfig
}

func (aliSmsService *AliSMSService) SMSVerficationCode(
    ctx context.Context, in *alisms.SMSVerficationCodeData, out *alisms.SMSVerficationResponseData) error {

    client, err := sdk.NewClientWithAccessKey(
        "default",
        aliSmsService.Config.SMSConfig.ACCESS_KEY_ID,
        aliSmsService.Config.SMSConfig.ACCESS_KEY_SECRET)
    if err != nil {
        return err
    }

    request := requests.NewCommonRequest()
    request.Method = "POST"
    request.Scheme = "https"
    request.Domain = "dysmsapi.aliyuncs.com"
    request.Version = "2017-05-25"
    request.ApiName = "SendSms"

    request.QueryParams["SignName"] = in.SignName
    request.QueryParams["PhoneNumbers"] = in.PhoneNumbers
    request.QueryParams["TemplateCode"] = in.TemplateCode
    request.QueryParams["TemplateParam"] = in.TemplateParam
    request.QueryParams["SmsUpExtendCode"] = in.SmsUpExtendCode
    request.QueryParams["OutId"] = in.OutId

    response, err := client.ProcessCommonRequest(request)
    if err != nil {
        return err
    }
    fmt.Print(response.GetHttpContentString())

    return nil
}

func (aliSmsService *AliSMSService) SMSVerficationCodeCheck(
    ctx context.Context, in *alisms.SMSVerficationCodeCheckData, out *alisms.SMSVerficationResponseData) error {

    return nil
}

func (aliSmsService *AliSMSService) SMSVerficationQuery(
    ctx context.Context, in *alisms.SMSVerficationQueryData, out *alisms.SMSVerficationQueryResponseData) error {

    client, err := sdk.NewClientWithAccessKey(
        "default",
        aliSmsService.Config.SMSConfig.ACCESS_KEY_ID,
        aliSmsService.Config.SMSConfig.ACCESS_KEY_SECRET)
    if err != nil {
        return err
    }

    request := requests.NewCommonRequest()
    request.Method = "POST"
    request.Scheme = "https"
    request.Domain = "dysmsapi.aliyuncs.com"
    request.Version = "2017-05-25"
    request.ApiName = "QuerySendDetails"
    request.QueryParams["PhoneNumber"] = in.PhoneNumbers
    request.QueryParams["SendDate"] = in.SendDate
    request.QueryParams["PageSize"] = in.PageSize
    request.QueryParams["CurrentPage"] = in.CurrentPage
    request.QueryParams["BizId"] = in.BizId

    response, err := client.ProcessCommonRequest(request)
    if err != nil {
        return err
    }
    fmt.Print(response.GetHttpContentString())

    return nil
}

func main() {

    // Load the yaml config file
    if err := go_config.LoadFile("./config.yaml"); err != nil {
        log.Println(err.Error())
        return
    }

    var settingsConfig config.SettingConfig

    if err := go_config.Get("config").Scan(&settingsConfig); err != nil {
        log.Println(err.Error())
        return
    }

    log.Println("DEBUG: " + settingsConfig.DEBUG)
    log.Println("CHARSET: " + settingsConfig.DEFAULT_CHARSET)

    if len(settingsConfig.CACHES) < 1 {
        log.Println("CACHES config does not exist")
        return
    }

    if err := cache.CacheMgr().Init(settingsConfig.CACHES[0]); err != nil {
        log.Println(err.Error())
        return
    }
    log.Println("Init CACHE...")

    service := grpc.NewService(
        micro.Name("alisms.srv"),
        micro.RegisterTTL(time.Second*30),
        micro.RegisterInterval(time.Second*10),
    )
    service.Init()

    alismsService := &AliSMSService{
        Config: settingsConfig,
    }

    if err := alisms.RegisterAuthServiceHandler(service.Server(), alismsService); err != nil {
        log.Println(err.Error())
        return
    }

    if err := service.Run(); err != nil {
        log.Fatal(err)
    }
}

config.yaml

config:
  SECRET_KEY: ""
  DEBUG: "true"
  DEFAULT_CHARSET: "utf-8"
  SMSConfig:
    ACCESS_KEY_ID: ""
    ACCESS_KEY_SECRET: ""
  CACHES:
    - type: "redis"
      host: "127.0.0.1"
      port: "6379"
      db: 0
      password: "password"

build.sh

echo 'protoc'
protoc -I/usr/local/include -I. \
    --proto_path=$GOPATH/src:. \
    --micro_out=./proto/alisms/ \
    --go_out=./proto/alisms/ \
    -I./proto/alisms/ alisms.proto && \
go build && \
echo 'success'

Python Celery Microservice Example

Prerequisites

requirements.txt

celery==4.2.1
Django==1.11.20
django-celery==3.2.2
redis==3.2.0
grpcio==1.19.0
grpcio-tools==1.19.0

Install Dependent Packages

pyenv activate env_celery_demo
pip install --no-cache-dir -r requirements.txt -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com

Create the Django project and demo_app

django-admin startproject demo
cd demo
python manage.py startapp demo_app

demo/celery.py

from celery import Celery
import os

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'demo.settings')

from django.conf import settings

app = Celery('demo')

app.config_from_object('django.conf.settings')

app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)

Add these lines to INSTALLED_APPS in demo/settings.py

INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',

'djcelery',
'demo_app',
]

Add these lines at the bottom of demo/settings.py

import djcelery

djcelery.setup_loader()
CELERY_TIMEZONE = TIME_ZONE
BROKER_URL = 'redis://:password@127.0.0.1:6379/8'
BROKER_PASSWORD = 'password'
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'

demo_app/tasks.py

from demo.celery import app


class AddClass(app.Task):
    def run(self):
        print('run')


# class-based tasks are not auto-registered in Celery 4, so register an instance
app.register_task(AddClass())


@app.task
def demo_task():
    # the @app.task decorator registers demo_task automatically
    print('demo_task')

Run celery

python manage.py celery worker -l info
python manage.py celery beat
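
With a worker running, tasks can be queued from any Django context, for example inside a view or a python manage.py shell session:

from demo_app.tasks import demo_task

demo_task.delay()                      # queue the task for the next available worker
demo_task.apply_async(countdown=10)    # or schedule it to run about 10 seconds from now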

Run django

python manage.py runserver 0.0.0.0:8000