架构需求:
1 架构网站:zrlog wecenter wordpress cloud
2 要求: 实现两个负载均衡器代理并介入keepalived高可用
3 实现全站https加密
4 实现两个web站点
5 实现nfs服务器存储网站静态资源
6 实现backup服务器能实时同步(rsync/lsyncd)nfs过来的数据
实现全网备份功能(rsync)
7 实现服务器内部共享上网
8 实现各服务器之间时间同步
9 实现ansible一键操作
10 实现teleport跳板机
11 实现firewall对负载均衡服务器开放指定端口
ansible:其功能实现基于SSH远程连接服务
批量系统配置、批量软件部署、批量文件拷贝、批量运行命令等功能
yum install epel-release -y
yum install ansible -y
生成公钥推送给每一台服务器
[root@m01 ~]# cat ssh-key.sh
# Push m01's public key to every managed host (172.16.1.<octet> per the inventory).
# NOTE(review): plaintext password in the script is acceptable for a lab, not production.
for i in 31 41 51 7 8
do
  sshpass -p 123456 ssh-copy-id -i ~/.ssh/id_rsa.pub "root@172.16.1.$i"   # remote user assumed root — confirm
done
[root@m01 ~]# ansible --version
ansible 2.7.8
[root@m01 ~]# tree /etc/ansible/
/etc/ansible/
├── ansible.cfg
├── hosts
└── roles
ansible文件
[root@m01 ansible_role]# cat ansible.cfg
[defaults]
inventory = ./hosts
#library = /usr/share/my_modules/
#module_utils = /usr/share/my_module_utils/
#remote_tmp = ~/.ansible/tmp
#local_tmp = ~/.ansible/tmp
hosts文件
[root@m01 ansible_role]# cat hosts
[oldboy]
172.16.1.31
172.16.1.41
[web]
172.16.1.7
172.16.1.8
[web:vars]
inventory_var=inventory_variables
[nfs]
172.16.1.31
[backup]
172.16.1.41
[db]
172.16.1.51
[lb]
172.16.1.5
172.16.1.6
[firewalld]
172.16.1.7
172.16.1.8
172.16.1.51
172.16.1.41
172.16.1.31
top.yml文件
[root@m01 ansible_role]# cat top.yml
#- hosts: lb
# roles:
# - { role: keepalived , tags: kee }
#- hosts: all
# roles:
# - role: base
- hosts: db
roles:
- role: redis
- role: mariadb
- hosts: nfs
roles:
- role: nfs
- hosts: web
roles:
- role: nginx-web
- role: kodcloud-web
- hosts: backup
roles:
- role: backup
tags: backup
- hosts: nfs
roles:
- role: nfs-lsyncd
- hosts: all
roles:
- { role: ntp , tags: ntp }
- hosts: lb
roles:
- role: kodcloud-proxy
- role: keepalived
- hosts: firewalld
roles:
- role: firewalld
tags: fire
基础环境ansible部署
[root@m01 ansible_role]# tree base/
base/
|-- files
|-- handlers
| `-- main.yml
|-- tasks
| `-- main.yml
`-- templates
`-- sshd_config.j2
[root@m01 ansible_role]# cat base/tasks/main.yml
- name: create group
group:
name: www
gid: 666
- name: create user
user:
name: www
uid: '666'
group: '666'
create_home: no
shell: /sbin/nologin
- name: Disable Firewalld
systemd:
name: firewalld
state: stopped
enabled: no
- name: Disable Selinux
selinux:
state: disabled
- name: Create YUM_Repository Epel
yum_repository:
name: epel
description: EPEL YUM repo
baseurl: http://mirrors.aliyun.com/epel/7/$basearch
gpgcheck: no
- name: Create YUM_Repository Base
yum_repository:
name: base
description: BASE YUM repo
baseurl: http://mirrors.aliyun.com/centos/$releasever/os/$basearch/
gpgcheck: no
- name: Create YUM_Repository Nginx
yum_repository:
name: nginx
description: Nginx YUM repo
baseurl: http://nginx.org/packages/centos/$releasever/$basearch/
gpgcheck: no
when: ( ansible_hostname is match ('web*') ) or
( ansible_hostname is match ('lb*') )
- name: Create YUM_Repository PHP
yum_repository:
name: php
description: PHP YUM repo
baseurl:
mirrorlist: http://cdn.remirepo.net/enterprise/7/php71/mirror
gpgcheck: no
when: ( ansible_hostname is match ('web*') )
#- name: Installed Packages All
# yum:
# name: "{{ base_packages }}"
# vars:
# base_packages:
# - nfs-utils
# - rsync
# - wget
# - unzip
# - glances
# - lrzsz
# - vim
# - net-tools
# - bash-completion
# - tree
# - MySQL-python
# - mariadb
# ignore_errors: yes
- name: Changed SSH Configure
template:
src: sshd_config.j2
dest: /etc/ssh/sshd_config
notify: Restart SSH Server
- name: Set sysctl file limiits
pam_limits:
domain: '*'
limit_type: "{{ item.limit_type }}"
limit_item: "{{ item.limit_item }}"
value: "{{ item.value }}"
loop:
- { limit_type: 'soft',limit_item: 'nofile', value: '65535' }
- { limit_type: 'hard',limit_item: 'nofile', value: '65535' }
- { limit_type: 'soft',limit_item: 'nproc', value: '102400' }
- { limit_type: 'hard',limit_item: 'nproc', value: '102400' }
[root@m01 ansible_role]# cat base/templates/sshd_config.j2
# $OpenBSD: sshd_config,v 1.100 2016/08/15 12:32:04 naddy Exp $
# This is the sshd server system-wide configuration file. See
# sshd_config(5) for more information.
# This sshd was compiled with PATH=/usr/local/bin:/usr/bin
# The strategy used for options in the default sshd_config shipped with
[root@m01 ansible_role]# cat base/handlers/main.yml
# Handler fired by the "Changed SSH Configure" task in base/tasks/main.yml.
- name: Restart SSH Server
  systemd:
    name: sshd          # fixed: was "php-fpm" — this handler must restart sshd, not PHP
    state: restarted
nginx部署
[root@m01 ansible_role]# cat nginx-web/tasks/main.yml
- name: install nginx
yum:
name: nginx
state: present
- name: configure nginx server
template:
src: nginx.conf.j2
dest: /etc/nginx/nginx.conf
owner: root
group: root
mode: '0644'
notify: Restart Nginx Server
- name: Systemd Nginx Server
systemd:
name: nginx
state: started
enabled: yes
[root@m01 ansible_role]# cat nginx-web/templates/nginx.conf.j2
user www;
worker_processes 1;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
access_log /var/log/nginx/access.log main;
sendfile on;
tcp_nopush on;
keepalive_timeout 65;
#gzip on;
include /etc/nginx/conf.d/*.conf;
}
[root@m01 ansible_role]# cat nginx-web/handlers/main.yml
# Name must match the "notify: Restart Nginx Server" used by the nginx-web tasks;
# the original "Restemd Nginx Server" typo made Ansible fail with "handler not found".
- name: Restart Nginx Server
  systemd:
    name: nginx
    state: restarted
php部署
[root@m01 ansible_role]# cat php/tasks/main.yml
- name: Installed PHP
yum:
name: "{{ packages }}"
state: present
- name: configure php
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
mode: "{{ item.mode }}"
owner: root
group: root
loop:
- { src: php.ini.j2 , dest: /etc/php.ini , mode: '0644' }
- { src: php_www.conf.j2 , dest: /etc/php-fpm.d/www.conf , mode: '0644' }
notify: Restart PHP Server
- name: Restart PHP Server
systemd:
name: php-fpm
state: restarted
enabled: yes
[root@m01 ansible_role]# cat php/templates/php.ini.j2
[PHP]
;;;;;;;;;;;;;;;;;;;
; About php.ini ;
;;;;;;;;;;;;;;;;;;;
......
[root@m01 ansible_role]# cat php/templates/php_www.conf.j2
[www]
user = {{ web_process_user }}
group = {{ web_process_group }}
listen = 127.0.0.1:9000
listen.allowed_clients = 127.0.0.1
pm = dynamic
pm.max_children = 50
pm.start_servers = 5
pm.min_spare_servers = 5
pm.max_spare_servers = 35
slowlog = /var/log/php-fpm/www-slow.log
php_admin_value[error_log] = /var/log/php-fpm/www-error.log
php_admin_flag[log_errors] = on
;php_value[session.save_handler] = files
;php_value[session.save_path] = /var/lib/php/session
php_value[soap.wsdl_cache_dir] = /var/lib/php/wsdlcache
[root@m01 ansible_role]# cat php/handlers/main.yml
- name: Restart PHP Server
systemd:
name: php-fpm
state: restarted
数据库mariadb部署
[root@m01 ansible_role]# cat mariadb/tasks/main.yml
- name: Installed Mariadb Server
yum:
name: "{{ packages }}"
state: present
vars:
packages:
- mariadb-server
- mariadb
- MySQL-python
- name: Configure Mariadb Server
template:
src: my.cnf.j2
dest: /etc/my.cnf
- name: Systemd Mariadb Server
systemd:
name: mariadb
state: started
enabled: yes
- name: Create Dtabase
mysql_db:
login_user: root
login_password: 123456
name: "{{ item }}"
state: present
loop:
- wordpress
- wecenter
- zrlog
- name: Create Remote User name 'all' and password '123456' with all database privileges
mysql_user:
login_user: root
login_password: 123456
name: all
host: '%'
password: 123456
priv: '*.*:ALL'
state: present
[root@m01 ansible_role]# cat mariadb/templates/my.cnf.j2
[mysqld]
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
symbolic-links=0
[mysqld_safe]
log-error=/var/log/mariadb/mariadb.log
pid-file=/var/run/mariadb/mariadb.pid
!includedir /etc/my.cnf.d
共享部署
[root@m01 ansible_role]# cat nfs/tasks/main.yml
- name: Configure NFS Server
template:
src: exports.j2
dest: /etc/exports
owner: root
group: root
mode: 0644
notify: Restart NFS Server
- name: Create NFS Share Directory
file:
path: "{{ item }}"
state: directory
owner: "{{ web_process_user }}"
group: "{{ web_process_group }}"
recurse: yes
loop:
- "{{ nfs_wordpress }}"
- "{{ nfs_wecenter }}"
- "{{ nfs_zrlog }}"
- name: Systemd NFS Server
systemd:
name: nfs
state: started
enabled: yes
[root@m01 ansible_role]# cat nfs/templates/exports.j2
{{ nfs_wordpress }} {{ nfs_share_ip }}(rw,sync,all_squash,anonuid=666,anongid=666)
{{ nfs_wecenter }} {{ nfs_share_ip }}(rw,sync,all_squash,anonuid=666,anongid=666)
{{ nfs_zrlog }} {{ nfs_share_ip }}(rw,sync,all_squash,anonuid=666,anongid=666)
[root@m01 ansible_role]# cat nfs/handlers/main.yml
- name: Restart NFS Server
systemd:
name: nfs
state: restarted
[root@m01 ansible_role]# cat nfs/exports.j2/main.yml
{{ nfs_wordpress }} {{ nfs_share_ip }}(rw,sync,all_squash,anonuid=666,anongid=666)
{{ nfs_wecenter }} {{ nfs_share_ip }}(rw,sync,all_squash,anonuid=666,anongid=666)
{{ nfs_zrlog }} {{ nfs_share_ip }}(rw,sync,all_squash,anonuid=666,anongid=666)
备份服务器部署
[root@m01 ansible_role]# cat backup/tasks/main.yml
# Deploy the rsync daemon on the backup host (172.16.1.41).
- name: install rsync server
  yum:
    name: rsync
    state: present
- name: create backup dir
  file:
    path: /backup
    state: directory
    owner: "{{ web_process_user }}"
    group: "{{ web_process_group }}"
    mode: '0755'
    recurse: yes
- name: configure rsync server --passwd
  template:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"
    mode: "{{ item.mode }}"
  loop:
    - { src: rsyncd.conf.j2 , dest: /etc/rsyncd.conf , mode: '0644' }
    - { src: rsync.passwd.j2 , dest: /etc/rsync.passwd , mode: '0600' }   # secrets file must be 0600 for rsyncd
  notify: Restart rsyncd server
- name: Systemd rsyncd Server
  systemd:
    name: rsyncd
    state: started
    enabled: yes        # added: survive reboot, consistent with every other role
[root@m01 ansible_role]# cat backup/templates/rsync.passwd.j2
rsync_backup:1
[root@m01 ansible_role]# cat backup/templates/rsyncd.conf.j2
# rsync daemon config for the backup server; "fake super" lets the www user
# store files owned by others without running rsyncd as root.
uid = www
gid = www
port = 873
fake super = yes
use chroot = no
max connections = 200
timeout = 600
pid file = /var/run/rsyncd.pid
lock file = /var/run/rsync.lock
log file = /var/log/rsyncd.log
ignore errors
read only = false
list = true
auth users = rsync_backup
secrets file = /etc/rsync.passwd
# (duplicate "log file" line removed)
#####################################
[backup]
comment = welcome to oldboyedu backup!
path = /backup
[data]
comment = welcome to oldboyedu backup!
path = /data
[root@m01 ansible_role]# cat backup/handlers/main.yml
- name: Restart rsyncd server
systemd:
name: rsyncd
state: restarted
会话共享部署
[root@m01 ansible_role]# cat redis/tasks/main.yml
# Install and configure redis for shared PHP sessions on the db host.
- name: install redis
  yum:
    name: redis
    state: present
- name: configure redis
  template:
    src: redis.conf.j2
    dest: /etc/redis.conf   # fixed: was /etc/redis.conf.j2 — redis would never read the rendered file
    owner: redis
    group: root
    mode: '0644'
  notify: Restart Redis Server
- name: Systemd Redis Server   # fixed: task name was empty
  systemd:
    name: redis
    state: started
    enabled: yes
[root@m01 ansible_role]# cat redis/templates/redis.conf.j2
# Redis configuration file example.
#
# Note that in order to read the configuration file, Redis must be
# started with the file path as first argument:
#
# ./redis-server /path/to/redis.conf
[root@m01 ansible_role]# cat redis/handlers/main.yml
- name: Restart Redis Server
systemd:
name: redis
state: restarted
时间同步部署
[root@m01 ansible_role]# cat ntp/tasks/main.yml
- name: install ntp server
yum:
name: ntp
state: present
- name: configure ntp server
template:
src: ntp.conf.j2
dest: /etc/ntp.conf
notify: restart ntpd server
- name: systemd ntp server
systemd:
name: ntpd
state: started
enabled: yes
- name: configure datetime
shell: timedatectl set-timezone Asia/Shanghai
- name: cron ntpdate -u 172.16.1.61
cron:
name: cron ntpdate -u 172.16.1.61
minute: "33"
hour: "23"
job: "ntpdate -u 172.16.1.61 > /dev/null"
[root@m01 ansible_role]# cat ntp/templates/ntp.conf.j2
# For more information about this file, see the man pages
# # ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5).
##
driftfile /var/lib/ntp/drift
#
# # Permit time synchronization with our time source, but do not
# # permit the source to query or modify the service on this system.
restrict default nomodify notrap nopeer noquery
#
# # Permit all access over the loopback interface. This could
# # be tightened as well, but to do so would effect some of
# # the administrative functions.
restrict 127.0.0.1
restrict ::1
#
# # Hosts on local network are less restricted.
# #restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap
#
# # Use public servers from the pool.ntp.org project.
# # Please consider joining the pool (http://www.pool.ntp.org/join.html).
# #server 0.centos.pool.ntp.org iburst
# #server 1.centos.pool.ntp.org iburst
# #server 2.centos.pool.ntp.org iburst
# #server 3.centos.pool.ntp.org iburst
#
server 172.16.1.61
#
restrict 172.16.1.61 nomodify notrap noquery
#
server 127.0.0.1 # local clock
fudge 127.0.0.1 stratum 10
[root@m01 ansible_role]# cat ntp/handlers/main.yml
- name: restart ntpd server
systemd:
name: ntpd
state: restarted
实时同步部署
[root@m01 ansible_role]# cat nfs-lsyncd/tasks/main.yml
- name: Yum Install Lsync
yum:
name: rsync
state: present
- name: Yum Install Lsync
yum:
name: lsyncd
state: present
- name: Configure Conf
template:
src: lsyncd.conf.j2
dest: /etc/lsyncd.conf
owner: root
group: root
mode: '0644'
- name: Configure Conf
template:
src: rsync.pwd.j2
dest: /etc/rsync.pwd
owner: root
group: root
mode: '0600'
notify: restarted lsync
- name: Started Lsync
systemd:
name: lsyncd
state: started
[root@m01 ansible_role]# cat nfs-lsyncd/templates/rsync.pwd.j2
1
[root@m01 ansible_role]# cat nfs-lsyncd/templates/lsyncd.conf.j2
settings {
    logfile    = "/var/log/lsyncd/lsyncd.log",
    statusFile = "/var/log/lsyncd/lsyncd.status",
    inotifyMode = "CloseWrite",
    maxProcesses = 8,
}
sync {
    default.rsync,
    source = "/data",
    -- user must match "auth users = rsync_backup" in rsyncd.conf on the
    -- backup host (172.16.1.41); the original line was a redaction artifact
    target = "rsync_backup@172.16.1.41::backup",
    delete = true,
    exclude = { ".*" },
    delay = 1,
    rsync = {
        binary = "/usr/bin/rsync",
        archive = true,
        compress = true,
        verbose = true,
        password_file = "/etc/rsync.pwd",
        _extra = {"--bwlimit=200"}
    }
}
[root@m01 ansible_role]# cat nfs-lsyncd/handlers/main.yml
- name: restarted lsync
systemd:
name: lsyncd
state: restarted
高可用部署
[root@m01 ansible_role]# cat keepalived/tasks/main.yml
- name: install keepalived
yum:
name: keepalived
state: present
- name: configure keepalived
template:
src: keepalived.conf.j2
dest: /etc/keepalived/keepalived.conf
notify: Restart Keepalived
- name: systemd keepalived
systemd:
name: keepalived
state: started
enabled: yes
[root@m01 ansible_role]# cat keepalived/templates/keepalived.conf.j2
global_defs {                      {# fixed: "glabal_defs" typo breaks keepalived config parsing #}
    router_id {{ ansible_hostname }}
}
vrrp_instance VIP_1 {
{% if ansible_hostname == "lb01" %}
    state MASTER
    priority 150
{% elif ansible_hostname == "lb02" %}
    state BACKUP
    priority 100
{% endif %}
    interface eth0
    virtual_router_id 1
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        10.0.0.3
    }
}
[root@m01 ansible_role]# cat keepalived/handlers/main.yml
- name: Restart Keepalived
systemd:
name: keepalived
state: restarted
内部共享上网部署
[root@m01 ansible_role]# cat firewalld/tasks/main.yml
- name: config eth1
shell: "{{ item.shell }}"
loop:
- { shell: 'echo "GATEWAY={{ gateway_ip }}" >> /etc/sysconfig/network-scripts/ifcfg-eth1' }
- { shell: 'echo "DNS1={{ dns1_ip }}" >> /etc/sysconfig/network-scripts/ifcfg-eth1' }
- name: Systemd Network server
systemd:
name: network
state: restarted
- name: Configure eth0
shell: "ifdown eth0"
变量
[root@m01 ansible_role]# cat group_vars/all
#php packages
packages:
- nginx
- php71w
- php71w-cli
- php71w-common
- php71w-devel
- php71w-embedded
- php71w-gd
- php71w-mbstring
- php71w-pdo
- php71w-xml
- php71w-fpm
- php71w-mysqlnd
- php71w-opcache
- php71w-mcrypt
- php71w-pecl-memcached
- php71w-pecl-mongodb
- php71w-pecl-redis
- php71w-pecl-zip
- php71w-bcmath
# kod web
redis_server_ip: 172.16.1.51
redis_server_port: 6379
web_process_user: www
web_process_group: www
cloud_server_name: ansible.littlesun.com
cloud_server_port: 80
cloud_code_path: /code/ansible
# kod proxy
cloud_proxy_port: 80
#wordpress
blog_server_name: blog.littlesun.com
blog_server_port: 80
blog_code_path: /code/wordpress
#zh
zh_code_path: /code/zh
zh_server_port: 80
zh_server_name: zh.littlesun.com
#zh-proxy
zh_proxy_port: 80
zh_proxy_https_port: 443
ssl_certifi: /etc/nginx/ssl_key/server.crt
ssl_certifi_key: /etc/nginx/ssl_key/server.key
#wordpress-proxy
blog_proxy_port: 80
blog_proxy_https_port: 443
ssl_certifi: /etc/nginx/ssl_key/server.crt
ssl_certifi_key: /etc/nginx/ssl_key/server.key
#mysql
mariadb_master: 172.16.1.51
# nfs
nfs_wordpress: /data/wordpress
nfs_wecenter: /data/wecenter
nfs_zrlog: /data/zrlog
nfs_share_ip: 172.16.1.0/24
#firewalld
gateway_ip: 172.16.1.61
dns1_ip: 223.5.5.5
wordpress部署
[root@m01 ansible_role]# cat wordpress-web/tasks/main.yml
- name: Create Nginx VirtHost Configure
template:
src: blog.oldxu.com.conf.j2
dest: /etc/nginx/conf.d/blog.oldxu.com.conf
notify: Restart Nginx Server
- name: Create Wordpress Directory
file:
path: "{{ blog_code_path }}"
state: directory
owner: "{{ web_process_user }}"
group: "{{ web_process_group }}"
recurse: yes
- name: Copy Wordpress Code
unarchive:
src: wordpress.zip
dest: "{{ blog_code_path }}"
copy: yes
owner: "{{ web_process_user }}"
group: "{{ web_process_group }}"
creates: "{{ blog_code_path }}/wp-config.php"
- name: Push Wordpress Sql File
copy:
src: wordpress.sql
dest: /tmp/wordpress.sql
- name: Import Wordpress Sql
mysql_db:
login_host: "{{ mariadb_master }}"
login_user: all
login_password: 123456
name: wordpress
state: import
target: /tmp/wordpress.sql
[root@m01 ansible_role]# cat wordpress-web/templates/blog.oldxu.com.conf.j2
server {
listen {{ blog_server_port }};
server_name {{ blog_server_name }};
root {{ blog_code_path }};
location / {
index index.php;
}
location ~ \.php$ {
fastcgi_pass 127.0.0.1:9000;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
fastcgi_param HTTPS on;
include fastcgi_params;
}
}
[root@m01 ansible_role]# cat wordpress-web/meta/main.yml
dependencies:
- nginx-web
- php
[root@m01 ansible_role]# cat wordpress-web/files/wordpress.sql
-- MySQL dump 10.14 Distrib 5.5.64-MariaDB, for Linux (x86_64)
--
-- Host: localhost Database: wordpress
-- ------------------------------------------------------
-- Server version 5.5.64-MariaDB
[root@m01 ansible_role]# cat wordpress-web/files/wordpress.zip
[root@m01 ansible_role]# cat wordpress-web/handlers/main.yml
- name: Restart Nginx Server
systemd:
name: nginx
state: restarted
负载均衡接入部署
[root@m01 ansible_role]# cat wordpress-proxy/tasks/main.yml
- name: Import SSL Key
copy:
src: ssl_key
dest: /etc/nginx/
- name: Create Nginx Proxy VirtHost Configure
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
loop:
- { src: proxy_params.j2 , dest: /etc/nginx/proxy_params }
- { src: proxy_blog.oldxu.com.conf.j2 , dest: /etc/nginx/conf.d/proxy_blog.oldxu.com.conf }
notify: Restart Nginx Server
[root@m01 ansible_role]# cat wordpress-proxy/templates/proxy_params.j2
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_connect_timeout 30;
proxy_send_timeout 60;
proxy_read_timeout 60;
proxy_buffering on;
proxy_buffer_size 32k;
proxy_buffers 4 128k;
[root@m01 ansible_role]# cat wordpress-proxy/templates/proxy_blog.oldxu.com.conf.j2
##
upstream {{ blog_server_name }} {
{% for host in groups['web'] %}
server {{ host }}:{{ blog_server_port }};
{% endfor %}
}
server {
listen {{ blog_proxy_https_port }} ssl;
ssl_certificate {{ ssl_certifi }};
ssl_certificate_key {{ ssl_certifi_key }};
server_name {{ blog_server_name }};
location / {
proxy_pass http://{{ blog_server_name }};
include proxy_params;
}
}
server {
listen {{ blog_proxy_port }};
server_name {{ blog_server_name }};
return 302 https://{{ blog_server_name }}$request_uri;
}
[root@m01 ansible_role]# cat wordpress-proxy/meta/main.yml
dependencies:
- nginx-web
cat wordpress-proxy/files/ssl_key/server.crt
-----BEGIN CERTIFICATE-----
MIID9zCCAt+gAwIBAgIJAOLMcwpV35PvMA0GCSqGSIb3DQEBCwUAMIGQMQswCQYD
VQQGEwJDTjEOMAwGA1UECAwFY2hpbmExEDAOBgNVBAcMB2JlaWppbmcxDjAMBgNV
BAoMBXNoYWhlMRYwFAYDVQQLDA1ldGlhbnRpYW4ub3JnMRYwFAYDVQQDDA1ldGlh
bnRpYW4ub3JnMR8wHQYJKoZIhvcNAQkBFhA1NTI0MDg5MjVAcXEuY29tMCAXDTE5
MTIxMjAyMDQ0OVoYDzIxMTkxMTE4MDIwNDQ5WjCBkDELMAkGA1UEBhMCQ04xDjAM
BgNVBAgMBWNoaW5hMRAwDgYDVQQHDAdiZWlqaW5nMQ4wDAYDVQQKDAVzaGFoZTEW
MBQGA1UECwwNZXRpYW50aWFuLm9yZzEWMBQGA1UEAwwNZXRpYW50aWFuLm9yZzEf
MB0GCSqGSIb3DQEJARYQNTUyNDA4OTI1QHFxLmNvbTCCASIwDQYJKoZIhvcNAQEB
BQADggEPADCCAQoCggEBALGmznkmkXeJWrfXQKb7LmZFdqBrxGQ1nhayWC4/cpsE
D279O/YvzLfyt19Q0MEkG9TjNF1GEYW2YwX5/rx4Z2npaW+XizG6BfGYD7hDboSn
/wrSSxqZw2+d0nbjySXsjH0EM4yFzytknOPI4Ns1cMJCivabMHi2zeyE7w0XODQr
x5w95YS4dp7ex3HvVOe0zpEXhkajJhKQmb5pepnafM9MPDxPP5eXllVNMgiL0vTl
SPqwY15BlB1gHhH8e7FE7Qm2Auh3Bdgai8RNvo0pkNBZaO9y2AVdU4D+18ab80Jh
8HDlJFCcXTonNJkInrfGnzOaPTy1KQb/sFaGJIFmCc0CAwEAAaNQME4wHQYDVR0O
BBYEFCt6GdqD3p3VzTUu1/VW3R6LPuMQMB8GA1UdIwQYMBaAFCt6GdqD3p3VzTUu
1/VW3R6LPuMQMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAFTyeETT
OHJ8Cgno+7RO9ntwvo6nLbwnWxpBnXA/cAEaYWgIZh6a9bwSswoFPhatpggP/7Or
/KNjuOQTA8PLfIeaAr3oWMuHRng8aXgKxT6XYyQQZT61CpyDb3ztiuqHVszAvyEO
78+LACg2WHdrqvxJ8TLmU9xS2Mg0a/v/Utw0J0RIeWWrUPMHGMGw+XGUJmOKazXw
vh/DldY6dWu7OpxXaYfsuG90SUmu6rzxFV3CvtMEa1MIJWYUOZR4Z2o6x3XLPT2A
e0+/KgoRA2sXC9obim/9SqWrG3nbI9NHoI3Tmqu/vWem7mlDOIp6FapMIqaNnRQJ
XP5U7XhCBN0/Ass=
-----END CERTIFICATE-----
[root@m01 ansible_role]# cat wordpress-proxy/files/ssl_key/server.key
-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCxps55JpF3iVq3
10Cm+y5mRXaga8RkNZ4WslguP3KbBA9u/Tv2L8y38rdfUNDBJBvU4zRdRhGFtmMF
+f68eGdp6Wlvl4sxugXxmA+4Q26Ep/8K0ksamcNvndJ248kl7Ix9BDOMhc8rZJzj
yODbNXDCQor2mzB4ts3shO8NFzg0K8ecPeWEuHae3sdx71TntM6RF4ZGoyYSkJm+
aXqZ2nzPTDw8Tz+Xl5ZVTTIIi9L05Uj6sGNeQZQdYB4R/HuxRO0JtgLodwXYGovE
Tb6NKZDQWWjvctgFXVOA/tfGm/NCYfBw5SRQnF06JzSZCJ63xp8zmj08tSkG/7BW
hiSBZgnNAgMBAAECggEBAK9kwPVb3sLmGWroWiyCUY0nh7darstBJJPEAMuoWL4z
hAlQe6LUDoqOV/0DDRejTtcgetKcGzCooZuE9rudJHystiw2snal8rhUKTEDYloW
gvdsKDpbFVlduC0pN675nLrRKxPSCHgPHdbGvTmvyWQBzrErBvAni6e4l0uVYSgf
ayO6R2TTF1oFnowRR7M6VZJQG17eXqooD2tr/qTgfhlSyjRO9EsxX3NVlLDVA9oo
2NlHKIQaKYHDlvzreIhnFCYopjQLPydWmxig50FQUprbagS0EaIqyPRJmaTR53e9
AziJQjsc3eJQaR9oZQocNylca78nq8KibV4PBURZ/QECgYEA5C35hKIxOghmk8Cu
4dys3ckVjVGQNMjxLBelm7IKYZHvRg3suwSS2//3WacLMtDZq2ZQgn4ESeVaqhmY
Yir3ypMXBcFA45gHA0BpvzvCyYHhN4tl8Qo82mP/eufvYV1MNF0Vp8/VSIPV5Juy
otW6jZX1xdhzOXZe1UUOXfKMKocCgYEAx0+7uaPhJJtrUT+fU6Bra0mBKKEIEGY1
nCp0crdTYuyFVd5E+/z3ZFktTJS9cRwE1ITNQpJ97rKm5/DI672hV5YIRvpzNYw1
M4Bchyiq0IEu/p7uLMe9CteLFQbDT4O0xf59t/2+Hpc+BdyaFMBdWH5raNlrSw+v
mwDFHpmdmgsCgYEAkUGTLhPyMndWlYh7y9ynTyjLR4M8wd2JagBDtLHKWgKEx5vl
r8hhlRuFYtUsE9VQPASODAQcxs3ywYY5s4SW6aZpaYh0vm3xSOigaNSqU40SykE7
TA6wXnZ6FMvBX8H82PjI6GMt61PBvh4/wdk9QRTNuHzqHRJ1nF6xnqzg/dECgYB5
fjvzbFcYZnjjfQHVCyuPqa3Go2pz/KnfOO3k46U73utkLWolpMz06Xs4fE6CPU8g
cJdiZkPHljUxm1AOi3hnGHiRM6e402+2ph9A8GQ3Vxfbc+vBdjvg+QS523aA6Mrw
CjJ+Tnlu0kincBKoZZ5BDEbTEf1aMwSbWrvN7FvqgwKBgCKcn2LGnqOGLuXmFE8s
SoA+lxUuuKD7qOwIKxJZ8FgdW6qZc8NTOEd7+dFaZmSZiA2zAVK+0CPfUIU4eymD
UD/0Da4qWyBuP3gUEcx3xOaRMkx/wLjVsA3HlqXmFxD+I8IQXKUNgYDCpDkq1rO3
9SxHO/gBYebaDG0vN4DZzYso
-----END PRIVATE KEY-----
[root@m01 ansible_role]# cat wordpress-proxy/handlers/main.yml
- name: Restart Nginx Server
systemd:
name: nginx
state: restarted
可道云部署
[root@m01 ansible_role]# cat kodcloud-web/tasks/main.yml
- name: create nginx
template:
src: ansible.oldxu.com.conf.j2
dest: /etc/nginx/conf.d/ansible.oldxu.com.conf
notify: Restart Nginx Server
- name: create cloud directory
file:
path: "{{ cloud_code_path }}"
state: directory
owner: "{{ web_process_user }}"
group: "{{ web_process_group }}"
recurse: yes
- name: copy cloud code
unarchive:
src: kodexplorer4.40.zip
dest: "{{ cloud_code_path }}"
copy: yes
owner: "{{ web_process_user }}"
group: "{{ web_process_group }}"
creates: "{{ cloud_code_path }}/index.php"
[root@m01 ansible_role]# cat kodcloud-web/templates/ansible.oldxu.com.conf.j2
server {
listen {{ cloud_server_port }};
server_name {{ cloud_server_name }};
root {{ cloud_code_path }};
location / {
index index.php;
}
location ~ \.php$ {
fastcgi_pass 127.0.0.1:9000;
fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
include fastcgi_params;
}
}
[root@m01 ansible_role]# cat kodcloud-web/handlers/main.yml
- name: Restart Nginx Server
systemd:
name: nginx
state: restarted
[root@m01 ansible_role]# cat kodcloud-web/meta/main.yml
dependencies:
- nginx-web
- php
[root@m01 ansible_role]# cat kodcloud-web/files/kodexplorer4.40.zip
可道云接入负载均衡
[root@m01 ansible_role]# cat kodcloud-proxy/tasks/main.yml
- name: create nginx proxy
template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
loop:
- { src: proxy_params.j2 , dest: /etc/nginx/proxy_params }
- { src: proxy_ansible.oldxu.com.conf.j2 , dest: /etc/nginx/conf.d/proxy_ansible.oldxu.com.conf }
notify: Restart Nginx Server
[root@m01 ansible_role]# cat kodcloud-proxy/templates/proxy_params.j2
proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_http_version 1.1;
proxy_set_header Connection "";
proxy_connect_timeout 30;
proxy_send_timeout 60;
proxy_read_timeout 60;
proxy_buffering on;
proxy_buffer_size 32k;
proxy_buffers 4 128k;
[root@m01 ansible_role]# cat kodcloud-proxy/templates/proxy_ansible.oldxu.com.conf.j2
#
upstream {{ cloud_server_name }} {
{% for host in groups['web'] %}
server {{ host }}:{{ cloud_server_port }};
{% endfor %}
}
server {
listen {{ cloud_proxy_port }};
server_name {{ cloud_server_name }};
location /{
proxy_pass http://{{ cloud_server_name }};
include proxy_params;
}
}
[root@m01 ansible_role]# cat kodcloud-proxy/meta/main.yml
dependencies:
- nginx-web
[root@m01 ansible_role]# cat kodcloud-proxy/handlers/main.yml
- name: Restart Nginx Server
systemd:
name: nginx
state: restarted
zrlog部署(手动)
安装java,设置变量
export JAVA_HOME=/usr/java/jdk1.8.0_221
export PATH=$JAVA_HOME/bin:$PATH
安装tomcat,激活
[root@web01 bin]# ./startup.sh
Using CATALINA_BASE: /usr/tomcat/apache-tomcat-8.5.50
Using CATALINA_HOME: /usr/tomcat/apache-tomcat-8.5.50
Using CATALINA_TMPDIR: /usr/tomcat/apache-tomcat-8.5.50/temp
Using JRE_HOME: /usr/java/jdk1.8.0_221
Using CLASSPATH: /usr/tomcat/apache-tomcat-8.5.50/bin/bootstrap.jar:/usr/tomcat/apache-tomcat-8.5.50/bin/tomcat-juli.jar
Tomcat started.
[root@web01 bin]# pwd
/usr/tomcat/apache-tomcat-8.5.50/bin
[root@web01 bin]# cat /etc/nginx/conf.d/zrlog.oldxu.com.conf
server {
listen 8000;
server_name zrlog.littlesun.com;
location / {
proxy_pass http://127.0.0.1:8080;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
}
}
[root@lb01 ~]# cat /etc/nginx/conf.d/proxy_zrlog.oldxu.com.conf
upstream zrlog {
server 172.16.1.7:8000;
server 172.16.1.8:8000;
}
server {
listen 443 ssl;
server_name zrlog.littlesun.com;
ssl_certificate ssl_key/server.crt;
ssl_certificate_key ssl_key/server.key;
location / {
proxy_pass http://zrlog;
include proxy_params;
}
}
server {
listen 80;
server_name zrlog.littlesun.com;
return 302 https://$http_host$request_uri;
}
### 域名劫持网站测试
![在这里插入图片描述](https://img-blog.csdnimg.cn/20200107165812692.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3dlaXhpbl80NTU4MTQ4Nw==,size_16,color_FFFFFF,t_70)
![在这里插入图片描述](https://img-blog.csdnimg.cn/20200107165833510.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3dlaXhpbl80NTU4MTQ4Nw==,size_16,color_FFFFFF,t_70)
![在这里插入图片描述](https://img-blog.csdnimg.cn/20200107165841671.png?x-oss-process=image/watermark,type_ZmFuZ3poZW5naGVpdGk,shadow_10,text_aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3dlaXhpbl80NTU4MTQ4Nw==,size_16,color_FFFFFF,t_70)
检查挂载
[root@web01 ~]# df -h
文件系统 容量 已用 可用 已用% 挂载点
/dev/sda1 100G 3.5G 97G 4% /
devtmpfs 381M 0 381M 0% /dev
tmpfs 392M 0 392M 0% /dev/shm
tmpfs 392M 12M 380M 3% /run
tmpfs 392M 0 392M 0% /sys/fs/cgroup
tmpfs 79M 0 79M 0% /run/user/0
172.16.1.31:/data/wecenter 100G 1.9G 99G 2% /code/zh/uploads/article
172.16.1.31:/data/zrlog 100G 1.9G 99G 2% /usr/tomcat/apache-tomcat-8.5.50/webapps/ROOT/attached
检查实时同步
[root@nfs01 ~]# ll /data/
[root@backup ~]# ll /backup/
执行m01开启防火墙检查内部共享上网
firewall-cmd --add-masquerade
lb开启防火墙设置开放端口
[root@lb01 ~]# cat firewall
# Open the ports the load balancer actually serves: 80 (redirect) and 443 (HTTPS).
# The original only opened 80, which blocks the full-site HTTPS requirement.
firewall-cmd --zone=public --add-port=80/tcp
firewall-cmd --zone=public --add-port=80/tcp --permanent
firewall-cmd --zone=public --add-port=443/tcp
firewall-cmd --zone=public --add-port=443/tcp --permanent
堡垒机安装(m01)10.0.0.61:7190
连接测试