A General Solution for Offline Data Collection

Overview

Use DataX for a daily full load of the tables that need full synchronization, and use Maxwell to capture incremental changes into Kafka; Flume then consumes from Kafka and writes the data to HDFS.
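As a rough sketch of how the pieces fit together (the script names are the ones introduced section by section below; the ordering here is only illustrative):

# One-time setup: generate the DataX job JSON files and create the Kafka topics
bash allfile.sh
bash createtopic.sh

# Long-running: Maxwell pushes incremental changes to Kafka, the Flume agent
# (started with the Flume start/stop script in the Flume section) writes them to HDFS
bash startmaxwell.sh start

# Daily: full load of the full-refresh tables into yesterday's HDFS partition
bash mysql-to-hdfs-datax.sh all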

DataX Full Load

Generating the Job JSON Templates

gen_import_config.py

# coding=utf-8
import json
import getopt
import os
import sys
import MySQLdb

# MySQL connection settings; adjust to your environment
mysql_host = "master"
mysql_port = "3306"
mysql_user = "root"
mysql_passwd = "root"

# HDFS NameNode settings; adjust to your environment
hdfs_nn_host = "master"
hdfs_nn_port = "8020"

# Output directory for the generated job configuration files; adjust as needed
output_path = "/home/bigdata/datax/datax/job/pyjson"

# Get a MySQL connection
def get_connection():
    return MySQLdb.connect(host=mysql_host, port=int(mysql_port), user=mysql_user, passwd=mysql_passwd)

# Get the table metadata: column names and data types
def get_mysql_meta(database, table):
    connection = get_connection()
    cursor = connection.cursor()
    sql = "SELECT COLUMN_NAME,DATA_TYPE from information_schema.COLUMNS WHERE TABLE_SCHEMA=%s AND TABLE_NAME=%s ORDER BY ORDINAL_POSITION"
    cursor.execute(sql, [database, table])
    fetchall = cursor.fetchall()
    cursor.close()
    connection.close()
    return fetchall

# Get the column names of the MySQL table
def get_mysql_columns(database, table):
    return map(lambda x: x[0], get_mysql_meta(database, table))

# Map the MySQL data types in the metadata to Hive data types for the hdfswriter column definitions
def get_hive_columns(database, table):
    def type_mapping(mysql_type):
        mappings = {
            "bigint": "bigint",
            "int": "bigint",
            "smallint": "bigint",
            "tinyint": "bigint",
            "decimal": "string",
            "double": "double",
            "float": "float",
            "binary": "string",
            "char": "string",
            "varchar": "string",
            "datetime": "string",
            "time": "string",
            "timestamp": "string",
            "date": "string",
            "text": "string"
        }
        return mappings[mysql_type]

    meta = get_mysql_meta(database, table)
    return map(lambda x: {"name": x[0], "type": type_mapping(x[1].lower())}, meta)

# Generate the DataX job JSON file
def generate_json(source_database, source_table):
    job = {
        "job": {
            "setting": {
                "speed": {
                    "channel": 3
                },
                "errorLimit": {
                    "record": 0,
                    "percentage": 0.02
                }
            },
            "content": [{
                "reader": {
                    "name": "mysqlreader",
                    "parameter": {
                        "username": mysql_user,
                        "password": mysql_passwd,
                        "column": get_mysql_columns(source_database, source_table),
                        "splitPk": "",
                        "connection": [{
                            "table": [source_table],
                            "jdbcUrl": ["jdbc:mysql://" + mysql_host + ":" + mysql_port + "/" + source_database]
                        }]
                    }
                },
                "writer": {
                    "name": "hdfswriter",
                    "parameter": {
                        "defaultFS": "hdfs://" + hdfs_nn_host + ":" + hdfs_nn_port,
                        "fileType": "text",
                        "path": "${targetdir}",
                        "fileName": source_table,
                        "column": get_hive_columns(source_database, source_table),
                        "writeMode": "append",
                        "fieldDelimiter": "\t",
                        "compress": "gzip"
                    }
                }
            }]
        }
    }
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    with open(os.path.join(output_path, ".".join([source_database, source_table, "json"])), "w") as f:
        json.dump(job, f)


def main(args):
    source_database = ""
    source_table = ""

    options, arguments = getopt.getopt(args, '-d:-t:', ['sourcedb=', 'sourcetbl='])
    for opt_name, opt_value in options:
        if opt_name in ('-d', '--sourcedb'):
            source_database = opt_value
        if opt_name in ('-t', '--sourcetbl'):
            source_table = opt_value

    generate_json(source_database, source_table)


if __name__ == '__main__':
    main(sys.argv[1:])

The script above may not run as-is: the MySQLdb module may not be available in your Python environment, and under Python 3 map() returns an iterator that json.dump cannot serialize. The revised version below uses pymysql instead and wraps the map calls in list().

pip install pymysql

#!/usr/bin/env python
# coding=utf-8
# Usage: python gen_import_config.py -d <database> -t <table>
import json
import getopt
import os
import sys
import pymysql
 
# MySQL connection settings; adjust to your environment
mysql_host = ""
mysql_port = 3306
mysql_user = ""
mysql_passwd = ""

# HDFS NameNode settings; adjust to your environment
hdfs_nn_host = "master"
hdfs_nn_port = "8020"

# Output directory for the generated job configuration files; adjust as needed
output_path = "/home/bigdata/test"
 
# Get the table metadata: column names and data types
def get_mysql_meta(database, table):
    connection = pymysql.connect(
                 host=mysql_host,        # connection host
                 user=mysql_user,        # user
                 password=mysql_passwd,  # database password; change to your own
                 port=mysql_port,
                 connect_timeout=10000
                 )
    cursor = connection.cursor()
    sql = "SELECT COLUMN_NAME,DATA_TYPE from information_schema.COLUMNS WHERE TABLE_SCHEMA=%s AND TABLE_NAME=%s ORDER BY ORDINAL_POSITION"
    cursor.execute(sql, [database, table])
    fetchall = cursor.fetchall()
    cursor.close()
    connection.close()
    return fetchall
 
# Get the column names of the MySQL table
def get_mysql_columns(database, table):
    return map(lambda x: x[0], get_mysql_meta(database, table))
 
# Map the MySQL data types in the metadata to Hive data types for the hdfswriter column definitions
def get_hive_columns(database, table):
    def type_mapping(mysql_type):
        mappings = {
            "bigint": "bigint",
            "int": "bigint",
            "smallint": "bigint",
            "tinyint": "bigint",
            "decimal": "string",
            "double": "double",
            "float": "float",
            "binary": "string",
            "char": "string",
            "varchar": "string",
            "datetime": "string",
            "time": "string",
            "timestamp": "string",
            "date": "string",
            "text": "string"
        }
        return mappings[mysql_type]
 
    meta = get_mysql_meta(database, table)
    return map(lambda x: {"name": x[0], "type": type_mapping(x[1].lower())}, meta)
 
# Generate the DataX job JSON file
def generate_json(source_database, source_table):
    job = {
        "job": {
            "setting": {
                "speed": {
                    "channel": 3
                },
                "errorLimit": {
                    "record": 0,
                    "percentage": 0.02
                }
            },
            "content": [{
                "reader": {
                    "name": "mysqlreader",
                    "parameter": {
                        "username": mysql_user,
                        "password": mysql_passwd,
                        "column": list(get_mysql_columns(source_database, source_table)),
                        "splitPk": "",
                        "connection": [{
                            "table": [source_table],
                            "jdbcUrl": ["jdbc:mysql://" + mysql_host + ":" + str(mysql_port) + "/" + source_database]
                        }]
                    }
                },
                "writer": {
                    "name": "hdfswriter",
                    "parameter": {
                        "defaultFS": "hdfs://" + hdfs_nn_host + ":" + hdfs_nn_port,
                        "fileType": "text",
                        "path": "${targetdir}",
                        "fileName": source_table,
                        "column": list(get_hive_columns(source_database, source_table)),
                        "writeMode": "append",
                        "fieldDelimiter": "\t",
                        "compress": "gzip"
                    }
                }
            }]
        }
    }
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    with open(os.path.join(output_path, ".".join([source_database, source_table, "json"])), "w") as f:
        json.dump(job, f)
 
 
def main(args):
    source_database = ""
    source_table = ""
 
    options, arguments = getopt.getopt(args, '-d:-t:', ['sourcedb=', 'sourcetbl='])
    for opt_name, opt_value in options:
        if opt_name in ('-d', '--sourcedb'):
            source_database = opt_value
        if opt_name in ('-t', '--sourcetbl'):
            source_table = opt_value
    generate_json(source_database, source_table)
 
if __name__ == '__main__':
    main(sys.argv[1:])
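A quick sanity check after generating a job file (the database and table names here are placeholders; bigdata.activity_info is the file used in the examples further down):

# Generate the job file for one table
python gen_import_config.py -d bigdata -t activity_info

# The file is written to output_path as <database>.<table>.json; confirm it is valid JSON
python -m json.tool /home/bigdata/test/bigdata.activity_info.json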

How to use the script (each run generates the JSON job file for the given table)

allfile.sh

#!/bin/bash

python gen_import_config.py  -d <database> -t <table_name>
python gen_import_config.py  -d <database> -t <table_name>
python gen_import_config.py  -d <database> -t <table_name>
python gen_import_config.py  -d <database> -t <table_name>
python gen_import_config.py  -d <database> -t <table_name>

Sample script for the full load into HDFS

mysql-to-hdfs-datax.sh

#!/bin/bash
# Usage example: mysql_to_hdfs_full.sh all -- change DATAX_HOME and the job file paths below and it is ready to use
DATAX_HOME=/home/bigdata/datax/datax

# If a date is passed as the second argument, do_date is that date; otherwise it defaults to the previous day (yesterday)
if [ -n "$2" ] ;then
    do_date=$2
else
    do_date=`date -d "-1 day" +%F`
fi

# Handle the target path: create it if it does not exist; if it exists, empty it so the sync job can be rerun safely
handle_targetdir() {
  hadoop fs -test -e $1
  if [[ $? -eq 1 ]]; then
    echo "路径$1不存在,正在创建......"
    hadoop fs -mkdir -p $1
  else
    echo "路径$1已经存在"
    fs_count=$(hadoop fs -count $1)
    content_size=$(echo $fs_count | awk '{print $3}')
    if [[ $content_size -eq 0 ]]; then
      echo "路径$1为空"
    else
      echo "路径$1不为空,正在清空......"
      hadoop fs -rm -r -f $1/*
    fi
  fi
}

# Data sync
import_data() {
#$1 /home/bigdata/datax/datax/job/pyjson/bigdata.activity_info.json
#$2 /origin_data/bigdata/db/activity_info_full/$do_date
  datax_config=$1
  target_dir=$2

  handle_targetdir $target_dir
  python $DATAX_HOME/bin/datax.py -p"-Dtargetdir=$target_dir" $datax_config
}

case $1 in
"activity_info")
# Change /home/bigdata/datax/datax/job/pyjson to the directory where your job files were generated
  import_data /home/bigdata/datax/datax/job/pyjson/bigdata.activity_info.json /origin_data/bigdata/full_db/activity_info_full/$do_date
  ;;
"all")
  import_data /home/bigdata/datax/datax/job/pyjson/bigdata.activity_info.json /origin_data/bigdata/full_db/activity_info_full/$do_date
  ;;
esac
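To run the full load automatically every day, the script above can be scheduled with cron. A minimal sketch, assuming the script and a logs directory live under /home/bigdata/shell (both paths are assumptions):

# crontab -e
# Run the full load for all tables at 00:30; do_date defaults to yesterday
30 0 * * * /bin/bash /home/bigdata/shell/mysql-to-hdfs-datax.sh all >> /home/bigdata/shell/logs/full_import.log 2>&1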

Maxwell Incremental Capture

Creating the Kafka Topics

createtopic.sh

#!/bin/bash
/home/bigdata/kafka/kafka_2.11-2.4.1/bin/kafka-topics.sh --bootstrap-server master:9092,node1:9092 --partitions 3 --replication-factor 3  --create --topic <table_name>
/home/bigdata/kafka/kafka_2.11-2.4.1/bin/kafka-topics.sh --bootstrap-server master:9092,node1:9092 --partitions 3 --replication-factor 3  --create --topic <table_name>
/home/bigdata/kafka/kafka_2.11-2.4.1/bin/kafka-topics.sh --bootstrap-server master:9092,node1:9092 --partitions 3 --replication-factor 3  --create --topic <table_name>
/home/bigdata/kafka/kafka_2.11-2.4.1/bin/kafka-topics.sh --bootstrap-server master:9092,node1:9092 --partitions 3 --replication-factor 3  --create --topic <table_name>
/home/bigdata/kafka/kafka_2.11-2.4.1/bin/kafka-topics.sh --bootstrap-server master:9092,node1:9092 --partitions 3 --replication-factor 3  --create --topic <table_name>
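After creating the topics, it is worth confirming that they exist with the expected partition and replication settings:

# List all topics on the cluster
/home/bigdata/kafka/kafka_2.11-2.4.1/bin/kafka-topics.sh --bootstrap-server master:9092,node1:9092 --list

# Show partitions, replicas and ISR for one topic
/home/bigdata/kafka/kafka_2.11-2.4.1/bin/kafka-topics.sh --bootstrap-server master:9092,node1:9092 --describe --topic <table_name>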

Maxwell configuration file

# tl;dr config
log_level=info
client_id=fy_client_test02
replica_server_id=12302
 
producer=kafka
kafka.compression.type=snappy
kafka.retries=3
kafka.acks=-1
#kafka.batch.size=16384
kafka.bootstrap.servers=cdh-server:9092,agent01:9092,agent02:9092
kafka_topic=%{database}_%{table}
# Maxwell metadata database (where Maxwell stores its own state)
host=cdh-server
port=3306
user=maxwell
password=<password>
jdbc_options=useSSL=false&serverTimezone=Asia/Shanghai&characterEncoding=utf-8

#output_ddl=true
# Spread records across the topic's Kafka partitions by primary key
producer_partition_by=primary_key
kafka_partition_hash=murmur3
 
 
# Target database (the database whose changes are captured)
replication_host=
replication_user=
replication_password=
replication_port=3306
# Target database JDBC options
replication_jdbc_options=useSSL=false&serverTimezone=Asia/Shanghai&characterEncoding=utf-8
#jdbc_options=useSSL=false&serverTimezone=UTC&characterEncoding=utf-8
filter=exclude: *.*,include: <database>.<table>,include: <database>.<table>,include: <database>.<table>,include: <database>.<table>
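Maxwell also needs some preparation on the MySQL side: the binlog must be enabled in ROW format, and the maxwell user referenced above needs its metadata schema plus replication privileges. A sketch of those prerequisites (host name and password are placeholders):

# my.cnf on the captured MySQL server, then restart MySQL:
#   [mysqld]
#   server_id=1
#   log-bin=mysql-bin
#   binlog_format=ROW

# Create the maxwell user and grant the required privileges
mysql -h cdh-server -uroot -p <<'SQL'
CREATE USER IF NOT EXISTS 'maxwell'@'%' IDENTIFIED BY '<password>';
GRANT ALL ON maxwell.* TO 'maxwell'@'%';
GRANT SELECT, REPLICATION CLIENT, REPLICATION SLAVE ON *.* TO 'maxwell'@'%';
SQL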

Maxwell start/stop script

startmaxwell.sh

#!/bin/bash
#/home/bigdata/maxwell/maxwell-1.29.2/bin/maxwell --config /home/bigdata/maxwell/maxwell-1.29.2/config.properties --daemon
MAXWELL_HOME=/home/bigdata/maxwell/maxwell-1.29.2
 
status_maxwell(){
    # Match on Maxwell's main class so this script's own process is not counted
    result=`ps -ef | grep com.zendesk.maxwell.Maxwell | grep -v grep | wc -l`
    return $result
}
 
 
start_maxwell(){
    status_maxwell
    if [[ $? -lt 1 ]]; then
        echo "启动Maxwell"
        $MAXWELL_HOME/bin/maxwell --config $MAXWELL_HOME/config.properties --daemon
    else
        echo "Maxwell正在运行"
    fi
}
 
 
stop_maxwell(){
    status_maxwell
    if [[ $? -gt 0 ]]; then
        echo "停止Maxwell"
        ps -ef | grep maxwell | grep -v grep | awk '{print $2}' | xargs kill -9
    else
        echo "Maxwell未在运行"
    fi
}
 
 
case $1 in
    "start" )
        start_maxwell
    ;;
    "stop" )
        stop_maxwell
    ;;
    "restart" )
       stop_maxwell
       start_maxwell
    ;;
esac

A simpler version

#!/bin/bash
/home/bigdata/maxwell/maxwell-1.29.2/bin/maxwell --config /home/bigdata/maxwell/maxwell-1.29.2/config.properties --daemon
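Once Maxwell is running, update a row in one of the monitored tables and watch the corresponding topic with a console consumer to confirm that change events are arriving (the topic name follows the %{database}_%{table} pattern from the config above):

/home/bigdata/kafka/kafka_2.11-2.4.1/bin/kafka-console-consumer.sh \
  --bootstrap-server master:9092,node1:9092 \
  --topic <database>_<table> \
  --from-beginning --max-messages 5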

First-day full bootstrap

alldatatohdfs.sh

#!/bin/bash

# This script bootstraps (initializes) all of the incremental tables; it only needs to be run once

MAXWELL_HOME=/home/bigdata/maxwell/maxwell-1.29.2

import_data() {
    $MAXWELL_HOME/bin/maxwell-bootstrap --database <database> --table $1 --config $MAXWELL_HOME/config.properties
}

case $1 in
"cart_info")
  import_data cart_info
  ;;
"all")
  import_data user_info
  ;;
esac
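Usage follows the same pattern as the DataX script. Each bootstrap run replays the current contents of the table through Maxwell into Kafka, framed by bootstrap-start and bootstrap-complete records, so the downstream Flume/HDFS path receives a full snapshot:

# Bootstrap a single table
bash alldatatohdfs.sh cart_info

# Bootstrap everything listed under the "all" branch (extend the case statement with one line per table first)
bash alldatatohdfs.sh all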

Flume

Consuming the Maxwell data from Kafka


Start/stop script

#!/bin/bash

case $1 in
"start"){
        for i in node1
        do
                echo " --------启动 $i 采集flume-------"
                ssh $i "nohup /home/bigdata/flume/flume-1.9.0/bin/flume-ng agent --conf /home/bigdata/flume/flume-1.9.0/conf --conf-file /home/bigdata/shell/maxwelltoktoh/flumeconf/kafka-flume-hdfs-inc.conf --name a1 -Dflume.root.logger=INFO,console >/dev/null 2>&1 &"
        done
};; 
"stop"){
        for i in node1
        do
                echo " --------停止 $i 采集flume-------"
                ssh $i " ps -ef | grep kafka-flume-hdfs-inc.conf | grep -v grep |awk  '{print \$2}' | xargs -n1 kill -9 "
        done

};;
esac
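After starting, a quick check that the agent process is actually alive on the collection node (the same grep pattern the stop branch uses):

ssh node1 "ps -ef | grep kafka-flume-hdfs-inc.conf | grep -v grep"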

Configuration file

kafka-flume-hdfs-inc.conf

a1.sources = r1
a1.channels = c1
a1.sinks = k1

a1.sources.r1.type = org.apache.flume.source.kafka.KafkaSource
a1.sources.r1.batchSize = 5000
a1.sources.r1.batchDurationMillis = 2000
a1.sources.r1.kafka.bootstrap.servers = master:9092,node1:9092,node2:9092
a1.sources.r1.kafka.topics = activity_info,user_info
a1.sources.r1.kafka.consumer.group.id = flume
a1.sources.r1.setTopicHeader = true
a1.sources.r1.topicHeader = topic
a1.sources.r1.interceptors = i1
a1.sources.r1.interceptors.i1.type = com.flume.inter.TimestampInterceptor$Builder


a1.channels.c1.type = file
a1.channels.c1.checkpointDir = /home/bigdata/shell/logtohdfs/maxwelltoktoh/checkpoint
a1.channels.c1.dataDirs = /home/bigdata/shell/logtohdfs/maxwelltoktoh/data/
a1.channels.c1.maxFileSize = 2146435071
a1.channels.c1.capacity = 1123456
a1.channels.c1.keep-alive = 6

## sink1
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = /origin_data/gmall/inc_db/%{topic}_inc/%Y-%m-%d
a1.sinks.k1.hdfs.filePrefix = db
a1.sinks.k1.hdfs.round = false


a1.sinks.k1.hdfs.rollInterval = 10
a1.sinks.k1.hdfs.rollSize = 134217728
a1.sinks.k1.hdfs.rollCount = 0


a1.sinks.k1.hdfs.fileType = CompressedStream
a1.sinks.k1.hdfs.codeC = gzip

## Wire the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel= c1
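The interceptor referenced above (com.flume.inter.TimestampInterceptor$Builder) is a custom class not shown in this article; its usual job in this kind of pipeline is to copy the event's own timestamp (the ts field of the Maxwell JSON) into the Flume timestamp header, so the HDFS sink partitions data by event time rather than processing time (the "midnight drift" problem noted in the second configuration below). The compiled class must be on the agent's classpath; a sketch of deploying it, assuming it has been packaged as flume-interceptor-1.0.jar (the jar name is hypothetical):

# Copy the packaged interceptor jar (name assumed) into Flume's lib directory on the agent host
scp flume-interceptor-1.0.jar node1:/home/bigdata/flume/flume-1.9.0/lib/

# After restarting the agent, check that data lands in the expected date partitions
hadoop fs -ls /origin_data/gmall/inc_db/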

Notes on the configuration options

# Describe the sink
a2.sinks.k2.type = hdfs
a2.sinks.k2.hdfs.path = hdfs://master:8020/flume/data=%Y%m%d/hour=%H
# Prefix of the uploaded files
a2.sinks.k2.hdfs.filePrefix = logs-
# Whether to roll directories based on time
a2.sinks.k2.hdfs.round = true
# How many time units before a new directory is created
a2.sinks.k2.hdfs.roundValue = 1
# The time unit used for directory rolling
a2.sinks.k2.hdfs.roundUnit = hour
# Whether to use the local timestamp
a2.sinks.k2.hdfs.useLocalTimeStamp = true
# How many events to accumulate before flushing to HDFS
a2.sinks.k2.hdfs.batchSize = 100
# File type; compression is supported
a2.sinks.k2.hdfs.fileType = DataStream
# How often (seconds) to roll a new file
a2.sinks.k2.hdfs.rollInterval = 30
# Roll size (bytes) of each file
a2.sinks.k2.hdfs.rollSize = 134217700
# File rolling is independent of the number of events
a2.sinks.k2.hdfs.rollCount = 0

Example

a2.sources = r1
a2.channels = c1
a2.sinks = k1
 
a2.sources.r1.type = org.apache.flume.source.kafka.KafkaSource
a2.sources.r1.batchSize = 5000
a2.sources.r1.batchDurationMillis = 2000
a2.sources.r1.kafka.bootstrap.servers = <broker_ip>:9092,<broker_ip>:9092
a2.sources.r1.kafka.topics = <topic>,<topic>,<topic>,<topic>
a2.sources.r1.kafka.consumer.group.id = flume_product_05
a2.sources.r1.setTopicHeader = true
a2.sources.r1.topicHeader = topic
# Custom interceptor for the midnight-drift problem (partitions records by event time rather than processing time)
a2.sources.r1.interceptors = i1
a2.sources.r1.interceptors.i1.type = com.interceptor.TimeStampInterceptor$Builder
a2.sources.r1.kafka.consumer.auto.offset.reset=latest
 
a2.channels.c1.type = file
a2.channels.c1.checkpointDir = /data/module/flume-1.9.0/checkpoint/behavior4
a2.channels.c1.dataDirs = /data/module/flume-1.9.0/data/behavior4/
a2.channels.c1.maxFileSize = 2146435071
a2.channels.c1.capacity = 1000000
a2.channels.c1.keep-alive = 6
 
## sink1
a2.sinks.k1.type = hdfs
a2.sinks.k1.hdfs.path = /origin_data/db/%{database}/%{topic}_inc/%Y-%m-%d
a2.sinks.k1.hdfs.filePrefix = db
a2.sinks.k1.hdfs.round = false
 
 
a2.sinks.k1.hdfs.rollInterval = 300
a2.sinks.k1.hdfs.rollSize = 134217728
a2.sinks.k1.hdfs.rollCount = 0
 
 
a2.sinks.k1.hdfs.fileType = CompressedStream
a2.sinks.k1.hdfs.codeC = gzip
 
## Wire the source and sink to the channel
a2.sources.r1.channels = c1
a2.sinks.k1.channel = c1
 
# Adjust the following to your own requirements
#a2.sources.r1.kafka.consumer.auto.offset.reset=latest
#a2.sinks.k1.hdfs.rollInterval = 300
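A simple end-to-end check after the agent has run for a while: list yesterday's partition on HDFS and look at a few decompressed records (the path follows the hdfs.path pattern of the first configuration; replace the placeholder with a real topic):

do_date=$(date -d "-1 day" +%F)
hadoop fs -ls /origin_data/gmall/inc_db/

# hadoop fs -text decompresses the gzip files automatically
hadoop fs -text /origin_data/gmall/inc_db/<topic>_inc/$do_date/* | head -n 3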
 


Reposted from blog.csdn.net/S1124654/article/details/129138397