
A General Solution for Offline Data Collection

Overview

DataX performs a daily full sync of the full-load tables, while Maxwell captures incremental changes into Kafka, from which Flume delivers them to HDFS.

DataX Full Sync

Generating the job JSON

gen_import_config.py

# coding=utf-8
# Generates a DataX job JSON for one MySQL table (requires the MySQLdb driver).
import json
import getopt
import os
import sys
import MySQLdb

# MySQL connection settings; adjust to your environment
mysql_host = "master"
mysql_port = "3306"
mysql_user = "root"
mysql_passwd = "root"

# HDFS NameNode settings; adjust to your environment
hdfs_nn_host = "master"
hdfs_nn_port = "8020"

# Target directory for the generated config files; adjust as needed
output_path = "/home/bigdata/datax/datax/job/pyjson"


# Open a MySQL connection
def get_connection():
    return MySQLdb.connect(host=mysql_host, port=int(mysql_port), user=mysql_user, passwd=mysql_passwd)


# Fetch the table metadata: column names and data types
def get_mysql_meta(database, table):
    connection = get_connection()
    cursor = connection.cursor()
    sql = "SELECT COLUMN_NAME,DATA_TYPE from information_schema.COLUMNS WHERE TABLE_SCHEMA=%s AND TABLE_NAME=%s ORDER BY ORDINAL_POSITION"
    cursor.execute(sql, [database, table])
    fetchall = cursor.fetchall()
    cursor.close()
    connection.close()
    return fetchall


# Column names of the MySQL table
def get_mysql_columns(database, table):
    # list(...) keeps the result JSON-serializable on Python 3 as well
    return list(map(lambda x: x[0], get_mysql_meta(database, table)))


# Map MySQL data types to Hive data types for the hdfswriter column list
def get_hive_columns(database, table):
    def type_mapping(mysql_type):
        mappings = {
            "bigint": "bigint", "int": "bigint", "smallint": "bigint", "tinyint": "bigint",
            "decimal": "string", "double": "double", "float": "float", "binary": "string",
            "char": "string", "varchar": "string", "datetime": "string", "time": "string",
            "timestamp": "string", "date": "string", "text": "string"
        }
        return mappings[mysql_type]

    meta = get_mysql_meta(database, table)
    return list(map(lambda x: {"name": x[0], "type": type_mapping(x[1].lower())}, meta))


# Generate the DataX job JSON file
def generate_json(source_database, source_table):
    job = {
        "job": {
            "setting": {
                "speed": {"channel": 3},
                "errorLimit": {"record": 0, "percentage": 0.02}
            },
            "content": [{
                "reader": {
                    "name": "mysqlreader",
                    "parameter": {
                        "username": mysql_user,
                        "password": mysql_passwd,
                        "column": get_mysql_columns(source_database, source_table),
                        "splitPk": "",
                        "connection": [{
                            "table": [source_table],
                            "jdbcUrl": ["jdbc:mysql://" + mysql_host + ":" + mysql_port + "/" + source_database]
                        }]
                    }
                },
                "writer": {
                    "name": "hdfswriter",
                    "parameter": {
                        "defaultFS": "hdfs://" + hdfs_nn_host + ":" + hdfs_nn_port,
                        "fileType": "text",
                        "path": "${targetdir}",
                        "fileName": source_table,
                        "column": get_hive_columns(source_database, source_table),
                        "writeMode": "append",
                        "fieldDelimiter": "\t",
                        "compress": "gzip"
                    }
                }
            }]
        }
    }
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    with open(os.path.join(output_path, ".".join([source_database, source_table, "json"])), "w") as f:
        json.dump(job, f)


def main(args):
    source_database = ""
    source_table = ""
    options, arguments = getopt.getopt(args, '-d:-t:', ['sourcedb=', 'sourcetbl='])
    for opt_name, opt_value in options:
        if opt_name in ('-d', '--sourcedb'):
            source_database = opt_value
        if opt_name in ('-t', '--sourcetbl'):
            source_table = opt_value
    generate_json(source_database, source_table)


if __name__ == '__main__':
    main(sys.argv[1:])

How to use the script (each run generates the JSON config file for the given table)

allfile.sh

#!/bin/bash
python gen_import_config.py -d <database> -t <table>
python gen_import_config.py -d <database> -t <table>
python gen_import_config.py -d <database> -t <table>
python gen_import_config.py -d <database> -t <table>
python gen_import_config.py -d <database> -t <table>
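For example, with a hypothetical database named bigdata and a table named activity_info, the call and the resulting file would look like this:

# Hypothetical example: generate the DataX job config for bigdata.activity_info
python gen_import_config.py -d bigdata -t activity_info
# The file is written as <database>.<table>.json under output_path, e.g.
#   /home/bigdata/datax/datax/job/pyjson/bigdata.activity_info.json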

Sample script for a full import to HDFS

mysql-to-hdfs-datax.sh

#!/bin/bash
# Usage example: mysql-to-hdfs-datax.sh all
# Change DATAX_HOME and the config-file paths below and the script is ready to use.
DATAX_HOME=/home/bigdata/datax/datax

# If a date is passed in, use it as do_date; otherwise default to the previous day (yesterday)
if [ -n "$2" ] ;then
    do_date=$2
else
    do_date=`date -d "-1 day" +%F`
fi

# Handle the target path: create it if it does not exist; if it exists, empty it,
# so the sync job can be re-run safely
handle_targetdir() {
    hadoop fs -test -e $1
    if [[ $? -eq 1 ]]; then
        echo "Path $1 does not exist, creating it......"
        hadoop fs -mkdir -p $1
    else
        echo "Path $1 already exists"
        fs_count=$(hadoop fs -count $1)
        content_size=$(echo $fs_count | awk '{print $3}')
        if [[ $content_size -eq 0 ]]; then
            echo "Path $1 is empty"
        else
            echo "Path $1 is not empty, clearing it......"
            hadoop fs -rm -r -f $1/*
        fi
    fi
}

# Data sync
import_data() {
    # $1 /home/bigdata/datax/datax/job/pyjson/bigdata.activity_info.json
    # $2 /origin_data/bigdata/db/activity_info_full/$do_date
    datax_config=$1
    target_dir=$2
    handle_targetdir $target_dir
    python $DATAX_HOME/bin/datax.py -p"-Dtargetdir=$target_dir" $datax_config
}

case $1 in
"activity_info")
    # Change /home/bigdata/datax/datax/job/pyjson to wherever your generated JSON files live
    import_data /home/bigdata/datax/datax/job/pyjson/bigdata.activity_info.json /origin_data/bigdata/full_db/activity_info_full/$do_date
    ;;
"all")
    import_data /home/bigdata/datax/datax/job/pyjson/bigdata.activity_info.json /origin_data/bigdata/full_db/activity_info_full/$do_date
    ;;
esac
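A minimal invocation sketch (the table name and date below are illustrative):

# Sync a single table for a specific business date (the second argument overrides the default "yesterday")
bash mysql-to-hdfs-datax.sh activity_info 2023-02-20
# Sync everything wired into the "all" branch, with do_date defaulting to yesterday
bash mysql-to-hdfs-datax.sh all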

Maxwell Incremental Sync

Creating the Kafka topics

createtopic.sh

#!/bin/bash
/home/bigdata/kafka/kafka_2.11-2.4.1/bin/kafka-topics.sh --bootstrap-server master:9092,node1:9092 --partitions 3 --replication-factor 3  --create --topic <table>
/home/bigdata/kafka/kafka_2.11-2.4.1/bin/kafka-topics.sh --bootstrap-server master:9092,node1:9092 --partitions 3 --replication-factor 3  --create --topic <table>
/home/bigdata/kafka/kafka_2.11-2.4.1/bin/kafka-topics.sh --bootstrap-server master:9092,node1:9092 --partitions 3 --replication-factor 3  --create --topic <table>
/home/bigdata/kafka/kafka_2.11-2.4.1/bin/kafka-topics.sh --bootstrap-server master:9092,node1:9092 --partitions 3 --replication-factor 3  --create --topic <table>
/home/bigdata/kafka/kafka_2.11-2.4.1/bin/kafka-topics.sh --bootstrap-server master:9092,node1:9092 --partitions 3 --replication-factor 3  --create --topic <table>
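To confirm the topics were created, you can list or describe them with the same Kafka CLI (broker addresses as above; the topic name is a placeholder):

/home/bigdata/kafka/kafka_2.11-2.4.1/bin/kafka-topics.sh --bootstrap-server master:9092,node1:9092 --list
/home/bigdata/kafka/kafka_2.11-2.4.1/bin/kafka-topics.sh --bootstrap-server master:9092,node1:9092 --describe --topic <table>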

Maxwell configuration file

# tl;dr config
log_level=info
client_id=fy_client_test02
replica_server_id=12302
producer=kafka
kafka.compression.type=snappy
kafka.retries=3
kafka.acks=-1
#kafka.batch.size=16384
kafka.bootstrap.servers=cdh-server:9092,agent01:9092,agent02:9092
kafka_topic=%{database}_%{table}
# Maxwell metadata database (where Maxwell stores its own state)
host=cdh-server
port=3306
user=maxwell
password=<password>
jdbc_options=useSSL=false&serverTimezone=Asia/Shanghai&characterEncoding=utf-8

# database to replicate from (binlog source)
replication_host=
replication_user=
replication_password=
replication_port=3306
# replication database JDBC options
replication_jdbc_options=useSSL=false&serverTimezone=Asia/Shanghai&characterEncoding=utf-8
#jdbc_options=useSSL=false&serverTimezone=UTC&characterEncoding=utf-8
filter=exclude: *.*,include: <database>.<table>,include: <database>.<table>,include: <database>.<table>,include: <database>.<table>
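With kafka_topic=%{database}_%{table}, changes to a table should land in a topic named <database>_<table>. A quick sanity check, assuming the same Kafka CLI path as earlier and an illustrative topic name, is to consume one message and inspect the Maxwell JSON:

/home/bigdata/kafka/kafka_2.11-2.4.1/bin/kafka-console-consumer.sh --bootstrap-server cdh-server:9092 --topic <database>_<table> --from-beginning --max-messages 1
# A Maxwell change event is JSON of roughly this shape (values are illustrative):
# {"database":"...","table":"...","type":"insert","ts":1677000000,"data":{"id":1}}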

Maxwell start/stop script

startmaxwell.sh

#!/bin/bash
#/home/bigdata/maxwell/maxwell-1.29.2/bin/maxwell --config /home/bigdata/maxwell/maxwell-1.29.2/config.properties --daemon
MAXWELL_HOME=/home/bigdata/maxwell/maxwell-1.29.2

status_maxwell(){
    result=`ps -ef | grep maxwell | grep -v grep | wc -l`
    return $result
}

start_maxwell(){
    status_maxwell
    if [[ $? -lt 1 ]]; then
        echo "Starting Maxwell"
        $MAXWELL_HOME/bin/maxwell --config $MAXWELL_HOME/config.properties --daemon
    else
        echo "Maxwell is already running"
    fi
}

stop_maxwell(){
    status_maxwell
    if [[ $? -gt 0 ]]; then
        echo "Stopping Maxwell"
        ps -ef | grep maxwell | grep -v grep | awk '{print $2}' | xargs kill -9
    else
        echo "Maxwell is not running"
    fi
}

case $1 in
"start" )
    start_maxwell
    ;;
"stop" )
    stop_maxwell
    ;;
"restart" )
    stop_maxwell
    start_maxwell
    ;;
esac
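Usage of the control script:

bash startmaxwell.sh start     # start Maxwell as a daemon if it is not already running
bash startmaxwell.sh stop      # kill the running Maxwell process
bash startmaxwell.sh restart   # stop, then start again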

Simpler version

#!/bin/bash
/home/bigdata/maxwell/maxwell-1.29.2/bin/maxwell --config /home/bigdata/maxwell/maxwell-1.29.2/config.properties --daemon

First-day full bootstrap import

alldatatohdfs.sh

#!/bin/bash
# This script bootstraps (initialises) all incremental tables; it only needs to run once.
MAXWELL_HOME=/home/bigdata/maxwell/maxwell-1.29.2

import_data() {
    $MAXWELL_HOME/bin/maxwell-bootstrap --database <database> --table $1 --config $MAXWELL_HOME/config.properties
}

case $1 in
"cart_info")
    import_data cart_info
    ;;
"all")
    import_data user_info
    ;;
esac
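maxwell-bootstrap replays the full table through the normal Maxwell -> Kafka path as bootstrap events, so Maxwell must already be running. Usage follows the case branches above:

bash alldatatohdfs.sh cart_info   # bootstrap a single table
bash alldatatohdfs.sh all         # run the "all" branch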

Flume

Consuming the Maxwell data from Kafka

Start/stop script

#!/bin/bash
case $1 in
"start"){
    for i in node1
    do
        echo " -------- Starting the collection Flume agent on $i --------"
        ssh $i "nohup /home/bigdata/flume/flume-1.9.0/bin/flume-ng agent --conf /home/bigdata/flume/flume-1.9.0/conf --conf-file /home/bigdata/shell/maxwelltoktoh/flumeconf/kafka-flume-hdfs-inc.conf --name a1 -Dflume.root.logger=INFO,console >/dev/null 2>&1 &"
    done
};;
"stop"){
    for i in node1
    do
        echo " -------- Stopping the collection Flume agent on $i --------"
        ssh $i " ps -ef | grep kafka-flume-hdfs-inc.conf | grep -v grep |awk  '{print \$2}' | xargs -n1 kill -9 "
    done
};;
esac

Configuration file

kafka-flume-hdfs-inc.conf

a1.sources = r1
a1.channels = c1
a1.sinks = k1

a1.sources.r1.type = org.apache.flume.source.kafka.KafkaSource
a1.sources.r1.batchSize = 5000
a1.sources.r1.batchDurationMillis = 2000
a1.sources.r1.kafka.bootstrap.servers = master:9092,node1:9092,node2:9092
a1.sources.r1.kafka.topics = activity_info,user_info
a1.sources.r1.kafka.consumer.group.id = flume
a1.sources.r1.setTopicHeader = true
a1.sources.r1.topicHeader = topic
a1.sources.r1.interceptors = i1
# Custom timestamp interceptor: sets the event's timestamp header from the event time so the HDFS sink partitions by event date
a1.sources.r1.interceptors.i1.type = com.flume.inter.TimestampInterceptor$Builder

a1.channels.c1.type = file
a1.channels.c1.checkpointDir = /home/bigdata/shell/logtohdfs/maxwelltoktoh/data
a1.channels.c1.dataDirs = /home/bigdata/shell/logtohdfs/maxwelltoktoh/checkpoint/
a1.channels.c1.maxFileSize = 2146435071
a1.channels.c1.capacity = 1123456
a1.channels.c1.keep-alive = 6

## sink1
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = /origin_data/gmall/inc_db/%{topic}_inc/%Y-%m-%d
a1.sinks.k1.hdfs.filePrefix = db
a1.sinks.k1.hdfs.round = false

a1.sinks.k1.hdfs.rollInterval = 10
a1.sinks.k1.hdfs.rollSize = 134217728
a1.sinks.k1.hdfs.rollCount = 0

a1.sinks.k1.hdfs.fileType = CompressedStream
a1.sinks.k1.hdfs.codeC = gzip

## Wire the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel= c1
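Once the agent is running and Maxwell is producing, the incremental files should appear under the path configured above; a quick check (the date directory is illustrative):

hadoop fs -ls /origin_data/gmall/inc_db/
hadoop fs -ls /origin_data/gmall/inc_db/activity_info_inc/2023-02-20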

Notes on the sink configuration

# Describe the sink
a2.sinks.k2.type = hdfs
a2.sinks.k2.hdfs.path = hdfs://master:8020/flume/data=%Y%m%d/hour=%H
# Prefix of uploaded files
a2.sinks.k2.hdfs.filePrefix = logs-
# Whether to roll directories based on time
a2.sinks.k2.hdfs.round = true
# How many time units before a new directory is created
a2.sinks.k2.hdfs.roundValue = 1
# The time unit used for rounding
a2.sinks.k2.hdfs.roundUnit = hour
# Whether to use the local timestamp
a2.sinks.k2.hdfs.useLocalTimeStamp = true
# How many events to accumulate before flushing to HDFS
a2.sinks.k2.hdfs.batchSize = 100
# File type; compression is supported
a2.sinks.k2.hdfs.fileType = DataStream
# How often (seconds) to roll a new file
a2.sinks.k2.hdfs.rollInterval = 30
# Roll size of each file (bytes)
a2.sinks.k2.hdfs.rollSize = 134217700
# Rolling is independent of the number of events
a2.sinks.k2.hdfs.rollCount = 0

Example

a2.sources = r1
a2.channels = c1
a2.sinks = k1

a2.sources.r1.type = org.apache.flume.source.kafka.KafkaSource
a2.sources.r1.batchSize = 5000
a2.sources.r1.batchDurationMillis = 2000
a2.sources.r1.kafka.bootstrap.servers = <broker-ip>:9092,<broker-ip>:9092
a2.sources.r1.kafka.topics = <topic>,<topic>,<topic>,<topic>
a2.sources.r1.kafka.consumer.group.id = flume_product_05
a2.sources.r1.setTopicHeader = true
a2.sources.r1.topicHeader = topic
# Fix for the midnight-drift problem (the interceptor sets the timestamp header from the event time)
a2.sources.r1.interceptors = i1
a2.sources.r1.interceptors.i1.type = com.interceptor.TimeStampInterceptor$Builder
a2.sources.r1.kafka.consumer.auto.offset.reset=latest

a2.channels.c1.type = file
a2.channels.c1.checkpointDir = /data/module/flume-1.9.0/checkpoint/behavior4
a2.channels.c1.dataDirs = /data/module/flume-1.9.0/data/behavior4/
a2.channels.c1.maxFileSize = 2146435071
a2.channels.c1.capacity = 1000000
a2.channels.c1.keep-alive = 6

## sink1
a2.sinks.k1.type = hdfs
a2.sinks.k1.hdfs.path = /origin_data/db/%{database}/%{topic}_inc/%Y-%m-%d
a2.sinks.k1.hdfs.filePrefix = db
a2.sinks.k1.hdfs.round = false

a2.sinks.k1.hdfs.rollInterval = 300
a2.sinks.k1.hdfs.rollSize = 134217728
a2.sinks.k1.hdfs.rollCount = 0

a2.sinks.k1.hdfs.fileType = CompressedStream
a2.sinks.k1.hdfs.codeC = gzip

## Wire the source and sink to the channel
a2.sources.r1.channels = c1
a2.sinks.k1.channel = c1

# Adjust the following to your own requirements
#a2.sources.r1.kafka.consumer.auto.offset.reset=latest
#a2.sinks.k1.hdfs.rollInterval = 300

