Connector node: Kafka / HBase / Phoenix interaction code

1.config

package com.dgindusoft.connector.config;


import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.ContainerProperties;


import java.util.HashMap;
import java.util.Map;

/**
 * @author tianshl
 * @version 2017/9/1 4:07 PM
 */
@Configuration
@EnableKafka
public class KafkaConfiguration {

   @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Value("${spring.kafka.consumer.enable-auto-commit}")
    private Boolean autoCommit;

    @Value("${spring.kafka.consumer.auto-commit-interval}")
    private Integer autoCommitInterval;

    @Value("${spring.kafka.consumer.group-id1}")
    private String groupId1;

    @Value("${spring.kafka.consumer.group-id2}")
    private String groupId2;

    @Value("${spring.kafka.consumer.group-id3}")
    private String groupId3;

    @Value("${spring.kafka.consumer.max-poll-records}")
    private Integer maxPollRecords;

    @Value("${spring.kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;

    @Value("${spring.kafka.consumer.enable-auto-commit}")
    private String enableAutoCommit;

    @Value("${spring.indexName}")
    private String tableIndexName;


    /**
     * Consumer 1 configuration
     */
    @Bean
    public Map<String, Object> consumerConfigsAlarm() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId1);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 120000);
        props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 180000);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }

    /**
     * Consumer 1 batch listener container factory
     */
    @Bean
    public KafkaListenerContainerFactory<?> batchFactoryAlarm() {
        ConcurrentKafkaListenerContainerFactory<Integer, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigsAlarm()));
        //Enable batch consumption; the batch size is set via ConsumerConfig.MAX_POLL_RECORDS_CONFIG
        factory.setBatchListener(true);
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        return factory;
    }


    /**
     * Consumer 2 configuration
     */
    @Bean
    public Map<String, Object> consumerConfigsData() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId2);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 120000);
        props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 180000);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }

    /**
     * Consumer 2 batch listener container factory
     */
    @Bean
    public KafkaListenerContainerFactory<?> batchFactoryData() {
        ConcurrentKafkaListenerContainerFactory<Integer, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigsData()));
        //Enable batch consumption; the batch size is set via ConsumerConfig.MAX_POLL_RECORDS_CONFIG
        factory.setBatchListener(true);
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        return factory;
    }

    /**
     * Consumer 3 configuration
     */
    @Bean
    public Map<String, Object> consumerConfigsState() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId3);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, enableAutoCommit);
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 120000);
        props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 180000);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }
    /**
     * Consumer 3 batch listener container factory
     */
    @Bean
    public KafkaListenerContainerFactory<?> batchFactoryState() {
        ConcurrentKafkaListenerContainerFactory<Integer, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigsState()));
        //Enable batch consumption; the batch size is set via ConsumerConfig.MAX_POLL_RECORDS_CONFIG
        factory.setBatchListener(true);
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL_IMMEDIATE);
        return factory;
    }

    @Bean
    public KafkaAdmin kafkaAdmin() {
        Map<String, Object> props = new HashMap<>();
        //配置Kafka实例的连接地址

        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        KafkaAdmin admin = new KafkaAdmin(props);
        return admin;
    }

    @Bean
    public AdminClient adminClient() {
        return AdminClient.create(kafkaAdmin().getConfig());
    }
}
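The AdminClient bean above gives the application programmatic access to topic administration. As a reference, here is a minimal usage sketch that creates the two fixed-name topics consumed later in DeviceService; the helper class, partition count, and replication factor are illustrative assumptions, not values from the original project:

import java.util.Arrays;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.NewTopic;

// Hypothetical helper, not part of the original project: creates the
// fixed-name topics that the listeners in DeviceService subscribe to.
public class TopicBootstrap {
    public static void ensureTopics(AdminClient adminClient) throws Exception {
        // Partition count and replication factor are illustrative assumptions.
        adminClient.createTopics(Arrays.asList(
                new NewTopic("device_alarm", 3, (short) 1),
                new NewTopic("device_state", 3, (short) 1)))
                .all().get(); // block until both topics have been created
    }
}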

2.entity

package com.dgindusoft.connector.entity;


import lombok.Data;

import java.util.HashMap;
import java.util.Map;

/**
 * @description: Unified response wrapper class
 * @author: lijj
 * @create: 2020-03-26 14:21
 **/
@Data
public class Result {
    private Boolean success;
    private Integer code;
    private String message;
    private Map<String, Object> data = new HashMap<>();

    /**
     * Private constructor
     */
    private Result(){}

    /**
     * Generic success result
     * @return
     */
    public static Result ok() {
        Result r = new Result();
        r.setSuccess(ResultCodeEnum.SUCCESS.getSuccess());
        r.setCode(ResultCodeEnum.SUCCESS.getCode());
        r.setMessage(ResultCodeEnum.SUCCESS.getMessage());
        return r;
    }

    /**
     * Generic failure result (unknown error)
     * @return
     */
    public static Result error() {
        Result r = new Result();
        r.setSuccess(ResultCodeEnum.UNKNOWN_ERROR.getSuccess());
        r.setCode(ResultCodeEnum.UNKNOWN_ERROR.getCode());
        r.setMessage(ResultCodeEnum.UNKNOWN_ERROR.getMessage());
        return r;
    }

    /**
     * Build a result from a ResultCodeEnum value
     * @param result
     * @return
     */
    public static Result setResult(ResultCodeEnum result) {
        Result r = new Result();
        r.setSuccess(result.getSuccess());
        r.setCode(result.getCode());
        r.setMessage(result.getMessage());
        return r;
    }
  /**------------ Chained setters that return this ------------**/

    /**
     * Attach a custom data map
     * @param map
     * @return
     */
    public Result data(Map<String,Object> map) {
        this.setData(map);
        return this;
    }

    /**
     * Put a single key/value pair into data
     * @param key
     * @param value
     * @return
     */
    public Result data(String key, Object value) {
        this.data.put(key, value);
        return this;
    }

    /**
     * Set a custom message
     * @param message
     * @return
     */
    public Result message(String message) {
        this.setMessage(message);
        return this;
    }

    /**
     * Set a custom status code
     * @param code
     * @return
     */
    public Result code(Integer code) {
        this.setCode(code);
        return this;
    }

    /**
     * Set the success flag
     * @param success
     * @return
     */
    public Result success(Boolean success) {
        this.setSuccess(success);
        return this;
    }
}
package com.dgindusoft.connector.entity;

import lombok.Getter;

/**
 * @author lijj
 * @date 2020-03-27
 */
@Getter
public enum ResultCodeEnum {
    SUCCESS(true,4000,"Success"),
    UNKNOWN_ERROR(false,4001,"Unknown error"),
    PARAM_ERROR(false,4002,"Parameter error"),
    TABLE_EXIST_ERROR(false,4003,"Table already exists"),
    INSERT_ERROR(false,4004,"Insert into HBase failed")
    ;
    /**
     * Whether the response succeeded
     */
    private Boolean success;
    // Response status code
    private Integer code;
    // Response message
    private String message;

    ResultCodeEnum(boolean success, Integer code, String message) {
        this.success = success;
        this.code = code;
        this.message = message;
    }
}
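For reference, a short sketch of how the chained Result API and ResultCodeEnum are meant to be used (the demo class itself is hypothetical):

// Hypothetical demo of the chained Result API.
public class ResultDemo {
    public static void main(String[] args) {
        Result ok = Result.ok().message("Query succeeded").data("count", 42);
        Result paramError = Result.setResult(ResultCodeEnum.PARAM_ERROR).data("field", "modelID");
        // Lombok's @Data generates the getters used here
        System.out.println(ok.getCode() + " / " + paramError.getMessage()); // 4000 / Parameter error
    }
}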

 

3.service

/**
 * @description: HBase basic operations implementation
 * @author: lijj
 * @create: 2020-04-03 12:59
 **/
@Service
public class BaseServiceImpl implements BaseService {

    @Override
    public Result createTable(String tableName) {
        List<String> cfs = new ArrayList<>();
        cfs.add("data");
        if (HBaseUtil.createTable(tableName, cfs)) {
            return Result.ok().message("表创建成功");
        } else {
            return Result.setResult(ResultCodeEnum.TABLE_EXIST_ERROR);
        }
    }

    @Override
    public Result insertData(String tableName, String deviceTime, String modelID, String deviceID, Map<String, Object> dataMap) {
        String rowKey = modelID + deviceID + deviceTime;
        List<Put> puts = new ArrayList<>();
        Put put = new Put(Bytes.toBytes(rowKey));
        dataMap.forEach((key, value) ->
                put.addColumn(Bytes.toBytes("data"), Bytes.toBytes(key),
                        Bytes.toBytes("" + value)));
        //add the assembled Put once, after all columns have been set
        puts.add(put);
        try {
            HBaseUtil.putRows(tableName, puts);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return Result.ok().message("Insert successful");
    }



    @Override
    public Result insertData(String tableName, List<Put> puts) {
        try {
            HBaseUtil.putRows(tableName, puts);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return Result.ok().message("插入成功");
    }

    @Override
    public Result isTableExist(String tableName) {
        Boolean result = HBaseUtil.isTableExist(tableName);
        if (result) {
            return Result.setResult(ResultCodeEnum.TABLE_EXIST_ERROR);
        } else {
            return Result.ok().message("该表不存在");
        }
    }
}
@Service
public class DeviceService {
    public static final Logger logger = LoggerFactory.getLogger(DeviceService.class);

    @Value("${spring.indexName}")
    private String tableIndexName;

    @Autowired
    private BaseServiceImpl baseService;
    //Injected HBase service instance


    @Autowired
    private DeviceModelRepository deviceModelRepository;

    /**
     * Subscribe to device alarms
     *
     * @param recordList
     */
    @KafkaListener(topicPattern = TopicManagement.ALARM, containerFactory = "batchFactoryAlarm")
    void processAlarmMessage(List<ConsumerRecord<Integer, String>> recordList, Acknowledgment acknowledgment) {
        this.dealMessageList(recordList, "deviceAlarm", acknowledgment);
    }
    /**
     * Subscribe to device data
     *
     * @param recordList
     */
    @KafkaListener(topicPattern = TopicManagement.DATA + ".*", containerFactory = "batchFactoryData")
    void processDataMessage(List<ConsumerRecord<Integer, String>> recordList, Acknowledgment acknowledgment) {
        this.dealDataMessageList(recordList, "deviceData", acknowledgment);
    }

    /**
     * Subscribe to device state
     *
     * @param recordList
     */
    @KafkaListener(topicPattern = TopicManagement.STATE, containerFactory = "batchFactoryState")
    void processStateMessage(List<ConsumerRecord<Integer, String>> recordList, Acknowledgment acknowledgment) {
        this.dealMessageList(recordList, "deviceState", acknowledgment);
    }

    /**
     * Common handler for the consumed message batch
     *
     * @param recordList     consumed records
     * @param type           type ("deviceAlarm" or "deviceState")
     * @param acknowledgment manual acknowledgment
     * @throws NullPointerException
     */
    public void dealMessageList(List<ConsumerRecord<Integer, String>> recordList, String type,
                                Acknowledgment acknowledgment) throws NullPointerException {
        String dataType = null;
        String tableName = null;
        //The given type must be one of the supported values
        if (!(type.equals("deviceAlarm")) && !(type.equals("deviceState"))) {
            logger.error("Invalid type parameter; field validation cannot proceed, please check!");
            return;
        }
        if (type.equals("deviceAlarm")) {
            dataType = "alarmCode";
            tableName = "device_alarm";
        } else {
            dataType = "stateCode";
            tableName = "device_state";
        }

        if (null == recordList) {
            throw new NullPointerException();
        }
        List<Put> puts = new ArrayList<>();
        for (int i = 0; i < recordList.size(); i++) {
            if (null == recordList.get(i)) {
                logger.error("Current record is null");
                continue;
            }
            //Parse the payload into JSON
            String topic = recordList.get(i).topic();
            String payLoadString = recordList.get(i).value();
            if (!isJsonObject(payLoadString)) {
                logger.error("payLoad is not a JSON object, current payLoad: " + payLoadString);
                //Skip this record and move on to the next
                continue;
            }
            JSONObject data = JSONObject.parseObject(payLoadString);

            //Validate the payload fields:
            //skip the record if modelID/deviceID/deviceTime is missing
            if (!(dataCheckCommonData(data))) {
                continue;
            }

            //This message is an alarm or a state update;
            //skip the record if stateCode/alarmCode is missing
            if (data.get(dataType) == null || StringUtils.isBlank(data.get(dataType).toString())) {
                logger.error("stateCode/alarmCode is empty, current payLoad: " + data);
                continue;
            }


            String rowKey = data.get("modelID").toString() + data.get("deviceID").toString() + data.get("deviceTime").toString();
            Map<String, String> dataMap = new HashMap<>(16);
            data.forEach((key, value) -> {
                //Convert the JSON fields into a Map
                dataMap.put(key, value.toString());
            });
            Put put = new Put(Bytes.toBytes(rowKey));
            dataMap.forEach((key, value) -> {
                put.addColumn(Bytes.toBytes("data"), Bytes.toBytes(key),
                        Bytes.toBytes("" + value));
            });

            puts.add(put);
        }//end of the record loop

        if (null == tableName) {
            logger.error("Table name is null");
            return;
        }
        //Check whether the table exists; create it if not
        Result isTableExistResult = baseService.isTableExist(tableName);
        if (isTableExistResult.getSuccess()) {
            Result createTableResult = baseService.createTable(tableName);
            if (!(createTableResult.getSuccess())) {
                logger.error("Failed to create table " + tableName + ", details: " + createTableResult.toString());
            }
        }

        //Write to HBase
        Result result = baseService.insertData(tableName, puts);
        if (!(result.getSuccess())) {
            logger.error("Insert into HBase failed, details: " + result.toString());
        } else {
            acknowledgment.acknowledge();
        }
        puts.clear();
    }
    /**
     * Handler for the consumed device_data message batch
     *
     * @param recordList     consumed records
     * @param type           type ("deviceData")
     * @param acknowledgment manual acknowledgment
     * @throws NullPointerException
     */
    public void dealDataMessageList(List<ConsumerRecord<Integer, String>> recordList, String type,
                                    Acknowledgment acknowledgment) throws NullPointerException {
        //The given type must be "deviceData"
        if (!(type.equals("deviceData"))) {
            logger.error("Invalid type parameter; field validation cannot proceed, please check!");
            return;
        }
        }
        if (null == recordList) {
            throw new NullPointerException();
        }
        Map<String, List<String>> modelIdAndSubDeviceMap = new HashMap<>(16);
        Map<String, List<Put>> tableNameAndDataMap = new HashMap<>(16);
        Map<String, List<Put>> tableNameAndDataMapIndex = new HashMap<>(16);//索引表的表名和批量插入数据

        for (int i = 0; i < recordList.size(); i++) {
            if (null == recordList.get(i)) {
                logger.error("Current record is null");
                continue;
            }
            //Parse the payload into JSON
            String topic = recordList.get(i).topic();
            String payLoadString = recordList.get(i).value();
            if (!isJsonObject(payLoadString)) {
                logger.error("payLoad is not a JSON object, current payLoad: " + payLoadString);
                continue;
            }
            JSONObject data = JSONObject.parseObject(payLoadString);

            //Validate the payload fields:
            //skip the record if modelID/deviceID/deviceTime is missing
            if (!(dataCheckCommonData(data))) {
                continue;
            }

            //This message carries device data; validate the deviceData field
            if (!(checkDeviceData(data))) {
                continue;
            }
            String modelId = data.get("modelID").toString();

            //Name of the corresponding index table
            String tableIndexNameNow = tableIndexName + modelId;

            //Check that the payload matches the topic it arrived on
            if (!("device_data_" + modelId).equals(topic)) {
                logger.error("topic and payload do not match");
                logger.error("current topic: " + topic);
                logger.error("current payload: " + payLoadString);
                continue;
            }

            String subDevice = "";
            List<String> subDeviceList = new ArrayList<>();
             //查询当前模型是否包含子设备
            if (modelIdAndSubDeviceMap.keySet().contains(modelId)) {
                subDeviceList = modelIdAndSubDeviceMap.get(modelId);
            } else {
                String subDeviceString = getSubDeviceByModelId(modelId);
                if (StringUtils.isNotBlank(subDeviceString)) {
                    String[] subDeviceListTemp = subDeviceString.split(",");
                    Collections.addAll(subDeviceList, subDeviceListTemp);
                    modelIdAndSubDeviceMap.put(modelId, subDeviceList);
                } else {
                    modelIdAndSubDeviceMap.put(modelId, null);

                }
            }

 Map<String, String> dataMap = new HashMap<>(16);
            data.forEach((key, value) ->
            {
                //将数据的JSON格式转为Map格式
                //deviceData为jsonObject,单独处理
                if (key.equals("deviceData")) {
                    // deviceData是否为jsonObject已在上方完成校验
                    JSONObject deviceDataTemp = JSONObject.parseObject(value.toString());
                    deviceDataTemp.forEach((itemKey, itemValue) -> {
                        dataMap.put(itemKey, itemValue.toString());
                    });
                } else {
                    dataMap.put(key, value.toString());
                }
            });

            //Append the subDevice values
            if ((subDeviceList != null) && (subDeviceList.size() > 0)) {
                for (int subDeviceIndex = 0; subDeviceIndex < subDeviceList.size(); subDeviceIndex++) {
                    String subDeviceValue = subDeviceList.get(subDeviceIndex);
                    if (subDeviceIndex == 0) {
                        subDevice = subChildDeviceNo(dataMap.get(subDeviceValue));
                    } else {
                        subDevice = subDevice + "," + subChildDeviceNo(dataMap.get(subDeviceValue));
                    }
                }
            }

            dataMap.put("subDevice", subDevice);
            String rowKey = "";
            if (StringUtils.isNotBlank(subDevice)) {
                rowKey = data.get("modelID").toString() + data.get("deviceID").toString() + subDevice + data.get("deviceTime").toString();
            } else {
                rowKey = data.get("modelID").toString() + data.get("deviceID").toString() + data.get("deviceTime").toString();
            }

            Put put = new Put(Bytes.toBytes(rowKey));
            dataMap.forEach((key, value) -> {
                put.addColumn(Bytes.toBytes("data"), Bytes.toBytes(key),
                        Bytes.toBytes("" + value));
            });

            if (tableNameAndDataMap.containsKey(topic)) {
                //This record belongs to the same table as earlier records
                tableNameAndDataMap.get(topic).add(put);
            } else {
                List<Put> putTemp = new ArrayList<>();
                putTemp.add(put);
                tableNameAndDataMap.put(topic, putTemp);
            }//end of main-table accumulation

            //Build the index-table rowkey (see RowKeyIndex) so the Phoenix
            //index table stays in sync with the main table
            byte[] rowKeyIndex = RowKeyIndex.rowKeyIndex(dataMap, rowKey);
            Put putIndex = new Put(rowKeyIndex);


            //Assemble the columns into the Put
            dataMap.forEach((key, value) -> {
                putIndex.addColumn(Bytes.toBytes("data"), Bytes.toBytes("data:" + key),
                        Bytes.toBytes("" + value));
            });


            if (tableNameAndDataMapIndex.containsKey(tableIndexNameNow)) {
                tableNameAndDataMapIndex.get(tableIndexNameNow).add(putIndex);
            } else {
                List<Put> putTempIndex = new ArrayList<>();
                putTempIndex.add(putIndex);
                tableNameAndDataMapIndex.put(tableIndexNameNow, putTempIndex);
            }
            //end of index-table accumulation
        }//end of the record loop
 if (tableNameAndDataMap.size() <= 0) {
            logger.error("表名为空");
            return;
        }
        int insertFlag = 0;
        for (String key : tableNameAndDataMap.keySet()) {
            //Check whether the table exists; create it if not
            Result isTableExistResult = baseService.isTableExist(key);
            if (isTableExistResult.getSuccess()) {
                Result createTableResult = baseService.createTable(key);
                if (!(createTableResult.getSuccess())) {
                    logger.error("Failed to create table " + key + ", details: " + createTableResult.toString());
                }
            }
            //Write to the HBase main table
            Result result = baseService.insertData(key, tableNameAndDataMap.get(key));
            logger.debug(tableNameAndDataMap.toString());
            if (!(result.getSuccess())) {
                logger.error("Insert into HBase failed, details: " + result.toString());
                insertFlag++;
            } else {
                logger.info("Inserted into main table: " + key);
            }
        }

        if (insertFlag <= 0) {
            acknowledgment.acknowledge();
        }

        tableNameAndDataMap.clear();

        //Write to the HBase index tables
        if (tableNameAndDataMapIndex.size() <= 0) {
            logger.error("No index tables to insert into");
            return;
        }
        int insertFlagIndex = 0;
        for (String key : tableNameAndDataMapIndex.keySet()) {
            //Only insert when the index table already exists; otherwise skip
            Result isIndexTableExistResult = baseService.isTableExist(key);
            if (!isIndexTableExistResult.getSuccess()) {
                //Write to the HBase index table
                Result resultIndex = baseService.insertData(key, tableNameAndDataMapIndex.get(key));
                logger.debug(tableNameAndDataMapIndex.toString());
                if (!(resultIndex.getSuccess())) {
                    logger.error("Insert into HBase index table failed, details: " + resultIndex.toString());
                    insertFlagIndex++;
                } else {
                    logger.info("Inserted into index table: " + key);
                }
            } else {
                logger.error("Index table does not exist!");
            }
        }
        tableNameAndDataMapIndex.clear();

    }
    /**
     * Convert a Map<String,String> to a Map<String,Object>
     *
     * @param map
     * @return
     */
    public Map<String, Object> convert(Map<String, String> map) {
        Map<String, Object> objectMap = new HashMap<>(16);
        map.forEach((key, value) -> {
            objectMap.put(key, value);
        });
        return objectMap;
    }

    /**
     * Check whether a string can be parsed as a JSON object
     *
     * @param content
     * @return
     */
    public boolean isJsonObject(String content) {
        try {
            JSONObject jsonStr = JSONObject.parseObject(content);
            return true;
        } catch (Exception e) {
            logger.error("转换JSONObject失败" + e);
            return false;
        }
    }
    /**
     * Validate the common fields: modelID, deviceID and deviceTime
     *
     * @param data
     * @return
     */
    public boolean dataCheckCommonData(JSONObject data) {
        if (data.get("modelID") == null || StringUtils.isBlank(data.get("modelID").toString())
                || data.get("deviceID") == null || StringUtils.isBlank(data.get("deviceID").toString())
                || data.get("deviceTime") == null || StringUtils.isBlank(data.get("deviceTime").toString())) {
            logger.error("获取到的modelID/deviceID/deviceTime为空,当前payLoad为:" + data);
            return false;
        }
        return true;
    }
    /**
     * Validate the deviceData field
     *
     * @param data
     * @return
     */
    public boolean checkDeviceData(JSONObject data) {
        //Reject when deviceData is missing
        if (data.get("deviceData") == null || StringUtils.isBlank(data.get("deviceData").toString())) {
            logger.error("deviceData is empty, current payLoad: " + data);
            return false;
        }
        //Reject when deviceData is not a JSON object
        if (!isJsonObject(data.get("deviceData").toString())) {
            logger.error("deviceData is not a JSON object, current payLoad: " + data);
            return false;
        }
        //Reject when deviceData is an empty JSON object
        String replaceData = data.getJSONObject("deviceData").toString().replace("{", "").replace("}", "");
        if (data.getJSONObject("deviceData") == null || StringUtils.isBlank(replaceData)) {
            logger.error("deviceData is an empty JSON object, current payLoad: " + data);
            return false;
        }

        return true;
    }


    /**
     * Keep only letters, digits and '-' in a sub-device number;
     * any other characters are stripped out
     *
     * @param childDeviceNo sub-device number
     * @return the sanitized string
     */
    public String subChildDeviceNo(String childDeviceNo) {
        if (StringUtils.isNotBlank(childDeviceNo)) {
            childDeviceNo = childDeviceNo.replaceAll("[^0-9a-zA-Z-]", "");
        } else {
            childDeviceNo = "";
        }
        return childDeviceNo;
    }


    /**
     * Get the subDevice field of a model
     *
     * @param modelId
     * @return
     */
    public String getSubDeviceByModelId(String modelId) {
        //Load the model from MySQL
        DeviceModel deviceModel = deviceModelRepository.findByModelId(modelId);
        if (deviceModel != null) {
            return deviceModel.getSubDevice();
        } else {
            return "";
        }
    }

}
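For reference, a device_data payload that passes dataCheckCommonData and checkDeviceData could be built as follows; the demo class and all field values are illustrative assumptions:

import com.alibaba.fastjson.JSONObject;

// Hypothetical sample payload builder; field values are illustrative only.
public class PayloadDemo {
    public static void main(String[] args) {
        JSONObject deviceData = new JSONObject();
        deviceData.put("temperature", "36.5");
        deviceData.put("pressure", "101.3");

        JSONObject payload = new JSONObject();
        payload.put("modelID", "M001");
        payload.put("deviceID", "D001");
        payload.put("deviceTime", "20200701120000");
        payload.put("deviceData", deviceData);

        // To pass the topic check it would be published to "device_data_M001".
        System.out.println(payload.toJSONString());
    }
}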
package com.dgindusoft.connector.service.impl;

public class TopicManagement {
    public static final String DATA = "device_data_";
    public static final String STATE = "device_state";
    public static final String ALARM = "device_alarm";
}
package com.dgindusoft.connector.service;


import com.dgindusoft.connector.entity.Result;
import org.apache.hadoop.hbase.client.Put;

import java.util.List;
import java.util.Map;

/**
 * @description: HBase basic operations
 * @author: lijj
 * @create: 2020-04-03 12:53
 **/
public interface BaseService {

    /**
     * Create a table
     *
     * @param tableName
     * @return
     */
    Result createTable(String tableName);

    /**
     * Insert a row assembled from a data map
     *
     * @param tableName
     * @param deviceTime
     * @param modelID
     * @param deviceID
     * @param dataMap
     * @return
     */
    Result insertData(String tableName, String deviceTime, String modelID, String deviceID, Map<String, Object> dataMap);

    /**
     * Batch-insert rows
     *
     * @param tableName
     * @param puts
     * @return
     */
    Result insertData(String tableName, List<Put> puts);
  
    /**
     * Check whether a table exists
     *
     * @param tableName
     * @return
     */
    Result isTableExist(String tableName);
}

4.utils

package com.dgindusoft.connector.util;

import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.annotation.Order;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;

/**
 * Utility class that exposes HBase configuration values injected by Spring
 *
 * @author: lil
 * @date: 2020/7/28
 */
@Component
@Order(value = 1)
public class GetPropertiesUtil {

    private static String quorum;

    @Value("${hbase.zookeeper.quorum}")
    public void setProQuorum(String quorumTemp) {
        quorum = quorumTemp;
    }

    public static String getQuorum() {
        return quorum;
    }
}
/**
 * @author lijj
 * @date 2020-03-29
 * 0. Create the configuration object for the HBase connection
 * 1. Obtain the HBase Connection object
 * 2. Obtain the operation objects (Admin/Table)
 * 3. Obtain the operation results
 * 4. Close the connection
 * This class encapsulates the HBase connection and access to named tables
 */
@Slf4j
@Component
@Order(value = 2)
public class HBaseConn {

    private static final HBaseConn INSTANCE = new HBaseConn();
    private static Configuration configuration;
    private static Connection connection;

    private HBaseConn() {
        try {
            if (configuration == null) {
                configuration = HBaseConfiguration.create();//create the configuration object for the HBase connection
                configuration.set("hbase.zookeeper.quorum", GetPropertiesUtil.getQuorum());
                log.debug("=======================================");
                log.debug(GetPropertiesUtil.getQuorum());
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }
    //Obtain the HBase connection
    private Connection getConnection() {
        if (connection == null || connection.isClosed()) {
            try {
                connection = ConnectionFactory.createConnection(configuration);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
        return connection;
    }

    public static Connection getHBaseConn() {
        return INSTANCE.getConnection();
    }

    public static Table getTable(String tableName) throws IOException {
        return INSTANCE.getConnection().getTable(TableName.valueOf(tableName));
    }

    public static void closeConn() {
        if (connection != null) {
            try {
                connection.close();
            } catch (IOException ioe) {
                ioe.printStackTrace();
            }
        }
    }
}

/**
 * @author lijj
 * @date 2019-08-27
 */
public class HBaseUtil {

    /**
     * Create an HBase table.
     *
     * @param tableName table name
     * @param cfs       list of column families
     * @return whether the table was created
     */
    public static boolean createTable(String tableName, List<String> cfs) {
        try (HBaseAdmin admin = (HBaseAdmin) HBaseConn.getHBaseConn().getAdmin()) {//obtain the Admin operation object
            if (admin.tableExists(TableName.valueOf(tableName))) {
                return false;
            }
            List<ColumnFamilyDescriptor> familyDescriptors = new ArrayList<>(cfs.size());
            for (String column : cfs) {
                familyDescriptors.add(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(column)).build());
            }
            TableDescriptor tableDescriptor = TableDescriptorBuilder.newBuilder(TableName.valueOf(tableName))
                    .setColumnFamilies(familyDescriptors).build();
            admin.createTable(tableDescriptor);
            return true;
        } catch (Exception e) {
            e.printStackTrace();
            return false;
        }
    }
    /**
     * Check whether an HBase table exists
     *
     * @param tableName table name
     * @return whether the table exists
     */
    public static boolean isTableExist(String tableName) {
        try (HBaseAdmin admin = (HBaseAdmin) HBaseConn.getHBaseConn().getAdmin()) {
            if (admin.tableExists(TableName.valueOf(tableName))) {
                return true;
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        return false;
    }
    /**
     * Delete an HBase table.
     *
     * @param tableName table name
     * @return whether the table was deleted
     */
    public static boolean deleteTable(String tableName) {
        try (HBaseAdmin admin = (HBaseAdmin) HBaseConn.getHBaseConn().getAdmin()) {
            admin.disableTable(TableName.valueOf(tableName));
            admin.deleteTable(TableName.valueOf(tableName));
            return true;
        } catch (Exception e) {
            e.printStackTrace();
            return false;
        }
    }
    /**
     * Insert a single row into HBase.
     * e.g. family="info", qualifier="name", data="zhangsan"
     *
     * @param tableName table name
     * @param rowKey    unique row key
     * @param cfName    column family name
     * @param qualifier column qualifier (the column name itself)
     * @param data      cell value
     * @return whether the insert succeeded
     */
    public static boolean putRow(String tableName, String rowKey, String cfName, String qualifier,
                                 String data) {
        try (Table table = HBaseConn.getTable(tableName)) {
            Put put = new Put(Bytes.toBytes(rowKey));//encode the row key as bytes
            put.addColumn(Bytes.toBytes(cfName), Bytes.toBytes(qualifier), Bytes.toBytes(data));//add the column
            table.put(put);
            return true;
        } catch (IOException ioe) {
            ioe.printStackTrace();
            return false;
        }
    }

    public static boolean putRows(String tableName, List<Put> puts) {
        try (Table table = HBaseConn.getTable(tableName)) {
            table.put(puts);
            return true;
        } catch (IOException ioe) {
            ioe.printStackTrace();
            return false;
        }
    }
}
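A minimal usage sketch of HBaseUtil follows; the demo class, table name, and values are illustrative assumptions, not from the original project:

import java.util.Arrays;

// Hypothetical demo of HBaseUtil; table name and values are illustrative.
public class HBaseUtilDemo {
    public static void main(String[] args) {
        if (!HBaseUtil.isTableExist("device_state")) {
            HBaseUtil.createTable("device_state", Arrays.asList("data"));
        }
        // rowKey layout mirrors the service code: modelID + deviceID + deviceTime
        HBaseUtil.putRow("device_state", "M001D00120200701120000", "data", "stateCode", "1");
        HBaseConn.closeConn();
    }
}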

/**
 * Builds the composite rowkey for the HBase/Phoenix index table
 */
public class RowKeyIndex {
    public static byte[] rowKeyIndex(Map<String, String> dataMap, String rowKey) {

        byte[] addFour;

        byte[] modelIDS = Bytes.toBytes(dataMap.get("modelID"));
        byte transformChar = (byte) 0;
        byte[] transformChars = new byte[]{transformChar};
        byte[] addOne = ArrayUtils.addAll(modelIDS, transformChars);
        byte[] deviceIDS = Bytes.toBytes(dataMap.get("deviceID"));
        byte[] addTwo = ArrayUtils.addAll(deviceIDS, transformChars);

        byte[] oneAddTwo = ArrayUtils.addAll(addOne, addTwo);

        byte[] deviceTimes = Bytes.toBytes(dataMap.get("deviceTime"));
        byte[] addThree = ArrayUtils.addAll(deviceTimes, transformChars);
        if (StringUtils.isBlank(dataMap.get("processState"))) {
            addFour = ArrayUtils.addAll(null, transformChars);
        } else {
            byte[] processStates = Bytes.toBytes(dataMap.get("processState"));
            addFour = ArrayUtils.addAll(processStates, transformChars);
        }

        byte[] threeAddFour = ArrayUtils.addAll(addThree, addFour);

        byte[] oneToFour = ArrayUtils.addAll(oneAddTwo, threeAddFour);

        byte[] rowKeyBytes = Bytes.toBytes(rowKey);

        byte[] rowKeyIndex = ArrayUtils.addAll(oneToFour, rowKeyBytes);

        return rowKeyIndex;
    }
}
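The zero byte appended after each field matches the separator Phoenix uses between variable-length VARCHAR columns in a composite primary key, which is what lets a Phoenix table or view line up with this index table. A small sketch of the resulting byte layout (the demo class and all values are illustrative):

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical demo of RowKeyIndex; all values are illustrative.
public class RowKeyIndexDemo {
    public static void main(String[] args) {
        Map<String, String> dataMap = new HashMap<>();
        dataMap.put("modelID", "M001");
        dataMap.put("deviceID", "D001");
        dataMap.put("deviceTime", "20200701120000");
        dataMap.put("processState", "0");

        byte[] rk = RowKeyIndex.rowKeyIndex(dataMap, "M001D00120200701120000");
        // Prints M001\x00D001\x0020200701120000\x000\x00M001D00120200701120000
        System.out.println(Bytes.toStringBinary(rk));
    }
}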

 

5.mysql

/**
 * @author lijj
 * @date 2020-03-05
 */
@Entity
@Data
@Table(name="device_model",uniqueConstraints = {@UniqueConstraint(columnNames = {"model_id"})})
public class DeviceModel implements Serializable {

    /**
     * ID
     */
    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    @Column(name = "id")
    private Long id;

    /**
     * Model ID
     */
    @Column(name = "model_id",nullable = false)
    private String modelId;

    /**
     * Model name
     */
    @Column(name = "model_name",nullable = false)
    private String modelName;

    /**
     * Description
     */
    @Column(name = "model_desc")
    private String modelDesc;

    /**
     * Creation time
     */
    @CreationTimestamp
    @Column(name = "create_time",nullable = false)
    private Timestamp createTime;

    /**
     * Update time
     */
    @UpdateTimestamp
    @Column(name = "update_time",nullable = false)
    private Timestamp updateTime;
    /**
     * Multi-level sub-device field identifiers (comma-separated)
     */
    @Column(name = "sub_device")
    private String subDevice;
}

6.domain


/**
 * @author jie
 * @date 2018-12-03
 */
@Service
@Transactional(propagation = Propagation.SUPPORTS, readOnly = true, rollbackFor = Exception.class)
public class DeviceModelQueryService {

    @Autowired
    private DeviceModelRepository deviceModelRepository;


    /**
     * Find a device model by modelId
     */
    public DeviceModel queryByModelId(String modelId) {
        return deviceModelRepository.findByModelId(modelId);
    }

}

7.query

/**
 * @author lijj
 * @date 2020-03-05
 */
public interface DeviceModelRepository extends JpaRepository<DeviceModel, Long>, JpaSpecificationExecutor {

    /**
     * Find a model by its model ID
     * @param modelId
     * @return
     */
    DeviceModel findByModelId(String modelId);

}

8.util

/**
 * @author lijj
 * @date 2020-05-12
 */
public class CopyUtil {


    public static <T> T copy(Object source,Class<T> clazz){
        if (source==null){
            return null;
        }
        T obj=null;
        try {
            obj=clazz.newInstance();
        } catch (Exception e) {
            e.printStackTrace();
        }
        BeanUtils.copyProperties(source,obj);
        return obj;
    }

   public static <T> List<T> copyList(List source,Class<T> clazz){
       List<T> target=new ArrayList<>();
       if (!CollectionUtil.isEmpty(source)){
           for (Object c:source){
               T obj=copy(c,clazz);
               target.add(obj);
           }
       }
       return target;
   }
}
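A short usage sketch of CopyUtil; the DTO and demo classes here are hypothetical and exist only for the example:

import lombok.Data;

// Hypothetical DTO, introduced only to illustrate CopyUtil.
@Data
class DeviceModelDto {
    private String modelId;
    private String modelName;
}

public class CopyUtilDemo {
    public static void main(String[] args) {
        DeviceModel source = new DeviceModel();
        source.setModelId("M001");
        source.setModelName("pump");
        // Copies the properties whose names match between the two classes
        DeviceModelDto dto = CopyUtil.copy(source, DeviceModelDto.class);
        System.out.println(dto.getModelId()); // prints M001
    }
}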

9.application

/**
 * @author lil
 */
@Slf4j
@SpringBootApplication(scanBasePackages = {"com.dgindusoft"})
public class ConnectorApplication {
    public static void main(String[] args) {
        SpringApplication.run(ConnectorApplication.class, args);
        log.info("====================启动成功!!!=====================");
    }

    @Bean
    MeterRegistryCustomizer<MeterRegistry> configurer() {
        return (registry) -> registry.config().commonTags("application", "connector");
    }
}

10.configuration

spring:
  indexName: idx_spc_
  kafka:
    bootstrap-servers: kafka1:9092,kafka2:9092,kafka3:9092
    consumer:
      # start from the earliest unconsumed offset
      auto-offset-reset: earliest
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      # consumer group IDs
      group-id1: connector1
      group-id2: connector2
      group-id3: connector3
      # maximum number of records pulled per poll
      max-poll-records: 1000
      # auto-commit settings (disabled below; offsets are committed manually)
      auto-commit-interval: 1000
      enable-auto-commit: false

  # MySQL configuration
  datasource:
    druid:
      db-type: com.alibaba.druid.pool.DruidDataSource
      driverClassName: com.mysql.cj.jdbc.Driver
      url: jdbc:mysql://172.16.18.211:3306/gring_device?serverTimezone=Asia/Shanghai&characterEncoding=utf8&useSSL=false
      username: dgis
      password: dgis_1234
      # initial pool size
      initial-size: 5
      # minimum number of idle connections
      min-idle: 5
      max-active: 20
      max-wait: 30000
      # interval between idle-connection eviction runs
      time-between-eviction-runs-millis: 60000
      # minimum lifetime of a connection in the pool
      min-evictable-idle-time-millis: 300000
      validation-query: select '1' from dual
      # enable PSCache and set its size per connection
      pool-prepared-statements: true
      max-open-prepared-statements: 20
      max-pool-prepared-statement-per-connection-size: 20
  # JPA configuration
  jpa:
    hibernate:
      # set to none in production to avoid automatic schema updates at runtime
      ddl-auto: none
    properties:
      hibernate:
        dialect: org.hibernate.dialect.MySQL5InnoDBDialect

    open-in-view: true
hbase:
  zookeeper:
    quorum: zookeeper1:2181,zookeeper2:2181,zookeeper3:2181

# Prometheus monitoring configuration
management:
  endpoints:
    web:
      exposure:
        include: '*'
      base-path: /actuator
    health:
      show-details: always
  metrics:
    export:
      prometheus:
        enabled: true
      jmx:
        enabled: true

server:
  port: 8201
# log directory
logging:
  module: CONNECTOR
  path: ./logs/
  config: classpath:config/logback-spring.xml

11.logback

<?xml version="1.0" encoding="UTF-8"?>
<configuration scan="true" scanPeriod="10 seconds">
    <contextName>GringIIoD</contextName>

    <property name="log.path" value="${LOG_PATH}"/>
    <springProperty scope="context" name="log.module" source="logging.module"/>

    <!--0. 日志格式和颜色渲染 -->
    <!-- 彩色日志依赖的渲染类 -->
    <conversionRule conversionWord="clr" converterClass="org.springframework.boot.logging.logback.ColorConverter"/>
    <conversionRule conversionWord="wex"
                    converterClass="org.springframework.boot.logging.logback.WhitespaceThrowableProxyConverter"/>
    <conversionRule conversionWord="wEx"
                    converterClass="org.springframework.boot.logging.logback.ExtendedWhitespaceThrowableProxyConverter"/>
    <!-- 彩色日志格式 -->
    <property name="CONSOLE_LOG_PATTERN"
              value="${CONSOLE_LOG_PATTERN:-%clr(%d{yyyy-MM-dd HH:mm:ss.SSS}){faint} %clr(${LOG_LEVEL_PATTERN:-%5p}) %clr(${PID:- }){magenta} %clr(---){faint} %clr([%15.15t]){faint} %clr(%-40.40logger{39}){cyan} %clr(:){faint} %m%n${LOG_EXCEPTION_CONVERSION_WORD:-%wEx}}"/>

   <!--1. 输出到控制台-->
    <appender name="CONSOLE" class="ch.qos.logback.core.ConsoleAppender">
        <!--此日志appender是为开发使用,只配置最底级别,控制台输出的日志级别是大于或等于此级别的日志信息-->
        <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
            <level>info</level>
        </filter>
        <encoder>
            <Pattern>${CONSOLE_LOG_PATTERN}</Pattern>
            <!-- 设置字符集 -->
            <charset>UTF-8</charset>
        </encoder>
    </appender>
 <!--2. 输出到文档-->
    <!-- 2.1 level为 DEBUG 日志,时间滚动输出  -->
    <appender name="DEBUG_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- 正在记录的日志文档的路径及文档名 -->
        <file>${log.path}/${log.module}-debug.log</file>
        <!--日志文档输出格式-->
        <encoder>
            <pattern>${log.module} %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50}.%M\(%line\) - %msg%n</pattern>
            <charset>UTF-8</charset> <!-- 设置字符集 -->
        </encoder>
  <!-- 日志记录器的滚动策略,按日期,按大小记录 -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <!-- 日志归档 -->
            <fileNamePattern>${log.path}/${log.module}-debug-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
            <!--日志文档保留天数-->
            <maxHistory>15</maxHistory>
        </rollingPolicy>
        <!-- 此日志文档只记录debug级别的 -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>debug</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>
 <!-- 2.2 level为 INFO 日志,时间滚动输出  -->
    <appender name="INFO_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- 正在记录的日志文档的路径及文档名 -->
        <file>${log.path}/${log.module}-info.log</file>
        <!--日志文档输出格式-->
        <encoder>
            <pattern>${log.module} %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50}.%M\(%line\) - %msg%n
            </pattern>
            <!--            <pattern>%date{yyyy-MM-dd HH:mm:ss.SSS} | %thread | %-5level | %logger{36}.%M\(%line\) | %X{clientDatetime} | %X{ip} | %X{clientIp} | %X{upIp} | %X{tokenId} | %X{operateId} | %X{deviceId} |  %msg%n</pattern>-->
            <charset>UTF-8</charset>
        </encoder>
        <!-- 日志记录器的滚动策略,按日期,按大小记录 -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
              <!-- 每天日志归档路径以及格式 -->
            <fileNamePattern>${log.path}/${log.module}-info-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
            <!--日志文档保留天数-->
            <maxHistory>15</maxHistory>
        </rollingPolicy>
        <!-- 此日志文档只记录info级别的 -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>info</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>

 <!-- 2.3 level为 WARN 日志,时间滚动输出  -->
    <appender name="WARN_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- 正在记录的日志文档的路径及文档名 -->
        <file>${log.path}/${log.module}-warn.log</file>
        <!--日志文档输出格式-->
        <encoder>
            <pattern>${log.module} %d{yyyy-MM-dd HH:mm:ss.SSS} [%thread] %-5level %logger{50}.%M\(%line\) - %msg%n
            </pattern>
            <charset>UTF-8</charset> <!-- 此处设置字符集 -->
        </encoder>
        <!-- 日志记录器的滚动策略,按日期,按大小记录 -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${log.path}/${logging.module}-warn-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
             <!--日志文档保留天数-->
            <maxHistory>15</maxHistory>
        </rollingPolicy>
        <!-- 此日志文档只记录warn级别的 -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>warn</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
        <!--<filter class="ch.qos.logback.classic.filter.ThresholdFilter">-->
        <!--<level>WARN</level>-->
        <!--</filter>-->
    </appender>

 <!-- 2.4 level为 ERROR 日志,时间滚动输出  -->
    <appender name="ERROR_FILE" class="ch.qos.logback.core.rolling.RollingFileAppender">
        <!-- 正在记录的日志文档的路径及文档名 -->
        <file>${log.path}/${log.module}-error.log</file>
        <!--日志文档输出格式-->
        <encoder>
            <pattern>${log.module}|%d{yyyy-MM-dd HH:mm:ss.SSS}|[%thread]|%-5level|%logger{50}.%M\(%line\)|%msg%n
            </pattern>
            <charset>UTF-8</charset> <!-- 此处设置字符集 -->
        </encoder>
        <!-- 日志记录器的滚动策略,按日期,按大小记录 -->
        <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
            <fileNamePattern>${log.path}/${log.module}-error-%d{yyyy-MM-dd}.%i.log</fileNamePattern>
            <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
                <maxFileSize>100MB</maxFileSize>
            </timeBasedFileNamingAndTriggeringPolicy>
              <!--日志文档保留天数-->
            <maxHistory>15</maxHistory>
        </rollingPolicy>
        <!-- 此日志文档只记录ERROR级别的 -->
        <filter class="ch.qos.logback.classic.filter.LevelFilter">
            <level>ERROR</level>
            <onMatch>ACCEPT</onMatch>
            <onMismatch>DENY</onMismatch>
        </filter>
    </appender>
<!-- 4. 最终的策略 -->
    <logger name="com.dgindusoft" level="debug"/>
    <root level="info">
        <appender-ref ref="CONSOLE"/>
        <appender-ref ref="DEBUG_FILE"/>
        <appender-ref ref="INFO_FILE"/>
        <appender-ref ref="WARN_FILE"/>
        <appender-ref ref="ERROR_FILE"/>
    </root>

</configuration>
       

12.pom.xml

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <parent>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
        <version>2.1.0.RELEASE</version>
        <relativePath/> <!-- lookup parent from repository -->
    </parent>
    <groupId>com.dgindusoft</groupId>
    <artifactId>connector</artifactId>
    <version>1.0</version>
    <name>connector</name>
    <description>this component connect kafka with hbase</description>
 <properties>
        <java.version>1.8</java.version>
        <druid.version>1.1.10</druid.version>
        <hutool.version>5.3.7</hutool.version>
    </properties>
    <dependencies>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-web</artifactId>
        </dependency>
        <dependency>
            <groupId>org.projectlombok</groupId>
            <artifactId>lombok</artifactId>
            <optional>true</optional>
        </dependency>
        <dependency>
            <groupId>org.springframework.kafka</groupId>
            <artifactId>spring-kafka</artifactId>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-test</artifactId>
         
  <scope>test</scope>
            <exclusions>
                <exclusion>
                    <groupId>org.junit.vintage</groupId>
                    <artifactId>junit-vintage-engine</artifactId>
                </exclusion>
            </exclusions>
        </dependency>
        <dependency>

 <groupId>org.apache.phoenix</groupId>
            <artifactId>phoenix-core</artifactId>
            <exclusions>
                <exclusion>
                    <groupId>log4j</groupId>
                    <artifactId>log4j</artifactId>
                </exclusion>
                <exclusion>
                    <groupId>org.slf4j</groupId>
                    <artifactId>slf4j-log4j12</artifactId>
                </exclusion>
            </exclusions>
            <version>5.0.0-HBase-2.0</version>
        </dependency>
        <!-- https://mvnrepository.com/artifact/org.apache.hadoop/hadoop-common -->

 <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <exclusions>
                <exclusion>
                    <groupId>log4j</groupId>
                    <artifactId>log4j</artifactId>
                </exclusion>
                <exclusion>
                    <groupId>org.slf4j</groupId>
                    <artifactId>slf4j-log4j12</artifactId>
                </exclusion>
            </exclusions>
            <version>3.1.2</version>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.2.59</version>
        </dependency>

 <!--Mysql依赖包 start-->
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <scope>runtime</scope>
        </dependency>
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>druid-spring-boot-starter</artifactId>
            <version>${druid.version}</version>
        </dependency>
        <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-data-jpa</artifactId>
        </dependency>
        <!--Mysql依赖包 end-->
        <dependency>
            <groupId>cn.hutool</groupId>
            <artifactId>hutool-all</artifactId>
            <version>${hutool.version}</version>
        </dependency>
        <!--prometheus监控  https://prometheus.io/docs/introduction/overview/-->

  <dependency>
            <groupId>org.springframework.boot</groupId>
            <artifactId>spring-boot-starter-actuator</artifactId>
        </dependency>
        <dependency>
            <groupId>io.micrometer</groupId>
            <artifactId>micrometer-registry-prometheus</artifactId>
        </dependency>
       </dependencies>
    <repositories>
        <repository>
            <id>public</id>
            <name>nexus</name>
            <url>http://maven.aliyun.com/nexus/content/groups/public/</url>
            <releases>
                <enabled>true</enabled>
            </releases>
        </repository>
    </repositories>
    <pluginRepositories>
        <pluginRepository>
            <id>public</id>
            <name>nexus</name>
            <url>http://maven.aliyun.com/nexus/content/groups/public/</url>
            <releases>
                <enabled>true</enabled>
            </releases>
            <snapshots>
                <enabled>false</enabled>
            </snapshots>
        </pluginRepository>
    </pluginRepositories>
    <build>
        <plugins>
            <plugin>
                <groupId>org.springframework.boot</groupId>
                <artifactId>spring-boot-maven-plugin</artifactId>
            </plugin>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-surefire-plugin</artifactId>
                <configuration>
                    <skip>true</skip>
                </configuration>
            </plugin>
        </plugins>

    </build>


</project>


Reprinted from blog.csdn.net/qq_35207086/article/details/124984959