
Add some classic code

zhzhenqin 4 years ago
parent
commit
4cd41a6035

+ 149 - 0
common-utils/NetworkInterfaceManager.java

@@ -0,0 +1,149 @@
+package com.primeton.damp.utils;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.Inet4Address;
+import java.net.NetworkInterface;
+import java.net.SocketException;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.Objects;
+
+/**
+ *
+ * The client (Taibao) does not allow importing java.net.InetAddress, so the fully
+ * qualified name is used inline to keep code-scanning tools from reporting a security issue.
+ * <pre>
+ *
+ * Created by zhenqin.
+ * User: zhenqin
+ * Date: 2019/7/18
+ * Time: 11:02
+ * Vendor: yiidata.com
+ * To change this template use File | Settings | File Templates.
+ *
+ * </pre>
+ *
+ * @author zhenqin
+ */
+public enum NetworkInterfaceManager {
+
+
+    INSTANCE;
+
+
+    private java.net.InetAddress m_local;
+
+
+
+    private java.net.InetAddress m_localHost;
+
+
+    private static final Logger logger = LoggerFactory.getLogger(NetworkInterfaceManager.class);
+
+    private NetworkInterfaceManager() {
+        load();
+    }
+
+
+
+    public java.net.InetAddress findValidateIp(List<java.net.InetAddress> addresses) {
+        java.net.InetAddress local = null;
+        int maxWeight = -1;
+
+        for (java.net.InetAddress address : addresses) {
+            if(address instanceof Inet4Address) {
+                int weight = 0;
+                if(address.isSiteLocalAddress()) {
+                    weight += 8;
+                }
+
+                if(address.isLinkLocalAddress()) {
+                    weight += 4;
+                }
+
+                if(address.isLoopbackAddress()) {
+                    weight += 2;
+                }
+
+                if(!Objects.equals(address.getHostName(), address.getHostAddress())) {
+                    weight += 1;
+                }
+
+                // keep the address with the highest weight and bind to it
+                if(weight > maxWeight) {
+                    maxWeight = weight;
+                    local = address;
+                }
+            }
+        }
+        return local;
+    }
+
+    public String getLocalHostAddress() {
+        return m_local.getHostAddress();
+    }
+
+
+    public String getLocalHostName() {
+        try {
+            if(m_localHost == null) {
+                m_localHost = java.net.InetAddress.getLocalHost();
+            }
+            return m_localHost.getHostName();
+        } catch (UnknownHostException e) {
+            return m_local.getHostName();
+        }
+    }
+
+    private String getProperty(String jvmArg, String envArg) {
+        String value = System.getenv(envArg);
+        if(value == null) {
+            value = System.getProperty(jvmArg);
+        }
+        return value;
+    }
+
+
+    private void load() {
+        String ip = getProperty("host.name", "HOST_NAME");
+        if(ip != null) {
+            try {
+                m_local = java.net.InetAddress.getByName(ip);
+                return;
+            } catch (UnknownHostException e) {
+                logger.error("ip 获取异常。", e);
+                //ignore
+            }
+        }
+
+        try {
+            Enumeration<NetworkInterface> interfaces = NetworkInterface.getNetworkInterfaces();
+            List<NetworkInterface> nis = (interfaces == null) ? Collections.emptyList() : Collections.list(interfaces);
+            List<java.net.InetAddress> addresses = new ArrayList<>(nis.size());
+            java.net.InetAddress local = null;
+            try {
+                for (NetworkInterface ni : nis) {
+                    if(ni.isUp() && !ni.isLoopback()) {
+                        addresses.addAll(Collections.list(ni.getInetAddresses()));
+                    }
+                }
+
+                local = findValidateIp(addresses);
+            } catch (Exception e) {
+                //ignore
+            }
+
+            if(local != null) {
+                m_local = local;
+                return;
+            }
+        } catch (SocketException e) {
+            //ignore exception
+        }
+        m_local = java.net.InetAddress.getLoopbackAddress();
+    }
+}
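
For context, a minimal usage sketch of the singleton above (not part of the commit; the demo class name is hypothetical):

```java
// Resolve the local address once through the enum singleton. findValidateIp
// favors site-local IPv4 addresses on interfaces that are up and not loopback;
// setting host.name (JVM property) or HOST_NAME (env var) overrides detection.
public class LocalAddressDemo {
    public static void main(String[] args) {
        String ip = NetworkInterfaceManager.INSTANCE.getLocalHostAddress();
        String host = NetworkInterfaceManager.INSTANCE.getLocalHostName();
        System.out.println(host + " -> " + ip);
    }
}
```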

+ 4 - 0
common-utils/README.md

@@ -0,0 +1,4 @@
+Features:
+
+- ID generator: SnowflakeIdUtils
+- Network interface address lookup: NetworkInterfaceManager

+ 189 - 0
common-utils/SnowflakeIdUtils.java

@@ -0,0 +1,189 @@
+package com.primeton.damp.utils;
+
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.commons.lang3.StringUtils;
+
+import java.net.Inet4Address;
+import java.net.UnknownHostException;
+
+/**
+ * Twitter_Snowflake<br>
+ * The SnowFlake bit layout is as follows (parts separated by -):<br>
+ * 0 - 0000000000 0000000000 0000000000 0000000000 0 - 00000 - 00000 - 000000000000 <br>
+ * 1 sign bit: Java's long is signed and the highest bit is the sign bit; IDs are positive, so the top bit is always 0.<br>
+ * 41 timestamp bits (millisecond precision). Note that they do not store the current timestamp itself but the
+ * difference (current timestamp - start timestamp), where the start timestamp is usually the time the generator
+ * went into service, fixed in code (the twepoch field of this class). 41 bits cover about 69 years: (1L << 41) / (1000L * 60 * 60 * 24 * 365) = 69<br>
+ * 10 machine bits, allowing deployment on 1024 nodes: 5 bits of datacenterId plus 5 bits of workerId<br>
+ * 12 sequence bits: a per-millisecond counter; 12 bits give each node 4096 IDs per millisecond (same machine, same timestamp)<br>
+ * Altogether exactly 64 bits, one long.<br>
+ * SnowFlake IDs are roughly ordered by time overall, never collide across the distributed system (disambiguated by the data center ID and worker ID), and are fast to generate: in tests, SnowFlake produced about 260,000 IDs per second.
+
+public class SnowflakeIdUtils {
+    // ==============================Fields===========================================
+    /** Start epoch in milliseconds (this value corresponds to 2017-03-10) */
+    private final long twepoch = 1489111610226L;
+
+    /** Number of bits for the worker id */
+    private final long workerIdBits = 5L;
+
+    /** Number of bits for the data center id */
+    private final long dataCenterIdBits = 5L;
+
+    /** Maximum worker id: 31 (this shift trick quickly yields the largest number representable in the given bits) */
+    private final long maxWorkerId = -1L ^ (-1L << workerIdBits);
+
+    /** Maximum data center id: 31 */
+    private final long maxDataCenterId = -1L ^ (-1L << dataCenterIdBits);
+
+    /** Number of bits for the sequence */
+    private final long sequenceBits = 12L;
+
+    /** Worker ID is shifted left by 12 bits */
+    private final long workerIdShift = sequenceBits;
+
+    /** Data center ID is shifted left by 17 bits (12+5) */
+    private final long dataCenterIdShift = sequenceBits + workerIdBits;
+
+    /** Timestamp is shifted left by 22 bits (12+5+5) */
+    private final long timestampLeftShift = sequenceBits + workerIdBits + dataCenterIdBits;
+
+    /** Mask for the sequence: 4095 (0b111111111111 = 0xfff = 4095) */
+    private final long sequenceMask = -1L ^ (-1L << sequenceBits);
+
+    /** Worker ID (0~31) */
+    private long workerId;
+
+    /** Data center ID (0~31) */
+    private long dataCenterId;
+
+    /** Per-millisecond sequence (0~4095) */
+    private long sequence = 0L;
+
+    /** Timestamp of the last generated ID */
+    private long lastTimestamp = -1L;
+
+    private static final SnowflakeIdUtils idWorker = new SnowflakeIdUtils(getWorkId(), getDataCenterId());
+
+    //==============================Constructors=====================================
+    /**
+     * Constructor.
+     * @param workerId worker ID (0~31)
+     * @param dataCenterId data center ID (0~31)
+     */
+    private SnowflakeIdUtils(long workerId, long dataCenterId) {
+        if (workerId > maxWorkerId || workerId < 0) {
+            throw new IllegalArgumentException(String.format("workerId can't be greater than %d or less than 0", maxWorkerId));
+        }
+        if (dataCenterId > maxDataCenterId || dataCenterId < 0) {
+            throw new IllegalArgumentException(String.format("dataCenterId can't be greater than %d or less than 0", maxDataCenterId));
+        }
+        this.workerId = workerId;
+        this.dataCenterId = dataCenterId;
+    }
+
+    // ==============================Methods==========================================
+    /**
+     * Get the next ID (this method is thread-safe).
+     * @return SnowflakeId
+     */
+    public synchronized long nextId() {
+        long timestamp = timeGen();
+
+        // If the current time is before the timestamp of the last generated ID,
+        // the system clock has moved backwards; refuse to generate and throw.
+        if (timestamp < lastTimestamp) {
+            throw new RuntimeException(
+                    String.format("Clock moved backwards.  Refusing to generate id for %d milliseconds", lastTimestamp - timestamp));
+        }
+
+        // Same millisecond as the last ID: advance the intra-millisecond sequence
+        if (lastTimestamp == timestamp) {
+            sequence = (sequence + 1) & sequenceMask;
+            // Sequence overflow within this millisecond
+            if (sequence == 0) {
+                // Block until the next millisecond and take the new timestamp
+                timestamp = tilNextMillis(lastTimestamp);
+            }
+        }
+        // Timestamp changed: reset the intra-millisecond sequence
+        else {
+            sequence = 0L;
+        }
+
+        // Remember the timestamp of the last generated ID
+        lastTimestamp = timestamp;
+
+        // Shift each part into place and OR them together into a 64-bit ID
+        return ((timestamp - twepoch) << timestampLeftShift)
+                | (dataCenterId << dataCenterIdShift)
+                | (workerId << workerIdShift)
+                | sequence;
+    }
+
+    /**
+     * Block until the next millisecond, i.e. until a new timestamp is obtained.
+     * @param lastTimestamp timestamp of the last generated ID
+     * @return the current timestamp in ms
+     */
+    protected long tilNextMillis(long lastTimestamp) {
+        long timestamp = timeGen();
+        while (timestamp <= lastTimestamp) {
+            timestamp = timeGen();
+        }
+        return timestamp;
+    }
+
+    /**
+     * Return the current time in milliseconds.
+     * @return current time (ms)
+     */
+    protected long timeGen() {
+        return System.currentTimeMillis();
+    }
+
+    private static Long getWorkId(){
+        try {
+            String hostAddress = Inet4Address.getLocalHost().getHostAddress();
+            int[] ints = StringUtils.toCodePoints(hostAddress);
+            int sums = 0;
+            for(int b : ints){
+                sums += b;
+            }
+            return (long)(sums % 32);
+        } catch (UnknownHostException e) {
+            // Fall back to a random worker id if the lookup fails
+            // (the upper bound of RandomUtils.nextLong is exclusive, so use 32 to cover 0~31)
+            return RandomUtils.nextLong(0, 32);
+        }
+    }
+
+    private static Long getDataCenterId(){
+        int[] ints = StringUtils.toCodePoints(NetworkInterfaceManager.INSTANCE.getLocalHostAddress());
+        int sums = 0;
+        for (int i : ints) {
+            sums += i;
+        }
+        return (long)(sums % 32);
+    }
+
+
+    /**
+     * Static entry point for generating IDs.
+     *
+     * @return the next ID
+     */
+    public static Long generateId(){
+        return idWorker.nextId();
+    }
+
+    //==============================Test=============================================
+    /** Test */
+    public static void main(String[] args) {
+        System.out.println(System.currentTimeMillis());
+        long startTime = System.nanoTime();
+        for (int i = 0; i < 50000; i++) {
+            long id = SnowflakeIdUtils.generateId();
+            System.out.println(id);
+        }
+        System.out.println((System.nanoTime()-startTime)/1000000+"ms");
+    }
+}
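
To make the documented bit layout concrete, here is a small decoding sketch (not part of the commit; the class name is hypothetical, and EPOCH is assumed to mirror the private twepoch value):

```java
// Decompose a snowflake ID: 41 timestamp bits | 5 datacenter | 5 worker | 12 sequence.
public class SnowflakeDecode {
    static final long EPOCH = 1489111610226L; // assumed equal to SnowflakeIdUtils.twepoch

    public static void main(String[] args) {
        long id = SnowflakeIdUtils.generateId();
        long sequence     = id & 0xFFF;           // low 12 bits
        long workerId     = (id >>> 12) & 0x1F;   // next 5 bits
        long dataCenterId = (id >>> 17) & 0x1F;   // next 5 bits
        long timestampMs  = (id >>> 22) + EPOCH;  // remaining 41 bits, plus the epoch
        System.out.printf("ts=%d dc=%d worker=%d seq=%d%n",
                timestampMs, dataCenterId, workerId, sequence);
    }
}
```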

+ 352 - 0
hbaseservice/CommonHBaseService.java

@@ -0,0 +1,352 @@
+package com.primeton.dsp.datarelease.data.service;
+
+import com.alibaba.fastjson.JSONObject;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.RemovalListener;
+import com.google.common.cache.RemovalNotification;
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HBaseConfiguration;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.*;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * <p>
+ * Utility class for common HBase operations.
+ * </p>
+ * <p/>
+ * Created by ZhenQin on 2018/5/17 0017-9:52
+ * Vendor: 9sdata.cn
+ */
+public class CommonHBaseService implements Closeable  {
+
+
+    /**
+     * Values are converted to byte[] by first rendering them as text, then encoding the text
+     */
+    public final static int TEXT_BYTES_TYPE = 0;
+
+
+    /**
+     * HBase keys and values are all raw binary
+     */
+    public final static int BIN_BYTES_TYPE = 1;
+
+
+    /**
+     * HBase table cache
+     */
+    final static Cache<String, Table> HBASE_TABLE_CACHED = CacheBuilder.newBuilder()
+            .maximumSize(100)
+            .initialCapacity(20)
+            .expireAfterAccess(30, TimeUnit.MINUTES)  // evict a table after 30 minutes without access
+            .removalListener(new RemovalListener<String, Table>() {
+                @Override
+                public void onRemoval(RemovalNotification<String, Table> notification) {
+                    try {
+                        notification.getValue().close();
+                    } catch (IOException e) {
+                        e.printStackTrace();
+                    }
+                }
+            })
+            .build();
+
+    /**
+     * Table name
+     */
+    final String tableName;
+
+    /**
+     * Hbase Table
+     */
+    final Table table;
+
+    /**
+     * Byte-conversion mode: TEXT_BYTES_TYPE or BIN_BYTES_TYPE
+     */
+    private int bytesType = TEXT_BYTES_TYPE;
+
+
+    final static Logger logger = LoggerFactory.getLogger(CommonHBaseService.class);
+
+    /**
+     * Constructor taking an HBase Table.
+     *
+     * @param table the HBase table
+     */
+    public CommonHBaseService(Table table) {
+        this(table, TEXT_BYTES_TYPE);
+    }
+
+
+    /**
+     * Constructor taking an HBase Table and a byte-conversion mode.
+     *
+     * @param table the HBase table
+     * @param type byte-conversion mode, TEXT_BYTES_TYPE or BIN_BYTES_TYPE
+     */
+    public CommonHBaseService(Table table, int type) {
+        this.table = table;
+        this.tableName = table.getName().getNameAsString();
+        this.bytesType = type;
+    }
+
+
+    public CommonHBaseService(Connection connection, String tableName) throws IOException {
+        Table table = HBASE_TABLE_CACHED.getIfPresent(tableName);
+        if (table == null) {
+            boolean existsTable = connection.getAdmin().tableExists(TableName.valueOf(tableName));
+            //boolean existsTable = false;
+            if (!existsTable) {
+                logger.warn("table: " + tableName + " not exists.");
+                this.tableName = null;
+                //throw new IllegalStateException("table: " + tableName + " not exists.");
+                this.table = null;
+            } else {
+                this.tableName = tableName;
+                this.table = connection.getTable(TableName.valueOf(tableName));
+                HBASE_TABLE_CACHED.put(tableName, this.table);
+            }
+        } else {
+            this.tableName = tableName;
+            this.table = table;
+        }
+        this.bytesType = TEXT_BYTES_TYPE;
+    }
+
+
+    public CommonHBaseService(Configuration conf, String tableName) throws IOException {
+        this(ConnectionFactory.createConnection(conf), tableName);
+    }
+
+    public int put(JSONObject values, String defaultFamily) throws IOException {
+        String key = values.getString("key");
+        final JSONObject copy = new JSONObject(values.size() - 1);
+        Set<Map.Entry<String, Object>> entries = values.entrySet();
+        for (Map.Entry<String, Object> entry : entries) {
+            if ("key".equals(entry.getKey())) {
+                continue;
+            }
+            copy.put(entry.getKey(), entry.getValue());
+        }
+        return put(key, copy, defaultFamily);
+    }
+
+
+    public int put(String key, JSONObject values, String defaultFamily) throws IOException {
+        table.put(jsonConvert2Put(key, values, defaultFamily));
+        return 1;
+    }
+
+
+    public int puts(JSONObject[] values, String defaultFamily) throws IOException {
+        return puts(Arrays.asList(values), defaultFamily);
+    }
+
+
+    public int puts(Collection<JSONObject> values, String defaultFamily) throws IOException {
+        List<Put> puts = new ArrayList<>(values.size());
+        for (JSONObject value : values) {
+            puts.add(jsonConvert2Put(value.getString("key"), value, defaultFamily));
+        }
+        table.put(puts);
+        return puts.size();
+    }
+
+    /**
+     * Convert a JSON object into an HBase Put; the JSON must contain a "key" field.
+     *
+     * @param key row key
+     * @param value JSON fields to write
+     * @param defaultFamily column family used when a field name carries no "family:" prefix
+     * @return the assembled Put
+     */
+    private Put jsonConvert2Put(String key, JSONObject value, String defaultFamily) {
+        if (StringUtils.isBlank(key)) {
+            throw new IllegalArgumentException("no key value to set...");
+        }
+        Put put = new Put(Bytes.toBytes(key));
+        Set<Map.Entry<String, Object>> entries = value.entrySet();
+        for (Map.Entry<String, Object> entry : entries) {
+            String field = entry.getKey();
+            if (StringUtils.isBlank(field) || ":".equals(field)) {
+                throw new IllegalArgumentException("invalid field: " + field);
+            }
+            int indexOf = field.indexOf(":");
+            if (indexOf <= 0) {
+                //a=abc OR :a=abc
+                field = indexOf == 0 ? field.substring(1) : field;
+                put.addColumn(Bytes.toBytes(defaultFamily), Bytes.toBytes(field), getBytes(entry.getValue()));
+            } else {
+                //f:a=abc
+                String f = field.substring(0, indexOf);
+                String x = field.substring(indexOf + 1);
+                put.addColumn(Bytes.toBytes(f), Bytes.toBytes(x), getBytes(entry.getValue()));
+            }
+        }
+        return put;
+    }
+
+    /**
+     * Convert a value object to byte[].
+     *
+     * @param value the value to encode
+     * @return the encoded bytes
+     */
+    private byte[] getBytes(Object value) {
+        if (value == null) {
+            return new byte[0];
+        }
+        if (value instanceof String) {
+            return Bytes.toBytes((String) value);
+        }
+
+        if (value instanceof byte[]) {
+            return (byte[]) value;
+        }
+
+        if (bytesType == 0) {
+            return Bytes.toBytes(String.valueOf(value));
+        }
+
+        if (value instanceof Integer) {
+            return Bytes.toBytes((Integer) value);
+        } else if (value instanceof Long) {
+            return Bytes.toBytes((Long) value);
+        } else if (value instanceof Double) {
+            return Bytes.toBytes((Double) value);
+        } else if (value instanceof Float) {
+            return Bytes.toBytes((Float) value);
+        } else if (value instanceof Byte) {
+            return Bytes.toBytes((Byte) value);
+        } else if (value instanceof Boolean) {
+            return Bytes.toBytes((Boolean) value);
+        } else if (value instanceof Short) {
+            return Bytes.toBytes((Short) value);
+        } else if (value instanceof Date) {
+            return Bytes.toBytes(((Date) value).getTime());
+        }
+
+        // All other unknown types are stored as strings
+        return Bytes.toBytes(String.valueOf(value));
+    }
+
+
+    /**
+     * Convert an HBase Result to JSON; the row key is stored under the JSON "key"
+     * field and must be a string.
+     *
+     * @param result the HBase result
+     * @return the JSON representation
+     */
+    private JSONObject result2JSON(Result result) {
+        JSONObject json = new JSONObject();
+        byte[] row = result.getRow();
+        if (row == null || row.length == 0) {
+            return json;
+        }
+        json.put("key", Bytes.toString(row));
+
+        NavigableMap<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> map = result.getMap();
+        if (map == null || map.isEmpty()) {
+            return json;
+        }
+        for (Map.Entry<byte[], NavigableMap<byte[], NavigableMap<Long, byte[]>>> entry : map.entrySet()) {
+            byte[] familyBytes = entry.getKey();
+            String family = Bytes.toString(familyBytes);
+            NavigableMap<byte[], NavigableMap<Long, byte[]>> values = entry.getValue();
+            for (Map.Entry<byte[], NavigableMap<Long, byte[]>> en : values.entrySet()) {
+                String field = Bytes.toString(en.getKey());
+
+                NavigableMap<Long, byte[]> value = en.getValue();
+                Map.Entry<Long, byte[]> firstEntry = value.firstEntry();
+                if (firstEntry != null) {
+                    json.put(family + ":" + field, Bytes.toString(firstEntry.getValue()));
+                }
+            }
+        }
+        return json;
+    }
+
+    public JSONObject get(String rowKey) throws IOException {
+        if (StringUtils.isBlank(this.tableName)) {
+            // table not exists.
+            return null;
+        }
+        Result result = table.get(new Get(Bytes.toBytes(rowKey)));
+        if (result == null) {
+            return null;
+        }
+        return result2JSON(result);
+    }
+
+
+    public List<JSONObject> get(List<String> rowKeys) throws IOException {
+        if (StringUtils.isBlank(this.tableName)) {
+            // table not exists.
+            return null;
+        }
+        List<Get> gets = new ArrayList<>(rowKeys.size());
+        for (String rowKey : rowKeys) {
+            gets.add(new Get(Bytes.toBytes(rowKey)));
+        }
+        Result[] results = table.get(gets);
+        if (results == null) {
+            return null;
+        }
+
+        List<JSONObject> res = new ArrayList<>(rowKeys.size());
+        for (Result result : results) {
+            res.add(result2JSON(result));
+        }
+        return res;
+    }
+
+
+    public int delete(String rowkey) throws IOException {
+        table.delete(new Delete(Bytes.toBytes(rowkey)));
+        return 1;
+    }
+
+
+    public int delete(byte[] rowkey) throws IOException {
+        table.delete(new Delete(rowkey));
+        return 1;
+    }
+
+
+    public int delete(String... rowkeys) throws IOException {
+        return delete(Arrays.asList(rowkeys));
+    }
+
+
+    public int delete(Collection<String> rowkeys) throws IOException {
+        List<Delete> deletes = new ArrayList<>(rowkeys.size());
+        for (String rowkey : rowkeys) {
+            deletes.add(new Delete(Bytes.toBytes(rowkey)));
+        }
+        table.delete(deletes);
+        return deletes.size();
+    }
+
+
+    @Override
+    public void close() throws IOException {
+        if(table != null) {
+            table.close();
+        }
+    }
+
+    public void setBytesType(int bytesType) {
+        this.bytesType = bytesType;
+    }
+
+}
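
A minimal usage sketch (not part of the commit; assumes an hbase-site.xml on the classpath and an existing table "demo" with column family "f"):

```java
import com.alibaba.fastjson.JSONObject;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HBaseServiceDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (CommonHBaseService service = new CommonHBaseService(conf, "demo")) {
            JSONObject row = new JSONObject();
            row.put("key", "row-001");  // mandatory row key
            row.put("name", "alice");   // bare field name -> default family "f"
            row.put("f:age", "30");     // explicit family:qualifier
            service.put(row, "f");

            // reads back as {"key":"row-001","f:age":"30","f:name":"alice"}
            System.out.println(service.get("row-001"));
        }
    }
}
```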

+ 57 - 0
hive_auth/CustomPasswdAuthenticator.java

@@ -0,0 +1,57 @@
+package com.primeton.dgs.extractor.adapter.hive;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.conf.HiveConf;
+import javax.security.sasl.AuthenticationException;
+import org.slf4j.Logger;
+
+/**
+ * <pre>
+ *
+ * Created by zhenqin.
+ * User: zhenqin
+ * Date: 2020/8/10
+ * Time: 18:18
+ * Vendor: yiidata.com
+ * To change this template use File | Settings | File Templates.
+ *
+ * </pre>
+ *
+ * @author zhenqin
+ */
+public class CustomPasswdAuthenticator implements org.apache.hive.service.auth.PasswdAuthenticationProvider {
+
+    private static final Logger LOG = org.slf4j.LoggerFactory.getLogger(CustomPasswdAuthenticator.class);
+
+    private static final String HIVE_JDBC_PASSWD_AUTH_PREFIX="hive.jdbc_passwd.auth.%s";
+
+    private Configuration conf = null;
+
+    @Override
+    public void Authenticate(String userName, String passwd)
+            throws AuthenticationException {
+        LOG.info("user: " + userName + " is trying to log in.");
+        String passwdConf = getConf().get(String.format(HIVE_JDBC_PASSWD_AUTH_PREFIX, userName));
+        if (passwdConf == null) {
+            String message = "user's ACL configuration is not found. user:" + userName;
+            LOG.info(message);
+            throw new AuthenticationException(message);
+        }
+        if (!passwd.equals(passwdConf)) {
+            String message = "user name and password do not match. user:" + userName;
+            throw new AuthenticationException(message);
+        }
+    }
+
+    public Configuration getConf() {
+        if (conf == null) {
+            this.conf = new Configuration(new HiveConf());
+        }
+        return conf;
+    }
+
+    public void setConf(Configuration conf) {
+        this.conf = conf;
+    }
+
+}

+ 46 - 0
hive_auth/hive-site.xml

@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+    <property>
+        <name>hive.metastore.authorization.storage.checks</name>
+        <value>true</value>
+        <description>When set to true, Hive blocks table drops by users who lack the permission. Defaults to false.</description>
+    </property>
+    <property>
+        <name>hive.security.authorization.enabled</name>
+        <value>true</value>
+        <description>Enables Hive authorization checks. Defaults to false.</description>
+    </property>
+
+    <property>
+        <name>hive.security.authorization.createtable.owner.grants</name>
+        <value>ALL</value>
+        <!--
+         <value>select,drop</value>
+         <value>irwin,hadoop:select;tom:create</value>
+         -->
+        <description>Defaults to NULL; setting it to ALL is recommended so that users can access the tables they create.</description>
+    </property>
+
+    <property>
+        <name>hive.server2.authentication</name>
+        <value>CUSTOM</value>
+        <description>Use a custom username/password check for remote connections.</description>
+    </property>
+
+    <property>
+        <name>hive.server2.custom.authentication.class</name>
+        <value>com.primeton.dgs.extractor.adapter.hive.CustomPasswdAuthenticator</value>
+        <description>The class implementing the custom authentication check (shipped in a separate jar).</description>
+    </property>
+    <property>
+        <name>hive.jdbc_passwd.auth.dataware</name>
+        <value>dataware</value>
+        <description>Defines a username/password pair: user "dataware", password "dataware".</description>
+    </property>
+    <property>
+        <name>hive.jdbc_passwd.auth.hiveread</name>
+        <value>hiveread</value>
+    </property>
+</configuration>
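
With this configuration, a JDBC client authenticates against the hive.jdbc_passwd.auth.&lt;user&gt; pairs defined above. A minimal connection sketch (host and port are illustrative):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class HiveJdbcSmokeTest {
    public static void main(String[] args) throws Exception {
        // The user/password must match a hive.jdbc_passwd.auth.<user> property.
        Class.forName("org.apache.hive.jdbc.HiveDriver");
        try (Connection conn = DriverManager.getConnection(
                "jdbc:hive2://localhost:10000/default", "dataware", "dataware");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("show databases")) {
            while (rs.next()) {
                System.out.println(rs.getString(1));
            }
        }
    }
}
```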

+ 10 - 2
httpclient/RestApiUtils.java

@@ -18,6 +18,7 @@ import org.apache.http.conn.socket.ConnectionSocketFactory;
 import org.apache.http.conn.socket.PlainConnectionSocketFactory;
 import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
 import org.apache.http.entity.StringEntity;
+import org.apache.http.impl.client.BasicCookieStore;
 import org.apache.http.impl.client.CloseableHttpClient;
 import org.apache.http.impl.client.DefaultHttpRequestRetryHandler;
 import org.apache.http.impl.client.HttpClients;
@@ -57,13 +58,19 @@ import java.util.Map;
 public class RestApiUtils {
 
 
-	static Logger log = LoggerFactory.getLogger(RestApiUtils.class);
-	
+    static Logger log = LoggerFactory.getLogger(com.yiidata.intergration.api.utils.RestApiUtils.class);
+
     /**
      * http client
      */
     static final CloseableHttpClient httpClient;
 
+
+    /**
+     * Cookie store
+     */
+    private final static BasicCookieStore cookieStore = new BasicCookieStore();
+
     static {
         try {
             //register the socket connection factories for the http and https protocols
@@ -75,6 +82,7 @@ public class RestApiUtils {
             PoolingHttpClientConnectionManager cm = new PoolingHttpClientConnectionManager(socketFactoryRegistry);
             httpClient = HttpClients.custom()
                     .setConnectionManager(cm)
+                    .setDefaultCookieStore(cookieStore)
                     .setDefaultRequestConfig(RequestConfig.custom().setConnectTimeout(180000).build())
                     .setRetryHandler(new DefaultHttpRequestRetryHandler(3, false))
                     .build();
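
The effect of the new cookie store: because the pooled client is shared and now carries a default BasicCookieStore, a Set-Cookie received on one call is replayed on later calls through the same client. A behavior sketch (URLs illustrative; RestApiUtils' own request methods are not shown in this hunk, so plain HttpClient calls are used):

```java
// A session cookie set by the first response is sent automatically afterwards.
HttpGet login = new HttpGet("https://example.com/login");
httpClient.execute(login).close();  // server responds with Set-Cookie
HttpGet page = new HttpGet("https://example.com/profile");
httpClient.execute(page).close();   // cookie attached by the shared cookie store
```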

+ 20 - 10
java-commons-cache/CacheFactory.java

@@ -1,5 +1,6 @@
 package com.yiidata.intergration.web.modules.sys.cache;
 
+import com.primeton.damp.cache.ICache;
 import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -25,7 +26,7 @@ import java.util.Properties;
  * @author zhaopx
  */
 @Component("cache")
-public class CacheFactory implements FactoryBean, InitializingBean, DisposableBean {
+public class CacheFactory implements FactoryBean<ICache>, InitializingBean, DisposableBean {
 
     /**
      * logger
@@ -48,6 +49,10 @@ public class CacheFactory implements FactoryBean, InitializingBean, DisposableBe
     @Value("${cache.redis.port: 6379}")
     int redisPort;
 
+
+    @Value("${cache.redis.password: }")
+    String redisPassword;
+
     /**
      * Configuration
      */
@@ -69,11 +74,14 @@ public class CacheFactory implements FactoryBean, InitializingBean, DisposableBe
         config.setProperty("type", Optional.ofNullable(StringUtils.trimToNull(type)).orElse("mem"));
         config.setProperty("host", redisHost);
         config.setProperty("port", ""+redisPort);
+        config.setProperty("password", redisPassword);
         config.setProperty("db", ""+redisDb);
 
-        String cacheType = config.getProperty("type", "ehcache");
-        if(StringUtils.isBlank(cacheType) || "mem".equalsIgnoreCase(cacheType) ||
-                "ehcache".equalsIgnoreCase(cacheType)) {
+        String cacheType = config.getProperty("type", "mem");
+        if (StringUtils.isBlank(cacheType) || "mem".equalsIgnoreCase(cacheType) || "memory".equalsIgnoreCase(cacheType)) {
+            logger.info("use guava cache.");
+            this.cache = new GuavaMemCached();
+        } else if ("ehcache".equalsIgnoreCase(cacheType)) {
             logger.info("use ehcache.");
             this.cache = initEhcache();
         } else if("redis".equalsIgnoreCase(cacheType)) {
@@ -83,8 +91,9 @@ public class CacheFactory implements FactoryBean, InitializingBean, DisposableBe
             }
             int port = Integer.parseInt(this.config.getProperty("port", "3306"));
             int db = Integer.parseInt(this.config.getProperty("db", "0"));
+            String password = StringUtils.trimToNull(this.config.getProperty("password"));
             logger.info("use redis cache {}:{}/{}", host, port, db);
-            this.cache = initRedisCache(host, port, db);
+            this.cache = initRedisCache(host, port, password, db);
         } else {
             throw new IllegalArgumentException("unknown cache type " + cacheType);
         }
@@ -93,13 +102,14 @@ public class CacheFactory implements FactoryBean, InitializingBean, DisposableBe
 
     /**
      * Initialize the Redis client
-     * @param host
-     * @param port
+     * @param host redis host
+     * @param port redis port
+     * @param password redis password
      * @param db
      * @return
      */
-    private ICache initRedisCache(String host, int port, int db) {
-        return new Redised(host, port, db);
+    private ICache initRedisCache(String host, int port, String password, int db) {
+        return new Redised(host, port, password, db);
     }
 
     /**
@@ -111,7 +121,7 @@ public class CacheFactory implements FactoryBean, InitializingBean, DisposableBe
     }
 
     @Override
-    public Object getObject() throws Exception {
+    public ICache getObject() throws Exception {
         return cache;
     }
 

+ 72 - 0
java-commons-cache/GuavaMemCached.java

@@ -0,0 +1,72 @@
+package com.primeton.damp.cache;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+
+/**
+ * <pre>
+ *
+ * Created by zhenqin.
+ * User: zhenqin
+ * Date: 2021/4/2
+ * Time: 11:15
+ * Vendor: yiidata.com
+ * To change this template use File | Settings | File Templates.
+ *
+ * </pre>
+ *
+ * @author zhenqin
+ */
+public class GuavaMemCached implements ICache<Object> {
+
+
+    /**
+     * The underlying Guava cache
+     */
+    final Cache<String, Object> cached = CacheBuilder.newBuilder().maximumSize(10000).initialCapacity(500).build();
+
+
+    public GuavaMemCached() {
+    }
+
+    @Override
+    public void add(String key, Object value) {
+        cached.put(key, value);
+    }
+
+    @Override
+    public void add(String key, int exp, Object value) {
+        // NOTE: the expiry argument is ignored; this in-memory cache has no per-entry TTL
+        cached.put(key, value);
+    }
+
+    @Override
+    public Object get(String key) {
+        return cached.getIfPresent(key);
+    }
+
+    @Override
+    public Object remove(String key) {
+        cached.invalidate(key);
+        return null;
+    }
+
+    @Override
+    public int removeByPrefix(String prefix) {
+        return 0;
+    }
+
+    @Override
+    public void clear() {
+        // invalidateAll() discards all entries; cleanUp() would only run pending maintenance
+        cached.invalidateAll();
+    }
+
+    @Override
+    public int size() {
+        return (int)cached.size();
+    }
+
+    @Override
+    public void shutdown() {
+        clear();
+    }
+}
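
A minimal usage sketch against the ICache interface as implemented above (not part of the commit):

```java
// Guava-backed in-memory cache; note there is no per-entry TTL here.
ICache<Object> cache = new GuavaMemCached();
cache.add("token:42", "abc123");
cache.add("token:43", 60, "def456");        // the 60s expiry is ignored in-memory
System.out.println(cache.get("token:42"));  // -> abc123
cache.remove("token:42");
System.out.println(cache.size());           // -> 1 (only token:43 remains)
```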

+ 19 - 12
java-commons-cache/Redised.java

@@ -1,8 +1,9 @@
 package com.yiidata.intergration.web.modules.sys.cache;
 
-import com.yiidata.intergration.web.modules.sys.cache.serde.JdkSerializer;
-import com.yiidata.intergration.web.modules.sys.cache.serde.Serializer;
-import com.yiidata.intergration.web.modules.sys.cache.serde.StringSerializer;
+import com.primeton.damp.cache.serde.JdkSerializer;
+import com.primeton.damp.cache.serde.Serializer;
+import com.primeton.damp.cache.serde.StringSerializer;
+import org.apache.commons.lang.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import redis.clients.jedis.Jedis;
@@ -53,6 +54,7 @@ public class Redised implements ICache<Serializable>, Closeable {
     private final Serializer<Serializable> VALUE_SERDE = new JdkSerializer();
 
 
+
     public Redised() {
         this("localhost", 6379, 0);
     }
@@ -62,9 +64,23 @@ public class Redised implements ICache<Serializable>, Closeable {
         this(host, port, 0);
     }
 
+
     public Redised(String host, int port, int db){
+        this(host, port, null, db);
+    }
+
+
+    public Redised(String host, int port, String password){
+        this(host, port, password, 0);
+    }
+
+
+    public Redised(String host, int port, String password, int db){
         this.db = db;
         jedis = new Jedis(host, port);
+        if(StringUtils.isNotBlank(password)) {
+            jedis.auth(password);
+        }
         jedis.select(db);
     }
 
@@ -145,13 +161,4 @@ public class Redised implements ICache<Serializable>, Closeable {
 
         }
     }
-
-    public static void main(String[] args) {
-        Redised redis = new Redised("localhost", 6379);
-        int size = redis.size();
-        System.out.println(size);
-
-        Set<String> keys = redis.jedis.keys("listMetadataTreeNode*");
-        System.out.println(keys);
-    }
 }
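
The new constructor authenticates before selecting the database; a usage sketch (credentials illustrative):

```java
// Password-protected Redis cache on db 0; auth() runs before select().
Redised cache = new Redised("127.0.0.1", 6379, "secret", 0);
```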

+ 2 - 0
swagger-support/SwaggerConfig.java

@@ -2,6 +2,7 @@ package com.primeton.dgs.kernel.core.configure;
 
 
 import org.springframework.beans.factory.annotation.Value;
+import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
 import org.springframework.context.annotation.Bean;
 import org.springframework.context.annotation.Configuration;
 
@@ -31,6 +32,7 @@ import java.util.Optional;
  */
 @Configuration //must be present
 @EnableSwagger2 //must be present
+@ConditionalOnProperty(name = "swagger.enable", havingValue = "true", matchIfMissing = true)
 public class SwaggerConfig {
 
     @Value("${swagger.group}")
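
With @ConditionalOnProperty, the whole Swagger configuration can now be switched off per environment; a properties sketch (the key comes from the annotation added above):

```properties
# Swagger stays enabled when the key is absent (matchIfMissing = true);
# set it to false to drop the whole SwaggerConfig bean in production.
swagger.enable=false
```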