Browse Source

Merge branch 'master' of git.sagacloud.cn:persagy/persagy-dmp-server-doc

yaoll 4 years ago
parent
commit
2345d3b100
2 changed files with 159 additions and 2 deletions
  1. 8 0
      docs/alarm/alarm_api.md
  2. 151 2
      docs/dw/dw_deploy.md

+ 8 - 0
docs/alarm/alarm_api.md

@@ -340,6 +340,14 @@
 }
 ```
 
+支持的withColumns:
+
+> 接口返回值中默认不包含withColumns中的属性,只有在查询条件中添加withColumns,才能查询到withColumns中的属性。具体使用请参照[通用查询](/utils/query.md)
+
+```json5
+{"withColumns": ["ignoreTime", "treatEndTime"]}
+```
+
 返回值
 
 ```json

+ 151 - 2
docs/dw/dw_deploy.md

@@ -413,5 +413,154 @@
 
       1. 引入jar包,hive-jdbc-uber-2.6.5.0-292.jar(根据版本引入) hive-jdbc-uber-2.6.5.0-292.jar
       2. 配置如图:
-         
-         <img src="./image/image-20201201100958554.png" alt="image-20201201100958554" style="zoom: 50%;" />
+        
+         <img src="./image/image-20201201100958554.png" alt="image-20201201100958554" style="zoom: 50%;" />
+
+
+
+# Kafka安装
+
+1. 解压
+
+   ```shell
+   tar -zxvf kafka_2.11-0.11.0.0.tgz -C /opt/module/
+   ```
+
+2. 在kafka目录下创建logs文件
+
+3. 修改配置文件:/config/server.properties
+
+   ```properties
+   #broker的全局唯一编号,不能重复,hadoop02的broker.id=2,hadoop03的broker.id=3
+   broker.id=1
+   #删除topic功能使能
+   delete.topic.enable=true
+   #处理网络请求的线程数量
+   num.network.threads=3
+   #用来处理磁盘IO的线程数量
+   num.io.threads=8
+   #发送套接字的缓冲区大小
+   socket.send.buffer.bytes=102400
+   #接收套接字的缓冲区大小
+   socket.receive.buffer.bytes=102400
+   #请求套接字的缓冲区大小
+   socket.request.max.bytes=104857600
+   #kafka运行日志存放的路径
+   log.dirs=/opt/module/kafka/logs
+   #topic在当前broker上的分区个数
+   num.partitions=1
+   #用来恢复和清理data下数据的线程数量
+   num.recovery.threads.per.data.dir=1
+   #segment文件保留的最长时间,超时将被删除
+   log.retention.hours=168
+   #配置连接Zookeeper集群地址
+   zookeeper.connect=hadoop01:2181,hadoop02:2181,hadoop03:2181
+   ```
+
+
+
+# Hbase安装
+
+## 安装HBase
+
+1. 解压
+
+   ```shell
+   tar -zxvf hbase-1.3.1-bin.tar.gz -C /opt/module
+   ```
+
+2. 修改配置文件
+
+   1. hbase-env.sh
+
+      ```xml
+      export JAVA_HOME=/opt/module/jdk1.8.0_144
+      export HBASE_MANAGES_ZK=false
+      ```
+
+   2. hbase-site.xml
+
+      ```xml
+      <property>
+        <name>hbase.rootdir</name>
+        <value>hdfs://hadoop01:9000/HBase</value>
+      </property>
+      
+      <property>
+        <name>hbase.cluster.distributed</name>
+        <value>true</value>
+      </property>
+      
+      <!-- 0.98后的新变动,之前版本没有.port,默认端口为60000 -->
+      <property>
+        <name>hbase.master.port</name>
+        <value>16000</value>
+      </property>
+      
+      <property>
+        <name>hbase.zookeeper.quorum</name>
+        <value>hadoop01:2181,hadoop02:2181,hadoop03:2181</value>
+      </property>
+      
+      <property>
+        <name>hbase.zookeeper.property.dataDir</name>
+        <value>/opt/module/zookeeper-3.4.10/zkData</value>
+      </property>
+      ```
+
+      
+
+   3. regionservers
+
+      ```
+      hadoop01
+      hadoop02
+      hadoop03
+      ```
+
+   4. 软连接hadoop配置文件到HBase
+
+      ```shell
+      ln -s /opt/module/hadoop-2.7.2/etc/hadoop/core-site.xml /opt/module/hbase-1.3.1/conf/core-site.xml
+      ln -s /opt/module/hadoop-2.7.2/etc/hadoop/hdfs-site.xml /opt/module/hbase-1.3.1/conf/hdfs-site.xml
+      ```
+
+   5. 分发 hbase 到其他节点
+
+## Hive与Hbase映射
+
+1. 拷贝 hive-hbase-handler-1.2.2.jar (此版本根据项目自行决定)到 Hive 的 lib 下
+
+2. 建立软连接
+
+   ```shell
+   export HBASE_HOME=/opt/module/hbase-1.3.1
+   export HIVE_HOME=/opt/module/hive
+   
+   ln -s $HBASE_HOME/lib/hbase-common-1.3.1.jar $HIVE_HOME/lib/hbase-common-1.3.1.jar
+   ln -s $HBASE_HOME/lib/hbase-server-1.3.1.jar $HIVE_HOME/lib/hbase-server-1.3.1.jar
+   ln -s $HBASE_HOME/lib/hbase-client-1.3.1.jar $HIVE_HOME/lib/hbase-client-1.3.1.jar
+   ln -s $HBASE_HOME/lib/hbase-protocol-1.3.1.jar $HIVE_HOME/lib/hbase-protocol-1.3.1.jar
+   ln -s $HBASE_HOME/lib/hbase-it-1.3.1.jar $HIVE_HOME/lib/hbase-it-1.3.1.jar
+   ln -s $HBASE_HOME/lib/htrace-core-3.1.0-incubating.jar $HIVE_HOME/lib/htrace-core-3.1.0-incubating.jar
+   ln -s $HBASE_HOME/lib/hbase-hadoop2-compat-1.3.1.jar $HIVE_HOME/lib/hbase-hadoop2-compat-1.3.1.jar
+   ln -s $HBASE_HOME/lib/hbase-hadoop-compat-1.3.1.jar $HIVE_HOME/lib/hbase-hadoop-compat-1.3.1.jar
+   ```
+
+3. 修改配置文件hive-site.xml
+
+   ```xml
+   <property>
+     <name>hive.zookeeper.quorum</name>
+     <value>hadoop01,hadoop02,hadoop03</value>
+     <description>The list of ZooKeeper servers to talk to. This is only needed for read/write locks.</description>
+   </property>
+   <property>
+     <name>hive.zookeeper.client.port</name>
+     <value>2181</value>
+     <description>The port of ZooKeeper servers to talk to. This is only needed for read/write locks.</description>
+   </property>
+   ```
+
+   
+