Code commit

dev
TANGWY 4 months ago
parent 0dfc4dcac4
commit e77a5ec76f
  1. 65
      business/es_query.py
  2. BIN
      conf/__init__.pyc
  3. 38
      conf/defaultRule.json
  4. 48
      corn/ueba_corn.py
  5. 72
      corn/ueba_corn_pg.py
  6. 5
      install.py
  7. 13
      jobs/jobmeta.json
  8. 4
      lib/result.py
  9. 0
      mock/__init__.py
  10. 18
      test.py
  11. 4
      urls.py
  12. 292
      utils/base_dataclean.py
  13. 227
      utils/base_dataclean_pg.py
  14. 15
      utils/config.py
  15. 281
      utils/dashboard_data.py
  16. 270
      utils/dashboard_data_conversion.py
  17. 218
      utils/dashboard_data_pg.py
  18. 109
      utils/db2json.py
  19. 415
      utils/esUtil.py
  20. 90
      utils/es_operation.py
  21. 2
      utils/ext_logging.py
  22. 68
      views/dashboard_views.py
  23. 801
      views/mock.py

@ -1,65 +0,0 @@
#!/usr/bin/python
#encoding=utf-8
# author: tangwy
import json
import os,re
import codecs
import csv
import ConfigParser
from ipaddr import IPRange
from elasticsearch import Elasticsearch
conf_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'conf')
ini_path = os.path.join(conf_path, 'conf.ini')
config = ConfigParser.ConfigParser()
config.read(ini_path)
ES_HOST = config.get('COMMON', 'es_host')
ES_PER_COUNT = int(config.get('COMMON', 'es_per_count'))  # cast: es.search's size must be an int
ES_INDEX_NAME = config.get('COMMON', 'es_index_name')
CSV_FILE_PATH = config.get('COMMON', 'csv_file_path')
# Fetch data with a scroll query
def get_es_data(start_time,end_time):
es = Elasticsearch(ES_HOST)
msg = es.search(index=ES_INDEX_NAME,scroll="3m",size=ES_PER_COUNT,_source_includes= ["cookies","url","sip","dip"], query={
"bool": {
"filter": {
"range": {
"timestamp": {
"gte": start_time,
"lte": end_time
}
}
}
}
})
result = msg['hits']['hits']
total = msg['hits']['total']
scroll_id = msg['_scroll_id']
for i in range(0,int(total["value"]/ES_PER_COUNT)+1):
query_scroll = es.scroll(scroll_id=scroll_id, scroll='3m')["hits"]["hits"]
result += query_scroll
return result
# Read the CSV file to map IP ranges to regions
def get_ip_area_relation(csv_file_path):
iprange_map = {}
with codecs.open(csv_file_path, mode='r',encoding='utf-8') as file:
csv_reader = csv.reader(file)
headers = next(csv_reader)  # skip the header row once
for row in csv_reader:
ip_start = row[0]
ip_end = row[1]
ip_range = IPRange(ip_start, ip_end)
ip_area = row[5]
print (ip_area)
for ip in ip_range:
iprange_map[ip] = ip_area
return iprange_map
get_ip_area_relation("/tmp/data/ip_area_relation.csv")

Binary file not shown.

@ -0,0 +1,38 @@
{
"white_list": {
"ip": [
400000,
400001
],
"account": [
400000,
400001
],
"interface": [
400000,
400001
],
"menu": [
400000,
400001
]
},
"grey_list": {
"ip": [
400000,
400001
],
"account": [
400000,
400001
],
"interface": [
400000,
400001
],
"menu": [
400000,
400001
]
}
}

@ -0,0 +1,48 @@
# coding=utf-8
"""
@Author: fu-zhe
@FileName: user_cron.py
@DateTime: 2024/5/23 14:19
@Description: User-related scheduled task. Does a full fetch, deleting existing data before rewriting it. Runs once a day.
"""
from __future__ import unicode_literals

import calendar
import traceback
from datetime import datetime, timedelta

from uebaMetricsAnalysis.utils.ext_logging import logger
from uebaMetricsAnalysis.utils.base_dataclean import entry


class UserCron:
    def get_index_name(self):
        base_index = "a_ueba_analyse_"
        # step back one month from the current date
        now = datetime.now()
        last_month = now - timedelta(days=calendar.monthrange(now.year, now.month)[1])
        return "{}{}_{:02d}".format(base_index, last_month.year, last_month.month)

    def processing(self):
        try:
            # the write index is created per month; watch out for runs that cross midnight
            write_index = self.get_index_name()
            read_index = "bsa_traffic*"
            # the task runs daily at midnight; the query range is the previous day,
            # from 00:00:00 through 23:59:59
            yesterday = datetime.now() - timedelta(days=1)
            start = yesterday.replace(hour=0, minute=0, second=0, microsecond=0)
            end = yesterday.replace(hour=23, minute=59, second=59, microsecond=0)
            logger.info("start processing")
            entry(write_index, read_index, start, end)
        except Exception, e:
            logger.error("Scheduled task failed: {}, traceback: {}".format(str(e), traceback.format_exc()))
            raise
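The month-rollover in get_index_name is easy to sanity-check in isolation; a minimal sketch, assuming a hypothetical run date of 2024-07-05:

import calendar
from datetime import datetime, timedelta

now = datetime(2024, 7, 5)
# stepping back by the length of the current month always lands in the previous month
last_month = now - timedelta(days=calendar.monthrange(now.year, now.month)[1])
print("a_ueba_analyse_{}_{:02d}".format(last_month.year, last_month.month))  # a_ueba_analyse_2024_06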

@ -0,0 +1,72 @@
# coding=utf-8
"""
@Author: fu-zhe
@FileName: user_cron.py
@DateTime: 2024/5/23 14:19
@Description: User-related scheduled task. Does a full fetch, deleting existing data before rewriting it. Runs once a day.
"""
from __future__ import unicode_literals

import random
import string
import time
import traceback
from datetime import datetime, timedelta

from uebaMetricsAnalysis.utils.ext_logging import logger
from commandCyberRange.utils.db2json import DBUtils, DBType
from uebaMetricsAnalysis.utils.base_dataclean import entry

JOB_STATUS = {
    "RUNNING": 1,
    "FINISH": 2,
    "ERROR": 3
}


class UserCron:
    def generate_job_id(self):
        timestamp = int(time.time() * 1000)
        random_letters = ''.join(random.choice(string.ascii_letters) for _ in range(7))
        return str(timestamp) + random_letters

    # Get the job's execution window: start time and end time
    def get_job_period(self):
        sql = "select job_id, end_time from ueba_clean_jobs order by end_time desc limit 1"
        fields = ["job_id", "end_time"]
        data = DBUtils.transition(fields, sql, DBType.LIST)
        if len(data) == 0:
            start_time = datetime.now() - timedelta(minutes=5)
            end_time = datetime.now()
        else:
            start_time = data[0].get('end_time')
            end_time = data[0].get('end_time') + timedelta(minutes=5)
        if end_time > datetime.now():
            return None, None
        return start_time, end_time

    # Runs every 5 minutes
    def processing(self):
        job_id = self.generate_job_id()
        try:
            logger.info("job: starting")
            start, end = self.get_job_period()
            if start is None or end is None:
                logger.info("job: end time is later than server time, skipping this run")
                return
            DBUtils.insert_job_record(job_id, start, end, JOB_STATUS.get("RUNNING"))
            logger.info("job: run parameters: {} - {}".format(start, end))
            logger.info("job: about to fetch ES data")
            entry(start, end)
            logger.info("job: finished")
            DBUtils.write_job_status(job_id, JOB_STATUS.get("FINISH"), "")
        except Exception, e:
            err_info = "Scheduled task failed: {}, traceback: {}".format(str(e), traceback.format_exc())
            logger.error(err_info)
            DBUtils.write_job_status(job_id, JOB_STATUS.get("ERROR"), err_info)
            raise


if __name__ == '__main__':
    UserCron().processing()
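The contract of get_job_period is that each run resumes from the last recorded end_time and claims the next five minutes, refusing any window that would extend past the present. A minimal sketch of that invariant, with hypothetical timestamps:

from datetime import datetime, timedelta

def next_window(last_end, now):
    # resume from the previous end; never hand out a window that is still in the future
    start, end = last_end, last_end + timedelta(minutes=5)
    if end > now:
        return None, None
    return start, end

print(next_window(datetime(2024, 7, 1, 12, 0), datetime(2024, 7, 1, 12, 6)))  # 12:00 - 12:05
print(next_window(datetime(2024, 7, 1, 12, 5), datetime(2024, 7, 1, 12, 6)))  # (None, None): window still open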

@ -15,7 +15,7 @@ BASE_PATH = os.path.split(os.path.realpath(__file__))[0]
 DSTPATH = sys.argv[1]
 ISUPDATE = sys.argv[2]
 CUR_PATH = os.path.normpath(os.path.dirname(os.path.abspath(__file__)))
-APP_NAME = 'UebaMetricsAnalysis'
+APP_NAME = 'uebaMetricsAnalysis'
 def get_logger(logger_name=APP_NAME, logger_level=logging.INFO):
     """Logger"""
@ -37,7 +37,7 @@ def get_logger(logger_name=APP_NAME, logger_level=logging.INFO):
     return logger
-logger = get_logger("UebaMetricsAnalysis")
+logger = get_logger("uebaMetricsAnalysis")
 def installDBSchema(exec_sql):
@ -58,7 +58,6 @@ def installDBSchema(exec_sql):
     except Exception as e:
         logger.info(str(e))
 def add_task():
     # Batch-register the component's tasks
     ch = ComponentHelper(APP_NAME)

@ -0,0 +1,13 @@
[
{
"task_name": "ueba_corn",
"task_type": 1,
"exec_cmd": "python /home/master/ISOP/apps/uebaMetricsAnalysis/corn/ueba_corn.py",
"task_owner": "uebaMetricsAnalysis",
"run_mode": 1,
"duration_args": "*/2 * * * * *",
"retry_nums": 5,
"is_enable": 1,
"task_description": "每天执行一次 清洗数据到es-ueba索引"
}
]

@ -10,8 +10,8 @@ class Result(object):
     """
     Result object handed back to JsonResponse
     """
-    SUCCESS_CODE = 0  # status code for success
-    FAILED_CODE = 1  # status code for failure
+    SUCCESS_CODE = 200  # status code for success
+    FAILED_CODE = 500  # status code for failure
     SUCCESS_MESSAGE = 'success'  # message for single-item responses
     SUCCESS_MESSAGE_LIST = '请求成功'  # message for list responses

@ -0,0 +1,18 @@
#encoding=utf-8
import json
from isoc.utils.esUtil import EsUtil


def createIndex():
    field_map = {
        "field1": "text",
        "field2": "text"
    }
    es_util_instance = EsUtil()
    # create_index_simple takes the index name and a field->type mapping
    res = es_util_instance.create_index_simple("bsa_traffic*", field_map)
    return res


res = createIndex()
print(res)

@ -1,7 +1,7 @@
 # coding:utf-8
 from django.conf.urls import url
-from UebaMetricsAnalysis.views import dashboard_views
+from uebaMetricsAnalysis.views import dashboard_views
 from rest_framework import routers
@ -9,6 +9,6 @@ urlpatterns = [
 ]
 router = routers.DefaultRouter()
-router.register(r'/rule_info', dashboard_views.DashboardViewSets, base_name="dashboard-view")
+router.register(r'/ueba_metrics', dashboard_views.DashboardViewSets, base_name="dashboard-view")
 urlpatterns += router.urls

@ -0,0 +1,292 @@
#encoding=utf-8
import json
import time,datetime
import traceback
from datetime import datetime, timedelta
import calendar
from esUtil import EsUtil
import pytz
size = 1000  # adjust to the actual workload
## 01 Create the index
def createIndex(index):
map={
"data_type":"keyword",
"req_account":"keyword",
"req_frequency":"integer",
"req_jobnum":"keyword",
"interface_addr":"keyword",
"req_ip":"ip",
"menu_name":"keyword",
"date_time":"date"
}
es_util_instance = EsUtil()
reqs = es_util_instance.is_index_exist(index)
if not reqs:  # is_index_exist returns a boolean, not the string "false"
try:
res = es_util_instance.create_index_simple(index,map)
except Exception,e:
print e.message
## IP dimension
def get_ip_group_data(index,startTime,endTime):
try:
query_body={
"size": 0,
"query": {
"range": {"timestamp": {"gte": startTime,"lte": endTime}}
},
"aggs": {
"composite_buckets": {
"composite": {
"size": size,
"sources": [
{"sip": { "terms": {"field": "sip"} }},
{"trojan_type": { "terms": { "field": "trojan_type"}}}
]
}
}
}
}
after_key = None
es_util_instance = EsUtil()
datas=[]
while True:
if after_key:
query_body["aggs"]["composite_buckets"]["composite"]["after"] = after_key
try:
response = es_util_instance.search(index,query_body)
except Exception,e:
print "search err:"+str(e)
break
for bucket in response["aggregations"]["composite_buckets"]["buckets"]:
data = {
"data_type": "ip",
"req_account": "",
"req_frequency": bucket['doc_count'],
"req_jobnum": bucket['key']['trojan_type'] ,
"interface_addr": "",
"req_ip":bucket['key']['sip'] ,
"menu_name": "",
"date_time": int(time.time() * 1000) # 当前时间,使用isoformat格式化
}
datas.append(data)
after_key = bucket["key"]
if not response["aggregations"]["composite_buckets"].get("after_key"):
break
after_key = response["aggregations"]["composite_buckets"]["after_key"]
except Exception,e:
print "x_err:"+e.message
return datas
## Account dimension
def get_account_group_data(index,startTime,endTime):
query_body={
"size": 0,
"query": {
"range": {"timestamp": {"gte": startTime,"lte": endTime}}
},
"aggs": {
"composite_buckets": {
"composite": {
"size": size,
"sources": [
{"account": { "terms": {"field": "account"} }},
{"trojan_type": { "terms": { "field": "trojan_type"}}}
]
}
}
}
}
after_key = None
es_util_instance = EsUtil()
datas=[]
while True:
if after_key:
query_body["aggs"]["composite_buckets"]["composite"]["after"] = after_key
response = es_util_instance.search(index,query_body)
for bucket in response["aggregations"]["composite_buckets"]["buckets"]:
#print(bucket['key']['sip'] + ":" + str(bucket['doc_count']))
data = {
"data_type": "account",
"req_account": bucket['key']['account'],
"req_frequency": bucket['doc_count'],
"req_jobnum": bucket['key']['trojan_type'] ,
"interface_addr": "",
"req_ip":"0.0.0.0" ,
"menu_name": "",
"date_time": int(time.time() * 1000) # 当前时间,使用isoformat格式化
}
datas.append(data)
after_key = bucket["key"]
if not response["aggregations"]["composite_buckets"].get("after_key"):
break
after_key = response["aggregations"]["composite_buckets"]["after_key"]
return datas
## Interface dimension
def get_interface_group_data(index,startTime,endTime):
query_body={
"size": 0,
"query": {
"range": {"timestamp": {"gte": startTime,"lte": endTime}}
},
"aggs": {
"composite_buckets": {
"composite": {
"size": size,
"sources": [
{"interface": { "terms": {"field": "interface"} }},
{"sip": { "terms": { "field": "sip"}}},
{"account": { "terms": { "field": "account"}}},
{"trojan_type": { "terms": { "field": "trojan_type"}}},
]
}
}
}
}
after_key = None
es_util_instance = EsUtil()
datas=[]
while True:
if after_key:
query_body["aggs"]["composite_buckets"]["composite"]["after"] = after_key
response = es_util_instance.search(index,query_body)
for bucket in response["aggregations"]["composite_buckets"]["buckets"]:
#print(bucket['key']['sip'] + ":" + str(bucket['doc_count']))
data = {
"data_type": "interface",
"req_account": bucket['key']['account'],
"req_frequency": bucket['doc_count'],
"req_jobnum": bucket['key']['trojan_type'] ,
"interface_addr": bucket['key']['interface'] ,
"req_ip":bucket['key']['sip'],
"menu_name": "",
"date_time": int(time.time() * 1000) # 当前时间,使用isoformat格式化
}
datas.append(data)
after_key = bucket["key"]
if not response["aggregations"]["composite_buckets"].get("after_key"):
break
after_key = response["aggregations"]["composite_buckets"]["after_key"]
return datas
## Menu dimension
def get_menu_group_data(index,startTime,endTime):
query_body={
"size": 0,
"query": {
"range": {"timestamp": {"gte": startTime,"lte": endTime}}
},
"aggs": {
"composite_buckets": {
"composite": {
"size": size,
"sources": [
{"worm_family": { "terms": {"field": "worm_family"} }},
{"sip": { "terms": { "field": "sip"}}},
{"account": { "terms": { "field": "account"}}},
{"trojan_type": { "terms": { "field": "trojan_type"}}},
]
}
}
}
}
after_key = None
es_util_instance = EsUtil()
datas=[]
while True:
if after_key:
query_body["aggs"]["composite_buckets"]["composite"]["after"] = after_key
response = es_util_instance.search(index,query_body)
for bucket in response["aggregations"]["composite_buckets"]["buckets"]:
#print(bucket['key']['sip'] + ":" + str(bucket['doc_count']))
data = {
"data_type": "menu",
"req_account": bucket['key']['account'],
"req_frequency": bucket['doc_count'],
"req_jobnum": bucket['key']['trojan_type'] ,
"interface_addr": "" ,
"req_ip":bucket['key']['sip'],
"menu_name": bucket['key']['worm_family'],
"date_time": int(time.time() * 1000) # 当前时间,使用isoformat格式化
}
datas.append(data)
after_key = bucket["key"]
if not response["aggregations"]["composite_buckets"].get("after_key"):
break
after_key = response["aggregations"]["composite_buckets"]["after_key"]
return datas
## 03 Write the data
def data_insert(index,data):
es_util_instance = EsUtil()
response = es_util_instance.bulk_insert(index,data)
return response
def clean_data(write_index,read_index,start,end):
data_ip = get_ip_group_data(read_index,start,end)
print "data_ip:"+str(len(data_ip))
data_account = get_account_group_data(read_index,start,end)
print "data_account:"+str(len(data_account))
data_interface = get_interface_group_data(read_index,start,end)
print "data_interface:"+str(len(data_interface))
data_menu = get_menu_group_data(read_index,start,end)
print "data_menu:"+str(len(data_menu))
res_data = data_ip+data_account+data_interface+data_menu
response = data_insert(write_index,res_data)
print json.dumps(response)
# Entry point
def entry(write_index,read_index,start,end):
createIndex(write_index)
clean_data(write_index,read_index,start,end)
# 00:00:00 of the previous day
def get_start_end_time(hour,minute,second):
# current date and time
now = datetime.now()
# yesterday's date
yesterday = now - timedelta(days=1)
# set the time-of-day fields
yesterday_midnight = yesterday.replace(hour=hour, minute=minute, second=second, microsecond=0)
# get the UTC timezone object via pytz
utc = pytz.utc
# localize the naive datetime to UTC
yesterday_midnight_utc = utc.localize(yesterday_midnight)
# format as an ISO 8601 string with timezone
formatted_date = yesterday_midnight_utc.strftime("%Y-%m-%dT%H:%M:%SZ")
return formatted_date
def index():
try:
# the write index is created per month; watch out for runs that cross midnight
write_index= "b_ueba_2024_07"
read_index ="bsa_traffic*"
# the task runs daily at midnight
# query range: 00:00:00 of the previous day through 23:59:59 of the previous day
start = "2024-06-02T00:00:00Z"#get_start_end_time(0,0,0)
end = get_start_end_time(23,59,59)
print start +":"+ end
entry(write_index,read_index,start,end)
except Exception,e:
print "Scheduled task failed:"+traceback.format_exc()
# logger.error("Scheduled task failed: {}, traceback: {}".format(str(e), traceback.format_exc()))
index()
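All four dimension queries above repeat the same composite-aggregation pagination pattern: request a page of buckets, then re-issue the query with "after" set to the response's after_key until no after_key comes back. A condensed sketch of just that loop, assuming the EsUtil client and a query body shaped like the ones above:

def iter_composite_buckets(es_util_instance, index, query_body):
    # generator over every bucket of a composite aggregation, following after_key
    while True:
        response = es_util_instance.search(index, query_body)
        agg = response["aggregations"]["composite_buckets"]
        for bucket in agg["buckets"]:
            yield bucket
        after_key = agg.get("after_key")
        if not after_key:
            break
        query_body["aggs"]["composite_buckets"]["composite"]["after"] = after_key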

@ -0,0 +1,227 @@
#encoding=utf-8
import json
import time,datetime
import traceback
from datetime import datetime, timedelta
import calendar
from esUtil import EsUtil
import pytz
size = 1000  # adjust to the actual workload
DATA_TYPE = {
"IP": 1,
"ACCOUNT": 2,
"INTERFACE": 3,
"MENU": 4,
}
## IP dimension
def get_ip_group_data(index,startTime,endTime):
try:
query_body={
"size": 0,
"query": {
"range": {"timestamp": {"gte": startTime,"lte": endTime}}
},
"aggs": {
"composite_buckets": {
"composite": {
"size": size,
"sources": [
{"sip": { "terms": {"field": "sip"} }},
{"trojan_type": { "terms": { "field": "trojan_type"}}}
]
}
}
}
}
after_key = None
es_util_instance = EsUtil()
datas=[]
while True:
if after_key:
query_body["aggs"]["composite_buckets"]["composite"]["after"] = after_key
try:
response = es_util_instance.search(index,query_body)
except Exception,e:
print "search err:"+str(e)
break
for bucket in response["aggregations"]["composite_buckets"]["buckets"]:
data = {
"data_type": DATA_TYPE.get("IP"),
"count": bucket['doc_count'],
"jobnum": bucket['key']['trojan_type'] ,
"ip":bucket['key']['sip']
}
datas.append(data)
after_key = bucket["key"]
if not response["aggregations"]["composite_buckets"].get("after_key"):
break
after_key = response["aggregations"]["composite_buckets"]["after_key"]
except Exception,e:
print "x_err:"+e.message
return datas
## Account dimension
def get_account_group_data(index,startTime,endTime):
query_body={
"size": 0,
"query": {
"range": {"timestamp": {"gte": startTime,"lte": endTime}}
},
"aggs": {
"composite_buckets": {
"composite": {
"size": size,
"sources": [
{"account": { "terms": {"field": "account"} }},
{"trojan_type": { "terms": { "field": "trojan_type"}}}
]
}
}
}
}
after_key = None
es_util_instance = EsUtil()
datas=[]
while True:
if after_key:
query_body["aggs"]["composite_buckets"]["composite"]["after"] = after_key
response = es_util_instance.search(index,query_body)
for bucket in response["aggregations"]["composite_buckets"]["buckets"]:
#print(bucket['key']['sip'] + ":" + str(bucket['doc_count']))
data = {
"data_type": DATA_TYPE.get("ACCOUNT"),
"account": bucket['key']['account'],
"count": bucket['doc_count'],
"jobnum": bucket['key']['trojan_type']
}
datas.append(data)
after_key = bucket["key"]
if not response["aggregations"]["composite_buckets"].get("after_key"):
break
after_key = response["aggregations"]["composite_buckets"]["after_key"]
return datas
## Interface dimension
def get_interface_group_data(index,startTime,endTime):
query_body={
"size": 0,
"query": {
"range": {"timestamp": {"gte": startTime,"lte": endTime}}
},
"aggs": {
"composite_buckets": {
"composite": {
"size": size,
"sources": [
{"interface": { "terms": {"field": "interface"} }},
{"sip": { "terms": { "field": "sip"}}},
{"account": { "terms": { "field": "account"}}},
{"trojan_type": { "terms": { "field": "trojan_type"}}},
]
}
}
}
}
after_key = None
es_util_instance = EsUtil()
datas=[]
while True:
if after_key:
query_body["aggs"]["composite_buckets"]["composite"]["after"] = after_key
response = es_util_instance.search(index,query_body)
for bucket in response["aggregations"]["composite_buckets"]["buckets"]:
#print(bucket['key']['sip'] + ":" + str(bucket['doc_count']))
data = {
"data_type": DATA_TYPE.get("INTERFACE"),
"account": bucket['key']['account'],
"count": bucket['doc_count'],
"jobnum": bucket['key']['trojan_type'] ,
"interface": bucket['key']['interface'] ,
"ip":bucket['key']['sip']
}
datas.append(data)
after_key = bucket["key"]
if not response["aggregations"]["composite_buckets"].get("after_key"):
break
after_key = response["aggregations"]["composite_buckets"]["after_key"]
return datas
## Menu dimension
def get_menu_group_data(index,startTime,endTime):
query_body={
"size": 0,
"query": {
"range": {"timestamp": {"gte": startTime,"lte": endTime}}
},
"aggs": {
"composite_buckets": {
"composite": {
"size": size,
"sources": [
{"worm_family": { "terms": {"field": "worm_family"} }},
{"sip": { "terms": { "field": "sip"}}},
{"account": { "terms": { "field": "account"}}},
{"trojan_type": { "terms": { "field": "trojan_type"}}},
]
}
}
}
}
after_key = None
es_util_instance = EsUtil()
datas=[]
while True:
if after_key:
query_body["aggs"]["composite_buckets"]["composite"]["after"] = after_key
response = es_util_instance.search(index,query_body)
for bucket in response["aggregations"]["composite_buckets"]["buckets"]:
#print(bucket['key']['sip'] + ":" + str(bucket['doc_count']))
data = {
"data_type": DATA_TYPE.get("MENU"),
"account": bucket['key']['account'],
"count": bucket['doc_count'],
"jobnum": bucket['key']['trojan_type'] ,
"ip":bucket['key']['sip'],
"menu_name": bucket['key']['worm_family'],
}
datas.append(data)
after_key = bucket["key"]
if not response["aggregations"]["composite_buckets"].get("after_key"):
break
after_key = response["aggregations"]["composite_buckets"]["after_key"]
return datas
def clean_data(read_index,start,end):
data_ip = get_ip_group_data(read_index,start,end)
print "data_ip:"+str(len(data_ip))
data_account = get_account_group_data(read_index,start,end)
print "data_account:"+str(len(data_account))
data_interface = get_interface_group_data(read_index,start,end)
print "data_interface:"+str(len(data_interface))
data_menu = get_menu_group_data(read_index,start,end)
print "data_menu:"+str(len(data_menu))
res_data = data_ip+data_account+data_interface+data_menu
#todo read the previous 5-minute batch file and merge it with this batch
#then write the merged result back to a file
# Entry point
def entry(start,end):
base_index ="bsa_traffic*"
es_util_instance = EsUtil()
res=es_util_instance.get_available_index_name(start,end,base_index)
if len(res)==0:
return
index =",".join(res)
clean_data(index,start,end)
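The todo in clean_data leaves the merge step open. One plausible sketch, assuming records shaped like the dicts built above and that records sharing the same identity fields should have their counts summed (merge_batches is a hypothetical helper, not part of the codebase):

from collections import defaultdict

def merge_batches(previous_batch, current_batch):
    # sum counts for records that share the same identity fields
    merged = defaultdict(int)
    for rec in previous_batch + current_batch:
        key = (rec.get("data_type"), rec.get("ip"), rec.get("account"),
               rec.get("interface"), rec.get("menu_name"), rec.get("jobnum"))
        merged[key] += rec.get("count", 0)
    return merged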

@ -0,0 +1,15 @@
# coding=utf-8
"""
@Author: tangwy
@FileName: config
@DateTime: 2024/6/14 14:27
@Description:
"""
import json,os
def read_json_config(file_path):
if not os.path.exists(file_path):
raise IOError("The configuration file does not exist: {}".format(file_path))
with open(file_path, 'r') as f:
config = json.load(f)
return config
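A quick usage sketch of read_json_config, assuming the conf layout used elsewhere in this commit (conf/defaultRule.json):

import os
from uebaMetricsAnalysis.utils.config import read_json_config

conf_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'conf')
rule = read_json_config(os.path.join(conf_path, 'defaultRule.json'))
print(rule["white_list"]["ip"])  # [400000, 400001]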

@ -0,0 +1,281 @@
#!/usr/bin/python
#encoding=utf-8
# author: tangwy
import json
import os,re
import codecs
import traceback
from isoc.utils.esUtil import EsUtil
from dashboard_data_conversion import ip_summary_data_format, account_summary_data_format, \
interface_summary_data_format, menu_summary_data_format
from ext_logging import logger
## IP dimension
def es_get_ip_group_data(index,startTime,endTime):
page_size = 9000  # adjust to the actual workload
query_body={
"size": 0,
"query": {
"bool": {
"filter": [
{ "term": { "data_type": "ip" } },
{"range":{
"date_time": {
"gte": startTime,
"lte": endTime
}
}}
]
}
},
"aggs": {
"composite_buckets": {
"composite": {
"size" : page_size,
"sources": [
{ "req_ip": { "terms": { "field": "req_ip" } } },
{ "req_jobnum": { "terms": { "field": "req_jobnum" } } }
]
},
"aggregations": {
"total_count": {
"sum": {
"field": "req_frequency"
}
}
}
}
}
}
after_key = None
es_util_instance = EsUtil()
datas = []
while True:
if after_key:
query_body["aggs"]["composite_buckets"]["composite"]["after"] = after_key
response = es_util_instance.search(index,query_body)
buckets = response.get("aggregations", {}).get("composite_buckets", {}).get("buckets", [])
for bucket in buckets:
data= {
"ip":bucket['key']['req_ip'],
"jobnum":bucket['key']['req_jobnum'],
"count":bucket['total_count']['value']
}
datas.append(data)
after_key = bucket["key"]
if not response["aggregations"]["composite_buckets"].get("after_key"):
break
after_key = response["aggregations"]["composite_buckets"]["after_key"]
return datas
## Account dimension
def es_get_account_group_data(index,startTime,endTime):
page_size = 9000  # adjust to the actual workload
query_body={
"size": 0,
"query": {
"bool": {
"filter": [
{ "term": { "data_type": "account" } },
{"range":{
"date_time": {
"gte": startTime,
"lte": endTime
}
}}
]
}
},
"aggs": {
"composite_buckets": {
"composite": {
"size" : page_size,
"sources": [
{ "req_account": { "terms": { "field": "req_account" } } },
{ "req_jobnum": { "terms": { "field": "req_jobnum" } } }
]
},
"aggregations": {
"total_count": {
"sum": {
"field": "req_frequency"
}
}
}
}
}
}
after_key = None
es_util_instance = EsUtil()
datas = []
while True:
if after_key:
query_body["aggs"]["composite_buckets"]["composite"]["after"] = after_key
response = es_util_instance.search(index,query_body)
buckets = response.get("aggregations", {}).get("composite_buckets", {}).get("buckets", [])
for bucket in buckets:
data= {
"account":bucket['key']['req_account'],
"jobnum":bucket['key']['req_jobnum'],
"count":bucket['total_count']['value']
}
datas.append(data)
after_key = bucket["key"]
if not response["aggregations"]["composite_buckets"].get("after_key"):
break
after_key = response["aggregations"]["composite_buckets"]["after_key"]
return datas
## Menu dimension
def es_get_menu_group_data(index,startTime,endTime):
page_size = 9000  # adjust to the actual workload
query_body={
"size": 0,
"query": {
"bool": {
"filter": [
{ "term": { "data_type": "menu" } },
{"range":{
"date_time": {
"gte": startTime,
"lte": endTime
}
}}
]
}
},
"aggs": {
"composite_buckets": {
"composite": {
"size" : page_size,
"sources": [
{ "menu_name": { "terms": { "field": "menu_name" } } },
{ "req_account": { "terms": { "field": "req_account" } } },
{ "req_ip": { "terms": { "field": "req_ip" } } },
{ "req_jobnum": { "terms": { "field": "req_jobnum" } } }
]
},
"aggregations": {
"total_count": {
"sum": {
"field": "req_frequency"
}
}
}
}
}
}
after_key = None
es_util_instance = EsUtil()
datas = []
while True:
if after_key:
query_body["aggs"]["composite_buckets"]["composite"]["after"] = after_key
response = es_util_instance.search(index,query_body)
buckets = response.get("aggregations", {}).get("composite_buckets", {}).get("buckets", [])
for bucket in buckets:
data= {
"menu":bucket['key']['menu_name'],
"ip":bucket['key']['req_ip'],
"account":bucket['key']['req_account'],
"jobnum":bucket['key']['req_jobnum'],
"count":bucket['total_count']['value']
}
datas.append(data)
after_key = bucket["key"]
if not response["aggregations"]["composite_buckets"].get("after_key"):
break
after_key = response["aggregations"]["composite_buckets"]["after_key"]
return datas
## Interface dimension
def es_get_interface_group_data(index,startTime,endTime):
page_size = 9999  # adjust to the actual workload
query_body={
"size": 0,
"query": {
"bool": {
"filter": [
{ "term": { "data_type": "interface" } },
{"range":{
"date_time": {
"gte": startTime,
"lte": endTime
}
}}
]
}
},
"aggs": {
"group_by_menu": {
"composite": {
"size" : page_size,
"sources": [
{ "interface_addr": { "terms": { "field": "interface_addr" } } },
{ "req_account": { "terms": { "field": "req_account" } } },
{ "req_ip": { "terms": { "field": "req_ip" } } },
{ "req_jobnum": { "terms": { "field": "req_jobnum" } } }
]
},
"aggregations": {
"total_count": {
"sum": {
"field": "req_frequency"
}
}
}
}
}
}
after_key = None
es_util_instance = EsUtil()
datas = []
while True:
if after_key:
query_body["aggs"]["composite_buckets"]["composite"]["after"] = after_key
response = es_util_instance.search(index,query_body)
buckets = response.get("aggregations", {}).get("composite_buckets", {}).get("buckets", [])
for bucket in buckets:
data= {
"interface":bucket['key']['interface_addr'],
"ip":bucket['key']['req_ip'],
"account":bucket['key']['req_account'],
"jobnum":bucket['key']['req_jobnum'],
"count":bucket['total_count']['value']
}
datas.append(data)
after_key = bucket["key"]
if not response["aggregations"]["composite_buckets"].get("after_key"):
break
after_key = response["aggregations"]["composite_buckets"]["after_key"]
return datas
def entry(data_type,start,end):
base_index = 'c_ueba_001'
# es_util_instance = EsUtil()
# res=es_util_instance.get_available_index_name(start,end,base_index)
# if len(res)==0:
# return
# index =",".join(res)
index=base_index
try:
data = {}
if data_type == "1":
ip_summary_data = es_get_ip_group_data(index, start, end)
data = ip_summary_data_format(ip_summary_data)
if data_type == "2":
account_summary_data = es_get_account_group_data(index, start, end)
data = account_summary_data_format(account_summary_data)
if data_type == "3":
interface_summary_data = es_get_interface_group_data(index, start, end)
data = interface_summary_data_format(interface_summary_data)
if data_type == "4":
menu_summary_data = es_get_menu_group_data(index, start, end)
data = menu_summary_data_format(menu_summary_data)
return data
except Exception, e:
logger.error(traceback.format_exc())
raise e
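entry dispatches on a stringly-typed dimension code, matching the mapping noted in the view layer: "1" = ip, "2" = account, "3" = interface, "4" = menu. A call sketch with assumed dates:

# returns {"summary": {...}, "detail": {...}} for the requested dimension
data = entry("1", "2024-01-01T00:00:00Z", "2024-07-11T00:00:00Z")
print(data["summary"]["ip"])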

@ -0,0 +1,270 @@
# coding=utf-8
from __future__ import division
import json
from collections import defaultdict
jobnum_region_dict = {
"10": "省公司",
"110": "武汉分公司",
"170": "襄阳分公司",
"130": "鄂州分公司",
"260": "孝感分公司",
"250": "黄冈分公司",
"120": "黄石分公司",
"190": "咸宁分公司",
"200": "荆州分公司",
"140": "宜昌分公司",
"150": "恩施分公司",
"160": "十堰分公司",
"240": "随州分公司",
"230": "荆门分公司",
"1801": "江汉分公司",
"1802": "潜江分公司",
"1803": "天门分公司"
}
def keep_digits_filter(code):
"""
Job numbers may contain letters; keep only the digits
"""
return ''.join(filter(str.isdigit, str(code)))
def find_region_by_code(code, region_dict):
"""
Look up the company that a job number belongs to
Returns "错误工号" (invalid job number) when nothing matches
"""
code_str = keep_digits_filter(code)
# use a generator expression with next() to find the first prefix that matches
company = next(
(region_dict.get(code_str[:i]) for i in range(2, min(len(code_str), 5)) if code_str[:i] in region_dict),
"错误工号")
return company
def ip_summary_data_format(ip_summary_data):
"""
Conversion for the IP dimension
"""
result = {"summary": {"ip": []}, "detail": {"ip": {}}}
grouped_data = defaultdict(lambda: {"reqs": 0, "ips": set()})
ip_detail_dict = defaultdict(lambda: defaultdict(lambda: {"req_frequency": 0}))
# set of all distinct IPs
ips_total = set()
for ip_data in ip_summary_data:
company = find_region_by_code(ip_data["jobnum"], jobnum_region_dict)
count = ip_data["count"]
ip = ip_data["ip"]
jobnum = ip_data["jobnum"]
ip_detail_dict_key = "{}{}".format(ip, jobnum)
# update the aggregate counters
grouped_data[company]["reqs"] += count
grouped_data[company]["ips"].add(ip)
ips_total.add(ip)
# build the drill-down detail
ip_detail_dict[company][ip_detail_dict_key]["req_ip"] = ip
ip_detail_dict[company][ip_detail_dict_key]["req_jobnum"] = jobnum
ip_detail_dict[company][ip_detail_dict_key]["req_frequency"] += count
# total request count and distinct IP count
reqs_total = sum(data["reqs"] for data in grouped_data.values())
# bail out early when there were no requests
if reqs_total == 0:
return result
# build the summary section
ip_data_list = [
{
"company": company,
"req_frequency": data["reqs"],
# this company's requests / total requests across all companies
"frequency_rate": round(data["reqs"] / reqs_total, 4),
"ip_count": len(data["ips"]),
# this company's IP count / total IP count across all companies
"ip_rate": round(len(data["ips"]) / len(ips_total), 4),
# this company's requests / this company's IP count
"ip_avg": data["reqs"] // len(data["ips"]),
}
for company, data in grouped_data.items()
]
result["summary"]["ip"] = sorted(ip_data_list, key=lambda x: x["req_frequency"], reverse=True)
# build the detail section
result["detail"]["ip"] = {
company: sorted(data.values(), key=lambda x: x['req_frequency'], reverse=True)
for company, data in ip_detail_dict.items()
}
return result
def account_summary_data_format(account_summary_data):
"""
Conversion for the account dimension
"""
result = {"summary": {"account": []}, "detail": {"account": {}}}
grouped_data = defaultdict(lambda: {"reqs": 0, "accounts": set()})
account_detail_dict = defaultdict(lambda: defaultdict(lambda: {"req_frequency": 0}))
accounts_total = set()
for account_data in account_summary_data:
company = find_region_by_code(account_data["jobnum"], jobnum_region_dict)
count = account_data["count"]
account = account_data["account"]
jobnum = account_data["jobnum"]
account_detail_dict_key = "{}{}".format(account, jobnum)
# update the aggregate counters
grouped_data[company]["reqs"] += count
grouped_data[company]["accounts"].add(account)
accounts_total.add(account)
# build the drill-down detail
account_detail_dict[company][account_detail_dict_key]["req_account"] = account
account_detail_dict[company][account_detail_dict_key]["req_jobnum"] = jobnum
account_detail_dict[company][account_detail_dict_key]["req_frequency"] += count
# total request count and distinct account count
reqs_total = sum(data["reqs"] for data in grouped_data.values())
# bail out early when there were no requests
if reqs_total == 0:
return result
# build the summary section
account_data_list = [
{
"company": company,
"req_frequency": data["reqs"],
# this company's requests / total requests across all companies
"frequency_rate": round(data["reqs"] / reqs_total, 4),
"account_count": len(data["accounts"]),
# this company's account count / total account count across all companies
"account_rate": round(len(data["accounts"]) / len(accounts_total), 4),
# this company's requests / this company's account count
"account_avg": data["reqs"] // len(data["accounts"]),
}
for company, data in grouped_data.items()
]
result["summary"]["account"] = sorted(account_data_list, key=lambda x: x["req_frequency"], reverse=True)
# build the detail section
result["detail"]["account"] = {company: sorted(data.values(), key=lambda x: x['req_frequency'], reverse=True)
for company, data in account_detail_dict.items()}
return result
def interface_summary_data_format(interface_summary_data):
"""
Conversion for the interface dimension
"""
result = {"summary": {"interface": []}, "detail": {"interface": {}}}
grouped_data = defaultdict(lambda: {"reqs": 0})
interface_detail_dict = defaultdict(lambda: defaultdict(lambda: {"req_frequency": 0}))
for interface_data in interface_summary_data:
count = interface_data["count"]
interface = interface_data["interface"]
jobnum = interface_data["jobnum"]
account = interface_data["account"]
ip = interface_data["ip"]
interface_detail_dict_key = "{}{}{}".format(ip, account, jobnum)
# update the aggregate counters
grouped_data[interface]["reqs"] += count
# build the drill-down detail
interface_detail_dict[interface][interface_detail_dict_key]["interface_addr"] = interface
interface_detail_dict[interface][interface_detail_dict_key]["req_ip"] = ip
interface_detail_dict[interface][interface_detail_dict_key]["req_account"] = account
interface_detail_dict[interface][interface_detail_dict_key]["req_jobnum"] = jobnum
interface_detail_dict[interface][interface_detail_dict_key]["req_frequency"] += count
# total request count
reqs_total = sum(data["reqs"] for data in grouped_data.values())
# bail out early when there were no requests
if reqs_total == 0:
return result
# build the summary section
interface_data_list = [
{
"interface_addr": interface,
"req_frequency": data["reqs"],
# this interface's requests / total requests across all interfaces
"frequency_rate": round(data["reqs"] / reqs_total, 4),
# this interface's requests / 20 (the top-20 interfaces are queried)
"frequency_avg": data["reqs"] // 20,
}
for interface, data in grouped_data.items()
]
result["summary"]["interface"] = sorted(interface_data_list, key=lambda x: x["req_frequency"], reverse=True)
# build the detail section
result["detail"]["interface"] = {
interface: sorted(data.values(), key=lambda x: x["req_frequency"], reverse=True)
for interface, data in interface_detail_dict.items()
}
return result
def menu_summary_data_format(menu_summary_data):
"""
Conversion for the menu dimension
"""
result = {"summary": {"menu": []}, "detail": {"menu": {}}}
grouped_data = defaultdict(lambda: {"reqs": 0, "menu": set()})
menu_detail_dict = defaultdict(lambda: defaultdict(lambda: {"req_frequency": 0}))
menu_total = set()
for menu_data in menu_summary_data:
count = menu_data["count"]
menu = menu_data["menu"]
jobnum = menu_data["jobnum"]
account = menu_data["account"]
ip = menu_data["ip"]
menu_detail_dict_key = "{}{}{}".format(ip, account, jobnum)
# update the aggregate counters and the set of distinct menus
grouped_data[menu]["reqs"] += count
grouped_data[menu]["menu"].add(menu)
menu_total.add(menu)
# build the drill-down detail
menu_detail_dict[menu][menu_detail_dict_key]["menu_name"] = menu
menu_detail_dict[menu][menu_detail_dict_key]["req_ip"] = ip
menu_detail_dict[menu][menu_detail_dict_key]["req_account"] = account
menu_detail_dict[menu][menu_detail_dict_key]["req_jobnum"] = jobnum
menu_detail_dict[menu][menu_detail_dict_key]["req_frequency"] += count
# total request count
reqs_total = sum(data["reqs"] for data in grouped_data.values())
# bail out early when there were no requests or no menus
if reqs_total == 0 or len(menu_total) == 0:
return result
# build the summary section
menu_data_list = [
{
"menu_name": menu,
"req_frequency": data["reqs"],
# this menu's requests / total requests across all menus
"frequency_rate": round(data["reqs"] / reqs_total, 4),
# this menu's requests / the number of distinct menus
"frequency_avg": data["reqs"] // len(menu_total),
}
for menu, data in grouped_data.items()
]
result["summary"]["menu"] = sorted(menu_data_list, key=lambda x: x["req_frequency"], reverse=True)
# build the detail section
result["detail"]["menu"] = {menu: sorted(data.values(), key=lambda x: x["req_frequency"], reverse=True)
for menu, data in menu_detail_dict.items()}
return result
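A small worked example of the prefix lookup plus the IP formatter, with made-up records (jobnum prefixes 110 and 170 map to 武汉分公司 and 襄阳分公司 in jobnum_region_dict):

sample = [
    {"ip": "10.0.0.1", "jobnum": "110123", "count": 30},
    {"ip": "10.0.0.2", "jobnum": "110456", "count": 10},
    {"ip": "10.0.0.3", "jobnum": "170001", "count": 60},
]
print(find_region_by_code("110123", jobnum_region_dict))  # 武汉分公司
res = ip_summary_data_format(sample)
# 襄阳分公司 leads with 60 of 100 requests
print("{} {}".format(res["summary"]["ip"][0]["company"], res["summary"]["ip"][0]["frequency_rate"]))  # 襄阳分公司 0.6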

@ -0,0 +1,218 @@
#!/usr/bin/python
# encoding=utf-8
# author: tangwy
import json
import os, re
import codecs
import traceback
from collections import defaultdict
from isoc.utils.esUtil import EsUtil
# jobnum_region_dict and find_region_by_code are needed by pg_get_previous_company_count below
from isoc.utils.dashboard_data_conversion import ip_summary_data_format, account_summary_data_format, \
interface_summary_data_format, menu_summary_data_format, calculate_time_difference, summary_data_reqs_format, \
jobnum_region_dict, find_region_by_code
from dataInterface.functions import CFunction
from dataInterface.db.params import CPgSqlParam
from ext_logging import logger
TABLE_NAME = "ueba_logs"
DATA_TYPE = {
"IP": 1,
"ACCOUNT": 2,
"INTERFACE": 3,
"MENU": 4,
}
def pg_get_ip_group_data(startTime, endTime):
"""
IP dimension query
:param startTime: start time,
:param endTime: end time,
"""
result = []
sql = """ select ip, jobnum, sum(count) from {TABLE_NAME}
where logdate >= %s and logdate < %s and data_type = %s
group by ip, jobnum""".format(TABLE_NAME=TABLE_NAME)
res = CFunction.execute(CPgSqlParam(sql, params=(startTime, endTime, DATA_TYPE["IP"])))
if res:
for item in res:
result.append({
"ip": item[0],
"jobnum": item[1],
"count": item[2],
})
return result
def pg_get_account_group_data(startTime, endTime):
"""
Account dimension query
:param startTime: start time,
:param endTime: end time,
"""
result = []
sql = """ select account, jobnum, sum(count) from {TABLE_NAME}
where logdate >= %s and logdate < %s and data_type = %s
group by account, jobnum""".format(TABLE_NAME=TABLE_NAME)
res = CFunction.execute(CPgSqlParam(sql, params=(startTime, endTime, DATA_TYPE["ACCOUNT"])))
if res:
for item in res:
result.append({
"account": item[0],
"jobnum": item[1],
"count": item[2],
})
return result
def pg_get_interface_group_data(startTime, endTime):
"""
Interface dimension query
:param startTime: start time,
:param endTime: end time,
"""
result = []
sql = """ select interface, account, ip, jobnum, sum(count) from {TABLE_NAME}
where logdate >= %s and logdate < %s and data_type = %s
group by interface, account, ip, jobnum""".format(TABLE_NAME=TABLE_NAME)
res = CFunction.execute(CPgSqlParam(sql, params=(startTime, endTime, DATA_TYPE["INTERFACE"])))
if res:
for item in res:
result.append({
"interface": item[0],
"account": item[1],
"ip": item[2],
"jobnum": item[3],
"count": item[4],
})
return result
def pg_get_menu_group_data(startTime, endTime):
"""
Menu dimension query
:param startTime: start time,
:param endTime: end time,
"""
result = []
sql = """ select menu, account, ip, jobnum, sum(count) from {TABLE_NAME}
where logdate >= %s and logdate < %s and data_type = %s
group by menu, account, ip, jobnum""".format(TABLE_NAME=TABLE_NAME)
res = CFunction.execute(CPgSqlParam(sql, params=(startTime, endTime, DATA_TYPE["MENU"])))
if res:
for item in res:
result.append({
"menu": item[0],
"account": item[1],
"ip": item[2],
"jobnum": item[3],
"count": item[4],
})
return result
def pg_get_previous_company_count(startTime, endTime, data_type):
"""
Per-company request totals for the previous period
:param startTime: start time,
:param endTime: end time,
:param data_type: aggregation type, ACCOUNT or IP,
"""
result = defaultdict(int)
if data_type in DATA_TYPE:
data_type = DATA_TYPE[data_type]
sql = """ select jobnum, sum(count) from {TABLE_NAME}
where logdate >= %s and logdate < %s and data_type = %s
group by jobnum""".format(TABLE_NAME=TABLE_NAME)
res = CFunction.execute(CPgSqlParam(sql, params=(startTime, endTime, data_type)))
if res:
for item in res:
company = find_region_by_code(item[0], jobnum_region_dict)
result[company] += item[1]
return result
def pg_get_previous_interface_count(startTime, endTime):
"""
Total request count per interface for the previous period
:param startTime: start time,
:param endTime: end time,
"""
result = defaultdict(int)
sql = """ select interface, sum(count) from {TABLE_NAME}
where logdate >= %s and logdate < %s and data_type = %s
group by interface""".format(TABLE_NAME=TABLE_NAME)
res = CFunction.execute(CPgSqlParam(sql, params=(startTime, endTime, DATA_TYPE["INTERFACE"])))
if res:
for item in res:
result[item[0]] += item[1]
return result
def pg_get_previous_menu_count(startTime, endTime):
"""
Total request count per menu for the previous period
:param startTime: start time,
:param endTime: end time,
"""
result = defaultdict(int)
sql = """ select menu, sum(count) from {TABLE_NAME}
where logdate >= %s and logdate < %s and data_type = %s
group by menu""".format(TABLE_NAME=TABLE_NAME)
res = CFunction.execute(CPgSqlParam(sql, params=(startTime, endTime, DATA_TYPE["MENU"])))
if res:
for item in res:
result[item[0]] += item[1]
return result
def entry(data_type, start, end):
# start of the preceding comparison period
previous_time = calculate_time_difference(start, end)
try:
data = {}
if data_type == "1":
ip_summary_data = pg_get_ip_group_data(start, end)
data = ip_summary_data_format(ip_summary_data)
previous_company_dict = pg_get_previous_company_count(previous_time, start, "IP")
for d in data["summary"]["ip"]:
previous = previous_company_dict.get(d["company"], 0)
d["trend"] = round((d["req_frequency"] - previous) / float(previous), 4) if previous else 0.0
if data_type == "2":
account_summary_data = pg_get_account_group_data(start, end)
data = account_summary_data_format(account_summary_data)
previous_company_dict = pg_get_previous_company_count(previous_time, start, "ACCOUNT")
for d in data["summary"]["account"]:
previous = previous_company_dict.get(d["company"], 0)
d["trend"] = round((d["req_frequency"] - previous) / float(previous), 4) if previous else 0.0
if data_type == "3":
interface_summary_data = pg_get_interface_group_data(start, end)
data = interface_summary_data_format(interface_summary_data)
previous_interface_dict = pg_get_previous_interface_count(previous_time, start)
for d in data["summary"]["interface"]:
previous = previous_interface_dict.get(d["interface_addr"], 0)
d["trend"] = round((d["req_frequency"] - previous) / float(previous), 4) if previous else 0.0
if data_type == "4":
menu_summary_data = pg_get_menu_group_data(start, end)
data = menu_summary_data_format(menu_summary_data)
previous_menu_dict = pg_get_previous_menu_count(previous_time, start)
for d in data["summary"]["menu"]:
previous = previous_menu_dict.get(d["menu_name"], 0)
d["trend"] = round((d["req_frequency"] - previous) / float(previous), 4) if previous else 0.0
return data
except Exception, e:
logger.error("Failed to fetch analysis data, err: {}, traceback: {}".format(str(e), traceback.format_exc()))
raise e
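The trend field is a period-over-period change rate. Pulled out as a standalone helper (hypothetical, mirroring the guarded computation above):

def trend(current, previous):
    # change rate versus the previous window; defined as 0 when there was no previous traffic
    if not previous:
        return 0.0
    return round((current - previous) / float(previous), 4)

print(trend(150, 100))  # 0.5, i.e. 50% more requests than the previous window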

@ -0,0 +1,109 @@
# coding=utf-8
"""
@Author: fu-zhe
@FileName: db2json.py
@DateTime: 2024/5/15 10:19
@Description: DB utility queries return nested lists; convert them into JSON the frontend can easily consume
"""
import json
import traceback
from dataInterface.functions import CFunction
from dataInterface.db.params import CPgSqlParam
from uebaMetricsAnalysis.utils.ext_logging import logger
class DBType(object):
LIST = 'list'
DICT = 'dict'
JOB_TABLE_NAME = "ueba_clean_jobs"
ANALYSIS_TABLE_NAME = "ueba_analysis_log"
class DBUtils(object):
@classmethod
def transition(cls, filed, sql, data_type):
"""
Convert a DB query result into JSON the frontend can use directly
:param data_type: target type; currently list and dict are supported
:param filed: list of DB column names, e.g. for `select id, name from table` use filed=['id','name']
:param sql: SQL statement, e.g. `select * from table`
:return: [{filed1: value1, filed2: value2,...}, ...] or {filed1: value1, filed2: value2,...}
"""
data = cls.execute(sql=sql)
if cls.is_list_of_empty_lists(data):
return {DBType.LIST: list, DBType.DICT: dict}[data_type]()
if data_type == DBType.DICT:
data = data[0]
if len(filed) != len(data):
raise Exception("{} does not match the number of columns in the query result".format(filed))
res = {cls.snake2camel(filed[i]): data[i] for i in range(len(filed))}
logger.info("res = {}".format(res))
return res
if data_type == DBType.LIST:
res = []
if not data:
return res
for item in data:
if len(item) != len(filed):
raise Exception("{} does not match the number of columns in the query result".format(filed))
res.append({cls.snake2camel(filed[i]): item[i] for i in range(len(filed))})
return res
@classmethod
def snake2camel(cls, snake_filed):
"""
Convert snake_case naming to lowerCamelCase
:param snake_filed: snake_case name, e.g. user_name
:return: camelCase name, user_name ---> userName
"""
if not snake_filed or not isinstance(snake_filed, str):
return snake_filed
parts = snake_filed.split('_')
# keep the first word lowercase and capitalize the first letter of the rest
camel = parts[0] + ''.join(word.capitalize() for word in parts[1:])
return camel
@classmethod
def execute(cls, sql):
"""
Execute a SQL statement
:param sql: SQL statement
:return:
"""
try:
sql_list = CPgSqlParam(sql)
logger.info("execute sql :\n {}\n".format(sql))
data = CFunction.execute(sql_list)
logger.info("execute result : {}".format(data))
return json.loads(data)
except Exception as e:
logger.error("execute sql error sql: \n {}\n traceback: {}\n".format(sql, traceback.format_exc()))
raise Exception("Query failed")
@classmethod
def is_list_of_empty_lists(cls, target_list):
# check whether every sub-list is empty
return all(not sublist or len(sublist) == 0 for sublist in target_list)
@classmethod
def list_snake2camel(cls, snake_list):
"""
Convert the snake_case keys of every dict in a list to camelCase
:param snake_list: list of snake_case dicts, e.g. `[{'user_name':'', 'user_age': ''}]`
:return: `[{'user_name':'', 'user_age': ''}]` ----> `[{'userName':'', 'userAge':''}]`
"""
camel_list = []
for snake_dict in snake_list:
camel_list.append({cls.snake2camel(snake): value for snake, value in snake_dict.items()})
return camel_list
@classmethod
def write_job_status(cls, job_id, status, err):
sql = """update {JOB_TABLE_NAME} set status=%s, err=%s
where job_id=%s """.format(JOB_TABLE_NAME=JOB_TABLE_NAME)
CFunction.execute(CPgSqlParam(sql, params=(status, err, job_id)))
@classmethod
def insert_job_record(cls, job_id, start_time, end_time, status):
sql = """insert into {JOB_TABLE_NAME}(job_id,start_time,end_time,status) values(%s,%s,%s,%s)""".format(JOB_TABLE_NAME=JOB_TABLE_NAME)
CFunction.execute(CPgSqlParam(sql, params=(job_id, start_time, end_time, status)))
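A usage sketch of DBUtils.transition with the job-table query used by the cron above; column names come back as camelCase keys:

sql = "select job_id, end_time from ueba_clean_jobs order by end_time desc limit 1"
rows = DBUtils.transition(["job_id", "end_time"], sql, DBType.LIST)
# -> e.g. [{"jobId": "...", "endTime": "..."}], or [] when the table is empty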

@ -0,0 +1,415 @@
# -*- coding: utf-8 -*-
# @Time : 2019/6/6 9:48
# @Author : panda
import datetime
import json
import time
import traceback
from elasticsearch import Elasticsearch
from elasticsearch import helpers
from appsUtils.confutil import ConfUtil
conf_util = ConfUtil()
es_dict = conf_util.getElasticsearchConf()
es_host_list = list()
for ip in es_dict.get('ip').split(','):
es_host_list.append({'host': ip, 'port': int(es_dict.get('webport'))})
# es_host_list = [{'host': '10.67.1.180', 'port': 9200}]
# default doc type
TYPE = '_doc'
# default number of shards
NUMBER_OF_SHARDS = 5
# default number of replicas
NUMBER_OF_REPLICAS = 1
def to_dict(data):
"""
Convert an SQL result set into [{column: value, ...}, ...]
:param data: data returned by an SQL query
:return:
"""
res = list()
columns = data.get('columns')
for row in data.get('rows'):
tmp = dict()
for i in range(0, len(row)):
tmp[columns[i]['name']] = row[i]
res.append(tmp)
return res
class EsUtil(object):
es_client = None
def __init__(self):
if not EsUtil.es_client:
EsUtil.es_client = Elasticsearch(es_host_list,
timeout=60,
max_retries=10,
retry_on_timeout=True)
self.es = EsUtil.es_client
def get_client(self):
"""
提供原生的es_Client
:return:
"""
return self.es
def is_index_exist(self, index_name):
return self.es.indices.exists(index=index_name)
def get_available_index(self, start_time=None, end_time=None, prefix=None, suffix=None):
results = list()
index_ = "*"
start_date = None
end_date = None
if prefix:
index_ = prefix + index_
if suffix:
index_ = index_ + suffix
res = self.es.cat.indices(index=index_, format="json")
if start_time:
start_date = datetime.datetime.fromtimestamp(start_time / 1000).strftime("%Y%m%d")
if end_time:
end_date = datetime.datetime.fromtimestamp(end_time / 1000).strftime("%Y%m%d")
for ind in res:
indices = ind.get('index', '').split('-')
if start_date and len(indices) > 1:
if indices[-2] < start_date:
continue
if end_date and len(indices) > 1:
if indices[-2] > end_date:
continue
results.append(ind)
return results
def get_available_index_name(self, start_time=None, end_time=None, prefix=None, suffix=None):
results = list()
indices = self.get_available_index(start_time, end_time, prefix, suffix)
if not indices:
return results
for index_ in indices:
results.append(index_.get("index"))
return results
def search_by_sql(self, sql):
"""
SQL query
Only keyword-typed fields can be used in group-by aggregations
:param sql:
:return:
"""
return to_dict(self.es.xpack.sql.query(body={'query': sql}))
def create_index(self, index_name, field_type_dict, number_of_shards, number_of_replicas):
"""
Create an index; for now only a simple field->type dict is supported
:param index_name: index name
:param field_type_dict: dict of field names to field types
:param number_of_shards: number of shards
:param number_of_replicas: number of replicas
:return: whether creation succeeded
"""
if self.is_index_exist(index_name):
raise Exception('index [%s] already exists' % index_name)
body = dict()
settings = {
'number_of_shards': number_of_shards,
'number_of_replicas': number_of_replicas
}
mappings = dict()
index_type = dict()
properties = dict()
# print field_type_dict
for (key, value) in field_type_dict.items():
properties[key] = {'type': value}
index_type['properties'] = properties
mappings[TYPE] = index_type
body['settings'] = settings
body['mappings'] = mappings
# print json.dumps(body)
response = self.es.indices.create(index=index_name, body=body)
return response['acknowledged'] and response['shards_acknowledged']
def create_index_by_mapping_alias(self, index_name, mappings, alias_name):
"""
Create an index with an alias and an explicit mapping
:param index_name:
:param mappings:
:param alias_name:
:return:
"""
if self.is_index_exist(index_name):
raise Exception('index [%s] already exists' % index_name)
# es.indices.create(index=index_name)
# es.indices.put_mapping(index=index_name, body=mapping)
# es.indices.put_alias(index=index_name,name=alias_name)
# create everything in a single request
request_body = dict()
request_body['settings'] = {
'number_of_replicas': NUMBER_OF_REPLICAS,
'number_of_shards': NUMBER_OF_SHARDS
}
if isinstance(mappings, dict):
request_body['mappings'] = mappings
else:
request_body['mappings'] = json.loads(mappings)
request_body[index_name] = {
'aliases': {
alias_name: {}
}
}
response = self.es.indices.create(index=index_name, body=request_body)
return response['acknowledged'] and response['shards_acknowledged']
def create_index_simple(self, index_name, field_type_dict):
"""
Defaults to five shards and one replica; these may later come from a config file
:param index_name:
:param field_type_dict:
:return:
"""
return self.create_index(index_name, field_type_dict, NUMBER_OF_SHARDS, NUMBER_OF_REPLICAS)
def create_index_by_body(self, index_name, request_body):
"""
Create an index from a caller-supplied body
:param index_name:
:param request_body: caller-supplied mapping and settings, e.g. {'mappings': {'properties': {}}, 'settings': {}}
:return: whether the index was created
"""
if self.is_index_exist(index_name):
raise Exception('index [%s] already exists' % index_name)
response = self.es.indices.create(index=index_name, body=request_body)
return response['acknowledged'] and response['shards_acknowledged']
def search(self, index_name, request_body, request_params=dict()):
"""
Raw passthrough to the native search API
:param request_params:
:param index_name:
:param request_body:
:return:
"""
return self.es.search(index=index_name, body=request_body, params=request_params, request_timeout=60)
def search_by_uri(self, index_name, uri_params):
"""
Query via URI parameters
demo: test/_search?q=Panda&df=name&from=10&size=10&sort=age:desc&sort=id:desc
:param index_name: index name; may be empty to search across all indices
:param uri_params: dict such as
{
'q': 'Alice',
'df': "name",
'from': 3,
'size': 10,
'sort': [
'age:desc', 'name:desc'
]
} see the URI-search syntax for details
:return:
"""
return self.es.search(index=index_name, params=uri_params)
def scroll_search(self, index_name, scroll, request_body, request_params=dict()):
"""
Paginated query via a scroll snapshot; returns the first page plus the scroll id for fetching more
This can only move forward; it cannot go back to a previous page
:param request_params:
:param index_name: index name
:param scroll: how long to keep the snapshot alive
:param request_body: the query body
:return: response holds the data; scroll_msg is passed back in to fetch the next snapshot page; scroll_size can record the starting offset after leaving the loop
"""
response = self.es.search(index=index_name, scroll=scroll, body=request_body, params=request_params, request_timeout=60)
scroll_msg = {'scroll_id': response.get('_scroll_id'), 'scroll': scroll}
return scroll_msg, response
def scroll_next(self, scroll_msg, request_params=dict()):
"""
Pass in the scroll_msg returned by scroll_search to fetch the next snapshot page
:param request_params:
:param scroll_msg:
:return:
"""
response = self.es.scroll(body=scroll_msg, params=request_params)
scroll_msg = {'scroll_id': response.get('_scroll_id'), 'scroll': scroll_msg.get('scroll')}
return scroll_msg, response
def delete_index(self, index_name):
"""
Delete an index
:param index_name:
:return:
"""
return self.es.indices.delete(index=index_name)['acknowledged']
def delete_index_by_alias(self, alias_name):
"""
Delete an index and its alias; assumes indices are named alias + suffix
Risky: this may delete alias_name* indices created by others
Use with caution
:return:
"""
index_name = '%s*' % alias_name
try:
if self.es.indices.exists_alias(name=alias_name, index=index_name):
self.es.indices.delete_alias(name=alias_name, index=index_name)
if self.es.indices.exists(index=index_name):
self.es.indices.delete(index=index_name)
except:
traceback.format_exc()
return not (self.es.indices.exists_alias(name=alias_name, index=index_name) and self.es.indices.exists(
index=index_name))
def index(self, index_name, request_body):
"""
Insert a single doc
:param index_name: index name
:param request_body: request data dict
{
"name": "Alice",
"address": "武汉",
"age": 1,
"birthday": "2019-06-03T18:47:45.999"
}
:return:
"""
return self.es.index(index=index_name, doc_type=TYPE, body=request_body).get('result')
def bulk_insert(self, index_name, data_list):
"""
Bulk insert
:return:
"""
actions = list()
for data in data_list:
action = {
"_index": index_name,
"_type": TYPE,
'_source': data
}
actions.append(action)
return helpers.bulk(self.es, actions)
def search_after_start(self, index_name, request_body):
"""
Use Elasticsearch search_after to avoid deep-pagination problems
:return:
"""
if request_body.get('size') is None and request_body.get('sort') is None:
raise Exception('request body is not validate')
response = self.es.search(index=index_name, body=request_body)
search_after_body = {
'size': request_body.get('size'),
'sort': request_body.get('sort'),
'search_after': response.get('hits', {}).get('hits', [{}])[-1].get('sort')  # sort values come from the last hit of the response
}
return search_after_body, response
def search_after(self, index_name, search_after_body):
"""
search_after
:param index_name:
:param search_after_body
:return:
"""
response = self.es.search(index=index_name, body=search_after_body)
search_after_body = {
'size': search_after_body.get('size'),
'sort': search_after_body.get('sort'),
'search_after': response.get('hits', {}).get('hits', [{}])[-1].get('sort')
}
return search_after_body, response
def add_field(self, index, mapping):
"""
Add fields to an existing index
@param index: index name
@param mapping: example: mapping = {
"properties": {
field_name: {
"type": field_type
}
}
}
@return:
"""
self.es.indices.put_mapping(index=index, doc_type="_doc",body=mapping,include_type_name=True)
def bulk_update(self, index, query_dsl):
"""
Bulk update
@param index: index name
@param query_dsl: query selecting the docs to update, e.g. {
"query":{
"bool":{"must":[
{
"terms":{
"log_id":["c6c8eaca-d891-4f0e-b15b-b02f02dbe4df","92f40a7c-e3f1-412d-9a00-72f22b7ebc9b","4257dbe6-369a-42f5-9f14-4406a3eb5c7a"]
}
}
]}
},
"script":{
"inline":"ctx._source.dport = params.dport",
"params":{
"dport":50801
},
"lang":"painless"
}
}
@return:
"""
self.es.update_by_query(index=index,body=query_dsl)
if __name__ == '__main__':
es = EsUtil()
# print es.is_index_exist('test')
#es.search('', {})
index_name = "internal_isop_log-*"
# mapping = {
# "properties": {
# "is_report": {
# "type": "boolean"
# }
# }
# }
#
# es.add_field(index_name,mapping)
index = 'internal_isop_incident-*'
query_dsl ={
"query":{
"bool":{"must":[
{
"terms":{
"id":[ ["9f00c0be-ba38-4edc-9f39-889a57ef89c4cq", "29a9c4dc-e7d4-432b-aef8-d216401cb9e5cq", "8494a6be-f80e-4983-adee-92cbf7ef5c31cq"]]
}
}
]}
},
"script":{
"inline":"ctx._source.is_report = params.is_report",
"params":{
"is_report":True
},
"lang":"painless"
}
}
es.bulk_update(index,query_dsl)
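A usage sketch of the scroll pair above, assuming the traffic index and a match-all query:

es = EsUtil()
scroll_msg, resp = es.scroll_search("bsa_traffic*", "3m", {"query": {"match_all": {}}, "size": 1000})
hits = resp["hits"]["hits"]
while True:
    scroll_msg, page_resp = es.scroll_next(scroll_msg)
    page = page_resp["hits"]["hits"]
    if not page:
        break
    hits += page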

@ -0,0 +1,90 @@
#!/usr/bin/python
#encoding=utf-8
# author: tangwy
import json
import os,re
import codecs
import csv
import ConfigParser
from isoc.utils.esUtil import EsUtil
# print json.dumps(es_host_list)  # es_host_list is not defined in this module
# conf_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'conf')
# ini_path = os.path.join(conf_path, 'conf.ini')
# config = ConfigParser.ConfigParser()
# config.read(ini_path)
# ES_HOST = config.get('COMMON', 'es_host')
# ES_PER_COUNT = config.get('COMMON', 'es_per_count')
# ES_INDEX_NAME = config.get('COMMON', 'es_index_name')
# CSV_FILE_PATH = config.get('COMMON', 'csv_file_path')
def createIndex():
field_map = {
"ip1": "text",
"ip2": "text",
"ip3": "text",
"ip4": "text",
}
es_instance = EsUtil()
res = es_instance.create_index_simple("urba_analyse_2024_06", field_map)
return res
# def generate_ip_range(start_ip, end_ip):
# start_parts = list(map(int, start_ip.split('.')))
# end_parts = list(map(int, end_ip.split('.')))
# ip_range = []
# while start_parts < end_parts:
# ip_range.append('.'.join(map(str, start_parts)))
# start_parts[3] += 1
# for i in range(3, 0, -1):
# if start_parts[i] == 256:
# start_parts[i] = 0
# start_parts[i-1] += 1
# ip_range.append('.'.join(map(str, start_parts))) # append the end IP address
# return ip_range
# # scroll query for data
# def get_ip_summary_data(start_time,end_time,query_body):
# es = Elasticsearch(ES_HOST)
# msg = es.search(index=ES_INDEX_NAME,scroll="3m",size=ES_PER_COUNT,_source_includes= ["cookies","url","sip","dip"], query=query_body)
# result = msg['hits']['hits']
# total = msg['hits']['total']
# scroll_id = msg['_scroll_id']
# for i in range(0,int(total["value"]/ES_PER_COUNT)+1):
# query_scroll = es.scroll(scroll_id=scroll_id, scroll='3m')["hits"]["hits"]
# result += query_scroll
# return result
# # read the CSV file to get IP-to-region mappings
# def get_ip_area_relation(csv_file_path):
# iprange_map = {}
# with codecs.open(csv_file_path, mode='r',encoding='utf-8') as file:
# csv_reader = csv.reader(file)
# for row in csv_reader:
# headers = next(csv_reader)
# ip_start = headers[0]
# ip_end = headers[1]
# ip_range = generate_ip_range(ip_start, ip_end)
# ip_area = headers[5]
# print (ip_area)
# for ip in ip_range:
# iprange_map[ip] = ip_area
# return iprange_map
# get_ip_area_relation("/tmp/data/ip_area_relation.csv")
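
# A minimal generator variant of the commented generate_ip_range above
# (assumes IPv4 dotted quads; yields addresses lazily instead of building the
# full list in memory, and includes the end address):
#
# def iter_ip_range(start_ip, end_ip):
#     cur = list(map(int, start_ip.split('.')))
#     end = list(map(int, end_ip.split('.')))
#     while cur <= end:
#         yield '.'.join(map(str, cur))
#         cur[3] += 1
#         # propagate carries from the last octet upward
#         for i in range(3, 0, -1):
#             if cur[i] == 256:
#                 cur[i] = 0
#                 cur[i - 1] += 1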

@ -10,7 +10,7 @@ import os
 from mlogging import TimedRotatingFileHandler_MP
 from appsUtils import env
 
-APPFOLDERNAME = 'UebaMetricsAnalysis'
+APPFOLDERNAME = 'uebaMetricsAnalysis'
 
 def get_logger(logfile):

@ -6,48 +6,50 @@
 @Description:
 """
-import json
-import traceback
+import json,os
+import traceback,time
 from rest_framework import viewsets
 from rest_framework.decorators import list_route, detail_route
-from UebaMetricsAnalysis.utils.ext_logging import logger
-from UebaMetricsAnalysis.lib.result import Result
+from uebaMetricsAnalysis.utils.ext_logging import logger
+from uebaMetricsAnalysis.lib.result import Result
+from uebaMetricsAnalysis.utils import config
+from uebaMetricsAnalysis.utils import es_operation
+from uebaMetricsAnalysis.utils.dashboard_data import entry
 
 class DashboardViewSets(viewsets.GenericViewSet):
     @list_route(methods=['GET'])
-    def training_cyber_range(self, request):
-        type = request.GET.get('type')
+    def get_summary_data_list(self, request):
         try:
-            return self.get_summary_data(self,type)
+            data_type = request.GET.get('type')
+            startTime = "2024-01-01T00:00:00Z"  # request.GET.get('startDate')
+            endTime = "2024-07-11T00:00:00Z"  # request.GET.get('endDate')
+            # 1: ip, 2: account, 3: interface, 4: menu
+            logger.info("获取分析结构数据:" + data_type + ";" + startTime + ";" + endTime)
+            return Result.ok(entry(data_type, startTime, endTime))
         except Exception, e:
-            logger.error("实训大屏数据获取失败, err: {}, traceback: {}".format(str(e), traceback.format_exc()))
+            logger.error(traceback.format_exc())
             return Result.failed("查询失败", str(e))
 
-    @list_route(methods=['GET'])
-    def get_summary_data_list(self,request):
-        data_type = request.GET.get('type')
+    @list_route(methods=['POST'])
+    def write_audit_log(self, request):
         try:
-            if data_type == "ip":
-                return self.get_ip_summary_data()
-            if data_type == "account":
-                return self.get_account_summary_data()
-            if data_type == "interface":
-                return self.get_interface_summary_data()
-            if data_type == "menu":
-                return self.get_menu_summary_data()
-            return Result.ok({"status":200})
+            body = json.loads(request.body)
+            action = body.get("action")
+            username = request.session.get('username', "unknown user")
+            params = body.get("params")
+            logger.info("Audit_Log:" + username + "," + action + ",params:" + json.dumps(params))
+            return Result.ok({"status":200})
         except Exception, e:
-            logger.error("实训大屏数据获取失败, err: {}, traceback: {}".format(str(e), traceback.format_exc()))
-            return Result.failed("查询失败", str(e))
+            logger.info("Audit_Log: err: {}, traceback: {}".format(str(e), traceback.format_exc()))
+            return Result.ok("ok")
 
-    def get_ip_summary_data(self):
-        return Result.ok({"status":200})
-
-    def get_account_summary_data(self):
-        return Result.ok({"status":200})
-
-    def get_interface_summary_data(self):
-        return Result.ok({"status":200})
-
-    def get_menu_summary_data(self):
-        return Result.ok({"status":200})
+    @list_route(methods=['GET'])
+    def get_default_rule(self, request):
+        conf_path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'conf')
+        jsonfile_path = os.path.join(conf_path, 'defaultRule.json')
+        rule_data = config.read_json_config(jsonfile_path)
+        return Result.ok(rule_data)
+
+    @list_route(methods=['GET'])
+    def create_index(self, request):
+        res = es_operation.createIndex()
+        logger.error(json.dumps(res))
+        return Result.ok(json.dumps(res))
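
# A minimal sketch of the config.read_json_config helper used in
# get_default_rule above (assumed behavior: parse a UTF-8 JSON file; the
# actual uebaMetricsAnalysis.utils.config implementation may differ):
#
# import codecs, json
#
# def read_json_config(path):
#     with codecs.open(path, 'r', encoding='utf-8') as f:
#         return json.load(f)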

@ -0,0 +1,801 @@
# coding=utf-8
from __future__ import unicode_literals
data = {
"summary": {
"ip": [
{
"company": "宜昌分公司",
"req_frequency": 517,
"frequency_rate": 17.1475953565506,
"ip_count": 8,
"ip_rate": 0.195121951219512,
"ip_avg": 2.14344941956882,
"trend": 0.09
},
{
"company": "随州分公司",
"req_frequency": 329,
"frequency_rate": 10.9121061359867,
"ip_count": 7,
"ip_rate": 0.170731707317073,
"ip_avg": 1.55887230514096,
"trend": 0.1
},
{
"company": "孝感分公司",
"req_frequency": 399,
"frequency_rate": 13.2338308457711,
"ip_count": 7,
"ip_rate": 0.170731707317073,
"ip_avg": 1.89054726368159,
"trend": -0.07
},
{
"company": "黄冈分公司",
"req_frequency": 495,
"frequency_rate": 16.4179104477612,
"ip_count": 9,
"ip_rate": 0.219512195121951,
"ip_avg": 1.82421227197347,
"trend": -0.02
},
{
"company": "省公司",
"req_frequency": 1275,
"frequency_rate": 42.2885572139304,
"ip_count": 10,
"ip_rate": 0.24390243902439,
"ip_avg": 4.22885572139304,
"trend": 0.1
}
],
"account": [
{
"company": "宜昌分公司",
"req_frequency": 134,
"frequency_rate": 19.7058823529412,
"account_count": 8,
"account_rate": 0.242424242424242,
"account_avg": 2.46323529411765,
"trend": 0.09
},
{
"company": "随州分公司",
"req_frequency": 73,
"frequency_rate": 10.7352941176471,
"account_count": 7,
"account_rate": 0.212121212121212,
"account_avg": 1.53361344537815,
"trend": 0.1
},
{
"company": "孝感分公司",
"req_frequency": 225,
"frequency_rate": 33.0882352941176,
"account_count": 7,
"account_rate": 0.212121212121212,
"account_avg": 4.72689075630252,
"trend": -0.07
},
{
"company": "黄冈分公司",
"req_frequency": 166,
"frequency_rate": 24.4117647058824,
"account_count": 9,
"account_rate": 0.272727272727273,
"account_avg": 2.71241830065359,
"trend": -0.02
},
{
"company": "省公司",
"req_frequency": 216,
"frequency_rate": 31.7647058823529,
"account_count": 10,
"account_rate": 0.303030303030303,
"account_avg": 3.17647058823529,
"trend": 0.1
}
],
"interface": [
{
"interface_addr": "http://190.89.233.2:8909/getUser",
"req_frequency": 212,
"frequency_rate": 0.160727824109174,
"frequency_avg": 0,
"trend": 0.07
},
{
"interface_addr": "http://190.89.233.2:8909/getpublicconfig",
"req_frequency": 225,
"frequency_rate": 0.170583775587566,
"frequency_avg": 0,
"trend": 0.02
},
{
"interface_addr": "http://190.89.233.2:8909/update/sysconfig",
"req_frequency": 882,
"frequency_rate": 0.66868840030326,
"frequency_avg": 0,
"trend": -0.09
}
],
"menu": [
{
"menu_name": "菜单1",
"req_frequency": 333,
"frequency_rate": 0.263449367088608,
"frequency_avg": 111,
"trend": 0.09
},
{
"menu_name": "菜单2",
"req_frequency": 315,
"frequency_rate": 0.249208860759494,
"frequency_avg": 105,
"trend": -0.01
},
{
"menu_name": "菜单3",
"req_frequency": 616,
"frequency_rate": 0.487341772151899,
"frequency_avg": 205.333333333333,
"trend": 0.02
}
]
},
"detail": {
"ip": {
"宜昌分公司": [
{
"req_ip": "192.156.3.11",
"req_jobnum": "54411",
"req_frequency": 22
},
{
"req_ip": "192.156.3.12",
"req_jobnum": "54411",
"req_frequency": 12
},
{
"req_ip": "192.156.3.19",
"req_jobnum": "54411",
"req_frequency": 78
},
{
"req_ip": "192.156.3.20",
"req_jobnum": "54411",
"req_frequency": 79
},
{
"req_ip": "192.156.3.21",
"req_jobnum": "54411",
"req_frequency": 80
},
{
"req_ip": "192.156.3.22",
"req_jobnum": "54411",
"req_frequency": 81
},
{
"req_ip": "192.156.3.23",
"req_jobnum": "54411",
"req_frequency": 82
},
{
"req_ip": "192.156.3.24",
"req_jobnum": "54411",
"req_frequency": 83
}
],
"随州分公司": [
{
"req_ip": "192.116.3.24",
"req_jobnum": 54415,
"req_frequency": 44
},
{
"req_ip": "192.116.3.25",
"req_jobnum": "54411",
"req_frequency": 45
},
{
"req_ip": "192.116.3.26",
"req_jobnum": "54411",
"req_frequency": 46
},
{
"req_ip": "192.116.3.27",
"req_frequency": 47
},
{
"req_ip": "192.116.3.28",
"req_frequency": 48
},
{
"req_ip": "192.116.3.29",
"req_frequency": 49
},
{
"req_ip": "192.116.3.30",
"req_frequency": 50
}
],
"孝感分公司": [
{
"req_ip": "192.126.3.24",
"req_frequency": 54
},
{
"req_ip": "192.126.3.25",
"req_frequency": 55
},
{
"req_ip": "192.126.3.26",
"req_frequency": 56
},
{
"req_ip": "192.126.3.27",
"req_frequency": 57
},
{
"req_ip": "192.126.3.28",
"req_frequency": 58
},
{
"req_ip": "192.126.3.29",
"req_frequency": 59
},
{
"req_ip": "192.106.3.30",
"req_frequency": 60
}
],
"黄冈分公司": [
{
"req_ip": "192.106.3.30",
"req_frequency": 51
},
{
"req_ip": "192.106.3.31",
"req_frequency": 52
},
{
"req_ip": "192.106.3.32",
"req_frequency": 53
},
{
"req_ip": "192.106.3.33",
"req_frequency": 54
},
{
"req_ip": "192.106.3.34",
"req_frequency": 55
},
{
"req_ip": "192.106.3.35",
"req_frequency": 56
},
{
"req_ip": "192.106.3.36",
"req_frequency": 57
},
{
"req_ip": "192.106.3.37",
"req_frequency": 58
},
{
"req_ip": "192.106.3.38",
"req_frequency": 59
}
],
"省公司": [
{
"req_ip": "192.146.3.38",
"req_frequency": 123
},
{
"req_ip": "192.146.3.39",
"req_frequency": 124
},
{
"req_ip": "192.146.3.40",
"req_frequency": 125
},
{
"req_ip": "192.146.3.41",
"req_frequency": 126
},
{
"req_ip": "192.146.3.42",
"req_frequency": 127
},
{
"req_ip": "192.146.3.43",
"req_frequency": 128
},
{
"req_ip": "192.146.3.44",
"req_frequency": 129
},
{
"req_ip": "192.146.3.45",
"req_frequency": 130
},
{
"req_ip": "192.146.3.46",
"req_frequency": 131
},
{
"req_ip": "192.146.3.47",
"req_frequency": 132
}
]
},
"account": {
"宜昌分公司": [
{
"req_account": "huqx",
"req_frequency": 33,
"req_jobnum": 54412
},
{
"req_account": "zhangsf",
"req_frequency": 34,
"req_jobnum": 54413
},
{
"req_account": "zhaoj",
"req_frequency": 35,
"req_jobnum": 54414
}
],
"随州分公司": [
{
"req_account": "sangdq",
"req_frequency": 36,
"req_jobnum": 54415
},
{
"req_account": "hujt",
"req_frequency": 37,
"req_jobnum": 54416
}
],
"孝感分公司": [
{
"req_account": "zhangs",
"req_frequency": 98,
"req_jobnum": 43325
},
{
"req_account": "lin",
"req_frequency": 43,
"req_jobnum": 43326
},
{
"req_account": "liuhr",
"req_frequency": 33,
"req_jobnum": 43327
},
{
"req_account": "sunxq01",
"req_frequency": 51,
"req_jobnum": 43328
}
],
"黄冈分公司": [
{
"req_account": "shicl",
"req_frequency": 47,
"req_jobnum": 65341
},
{
"req_account": "gongxs",
"req_frequency": 65,
"req_jobnum": 65342
},
{
"req_account": "sunzs",
"req_frequency": 54,
"req_jobnum": 65343
}
],
"省公司": [
{
"req_account": "maoxt",
"req_frequency": 37,
"req_jobnum": 98761
},
{
"req_account": "xiaod01",
"req_frequency": 29,
"req_jobnum": 98761
},
{
"req_account": "qingsx",
"req_frequency": 71,
"req_jobnum": 98761
},
{
"req_account": "guobg",
"req_frequency": 79,
"req_jobnum": 98761
}
]
},
"interface": {
"http://190.89.233.2:8909/getUser": [
{
"interface_addr": "http://190.89.233.2:8909/getUser",
"req_frequency": 23,
"req_ip": "192.156.3.12",
"req_account": "zhangq",
"req_jobnum": 54411
},
{
"interface_addr": "http://190.89.233.2:8909/getUser",
"req_frequency": 24,
"req_ip": "192.156.3.12",
"req_account": "huqx",
"req_jobnum": 54412
},
{
"interface_addr": "http://190.89.233.2:8909/getUser",
"req_frequency": 25,
"req_ip": "192.156.3.13",
"req_account": "zhangsf",
"req_jobnum": 54413
},
{
"interface_addr": "http://190.89.233.2:8909/getUser",
"req_frequency": 26,
"req_ip": "192.156.3.14",
"req_account": "zhaoj",
"req_jobnum": 54414
},
{
"interface_addr": "http://190.89.233.2:8909/getUser",
"req_frequency": 27,
"req_ip": "192.156.3.15",
"req_account": "sangdq",
"req_jobnum": 54415
},
{
"interface_addr": "http://190.89.233.2:8909/getUser",
"req_frequency": 28,
"req_ip": "192.156.3.16",
"req_account": "hujt",
"req_jobnum": 54416
},
{
"interface_addr": "http://190.89.233.2:8909/getUser",
"req_frequency": 29,
"req_ip": "192.156.3.17",
"req_account": "zhangs",
"req_jobnum": 43325
},
{
"interface_addr": "http://190.89.233.2:8909/getUser",
"req_frequency": 30,
"req_ip": "192.156.3.18",
"req_account": "lin",
"req_jobnum": 43326
}
],
"http://190.89.233.2:8909/getpublicconfig": [
{
"interface_addr": "http://190.89.233.2:8909/getpublicconfig",
"req_frequency": 43,
"req_ip": "192.156.3.12",
"req_account": "liuhr",
"req_jobnum": 43327
},
{
"interface_addr": "http://190.89.233.2:8909/getpublicconfig",
"req_frequency": 44,
"req_ip": "192.156.3.12",
"req_account": "sunxq01",
"req_jobnum": 43328
},
{
"interface_addr": "http://190.89.233.2:8909/getpublicconfig",
"req_frequency": 45,
"req_ip": "192.156.3.18",
"req_account": "shicl",
"req_jobnum": 65341
},
{
"interface_addr": "http://190.89.233.2:8909/getpublicconfig",
"req_frequency": 46,
"req_ip": "192.106.3.33",
"req_account": "gongxs",
"req_jobnum": 65342
},
{
"interface_addr": "http://190.89.233.2:8909/getpublicconfig",
"req_frequency": 47,
"req_ip": "192.106.3.34",
"req_account": "sunzs",
"req_jobnum": 65343
}
],
"http://190.89.233.2:8909/update/sysconfig": [
{
"interface_addr": "http://190.89.233.2:8909/update/sysconfig",
"req_frequency": 34,
"req_ip": "192.106.3.35",
"req_account": "zhangsf",
"req_jobnum": 54415
},
{
"interface_addr": "http://190.89.233.2:8909/update/sysconfig",
"req_frequency": 23,
"req_ip": "192.106.3.36",
"req_account": "zhaoj",
"req_jobnum": 54416
},
{
"interface_addr": "http://190.89.233.2:8909/update/sysconfig",
"req_frequency": 78,
"req_ip": "192.106.3.37",
"req_account": "sangdq",
"req_jobnum": 43325
},
{
"interface_addr": "http://190.89.233.2:8910/update/sysconfig",
"req_frequency": 79,
"req_ip": "192.146.3.38",
"req_account": "hujt",
"req_jobnum": 43326
},
{
"interface_addr": "http://190.89.233.2:8911/update/sysconfig",
"req_frequency": 80,
"req_ip": "192.146.3.39",
"req_account": "zhangs",
"req_jobnum": 43327
},
{
"interface_addr": "http://190.89.233.2:8912/update/sysconfig",
"req_frequency": 81,
"req_ip": "192.146.3.40",
"req_account": "lin",
"req_jobnum": 43328
},
{
"interface_addr": "http://190.89.233.2:8913/update/sysconfig",
"req_frequency": 82,
"req_ip": "192.146.3.41",
"req_account": "liuhr",
"req_jobnum": 65341
},
{
"interface_addr": "http://190.89.233.2:8914/update/sysconfig",
"req_frequency": 83,
"req_ip": "192.146.3.42",
"req_account": "sunxq01",
"req_jobnum": 65342
},
{
"interface_addr": "http://190.89.233.2:8915/update/sysconfig",
"req_frequency": 84,
"req_ip": "192.146.3.43",
"req_account": "xiaod01",
"req_jobnum": 65343
},
{
"interface_addr": "http://190.89.233.2:8916/update/sysconfig",
"req_frequency": 85,
"req_ip": "192.146.3.44",
"req_account": "qingsx",
"req_jobnum": 98761
},
{
"interface_addr": "http://190.89.233.2:8917/update/sysconfig",
"req_frequency": 86,
"req_ip": "192.146.3.45",
"req_account": "guobg",
"req_jobnum": 98761
},
{
"interface_addr": "http://190.89.233.2:8918/update/sysconfig",
"req_frequency": 87,
"req_ip": "192.146.3.46",
"req_account": "zhangq",
"req_jobnum": 98761
}
]
},
"menu": {
"菜单1": [
{
"menu_name": "菜单1",
"req_frequency": 53,
"req_ip": "192.106.3.32",
"req_account": "lin",
"req_jobnum": "43326"
},
{
"menu_name": "菜单1",
"req_frequency": 54,
"req_ip": "192.106.3.33",
"req_account": "liuhr",
"req_jobnum": "43327"
},
{
"menu_name": "菜单1",
"req_frequency": 55,
"req_ip": "192.106.3.34",
"req_account": "sunxq01",
"req_jobnum": "43328"
},
{
"menu_name": "菜单1",
"req_frequency": 56,
"req_ip": "192.106.3.35",
"req_account": "shicl",
"req_jobnum": "65341"
},
{
"menu_name": "菜单1",
"req_frequency": 57,
"req_ip": "192.106.3.36",
"req_account": "gongxs",
"req_jobnum": "65342"
},
{
"menu_name": "菜单1",
"req_frequency": 58,
"req_ip": "192.106.3.37",
"req_account": "sunzs",
"req_jobnum": "65343"
}
],
"菜单2": [
{
"menu_name": "菜单2",
"req_frequency": 31,
"req_ip": "192.156.3.12",
"req_account": "zhangq",
"req_jobnum": "54411"
},
{
"menu_name": "菜单2",
"req_frequency": 32,
"req_ip": "192.156.3.12",
"req_account": "huqx",
"req_jobnum": "54412"
},
{
"menu_name": "菜单2",
"req_frequency": 33,
"req_ip": "192.156.3.13",
"req_account": "zhangsf",
"req_jobnum": "54413"
},
{
"menu_name": "菜单2",
"req_frequency": 34,
"req_ip": "192.156.3.14",
"req_account": "zhaoj",
"req_jobnum": "54414"
},
{
"menu_name": "菜单2",
"req_frequency": 35,
"req_ip": "192.156.3.15",
"req_account": "sangdq",
"req_jobnum": "54415"
},
{
"menu_name": "菜单2",
"req_frequency": 36,
"req_ip": "192.156.3.16",
"req_account": "hujt",
"req_jobnum": "54416"
},
{
"menu_name": "菜单2",
"req_frequency": 37,
"req_ip": "192.156.3.17",
"req_account": "zhangs",
"req_jobnum": "43325"
},
{
"menu_name": "菜单2",
"req_frequency": 38,
"req_ip": "192.156.3.18",
"req_account": "lin",
"req_jobnum": "43326"
},
{
"menu_name": "菜单2",
"req_frequency": 39,
"req_ip": "192.156.3.12",
"req_account": "liuhr",
"req_jobnum": "43327"
}
],
"菜单3": [
{
"menu_name": "菜单3",
"req_frequency": 51,
"req_ip": "192.106.3.33",
"req_account": "gongxs",
"req_jobnum": "65342"
},
{
"menu_name": "菜单3",
"req_frequency": 52,
"req_ip": "192.106.3.34",
"req_account": "sunzs",
"req_jobnum": "65343"
},
{
"menu_name": "菜单3",
"req_frequency": 53,
"req_ip": "192.106.3.35",
"req_account": "zhangsf",
"req_jobnum": "54415"
},
{
"menu_name": "菜单3",
"req_frequency": 54,
"req_ip": "192.106.3.36",
"req_account": "zhaoj",
"req_jobnum": "54416"
},
{
"menu_name": "菜单3",
"req_frequency": 55,
"req_ip": "192.106.3.37",
"req_account": "sangdq",
"req_jobnum": "43325"
},
{
"menu_name": "菜单3",
"req_frequency": 56,
"req_ip": "192.146.3.38",
"req_account": "hujt",
"req_jobnum": "43326"
},
{
"menu_name": "菜单3",
"req_frequency": 57,
"req_ip": "192.146.3.39",
"req_account": "zhangs",
"req_jobnum": "43327"
},
{
"menu_name": "菜单3",
"req_frequency": 58,
"req_ip": "192.146.3.40",
"req_account": "lin",
"req_jobnum": "43328"
},
{
"menu_name": "菜单3",
"req_frequency": 59,
"req_ip": "192.146.3.41",
"req_account": "liuhr",
"req_jobnum": "65341"
},
{
"menu_name": "菜单3",
"req_frequency": 60,
"req_ip": "192.146.3.42",
"req_account": "sunxq01",
"req_jobnum": "65342"
},
{
"menu_name": "菜单3",
"req_frequency": 61,
"req_ip": "192.146.3.43",
"req_account": "xiaod01",
"req_jobnum": "65343"
}
]
}
}
}
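
# A minimal sketch of how this mock payload might be served, assuming a mock
# entry(data_type, start_time, end_time) mirroring the signature used by
# uebaMetricsAnalysis.utils.dashboard_data (the signature is an assumption;
# only the data dict above is from the repo):
#
# def entry(data_type, start_time, end_time):
#     # the mock ignores the time window and returns the slice for the type
#     return {
#         "summary": data["summary"].get(data_type, []),
#         "detail": data["detail"].get(data_type, {})
#     }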