# -*- coding: utf-8 -*-
"""
Running Hive SQL with Python
Created on Mon Sep 27 19:00:42 2021
@author: Administrator
"""
import json
import time
import datetime
import collections

import joblib
import numpy as np
import pandas as pd
# Note: on pandas >= 1.0, pandas.io.json.json_normalize is deprecated in favour of pd.json_normalize
from pandas.io.json import json_normalize
from presto_cli import presto_client
from presto_cli_v2 import presto_client_v2 as presto
vsql = """
SELECT DISTINCT apply_id
,name
,dt
,created_at
,json_extract_scalar(final_score_data,'$.score') as score
,var_data
FROM
gzlc_real.fact_risk_algo_api_log
WHERE dt >= '2021-09-27' and algo_name in ('bank_reject_cfm_c2c_v1') and created_at>='2021-09-27 18:00:54'
"""
class connectHiv(object):
    """Thin wrapper around presto_client: opens a cursor on init and runs a single SQL statement."""
    def __init__(self, sql, path="xxx", port=443, username="xxx", source="pf=hubble;client=pyhive"):
        self.path = path
        self.port = port
        self.username = username
        self.source = source
        self.sql = sql
        self.getCursor()

    def getCursor(self):
        # Open a connection and keep a cursor for later execute/fetch calls.
        self.CURSOR = presto_client.connect(self.path, port=self.port, username=self.username,
                                            group="xxx", password='xxx', catalog="hive", schema="xxx",
                                            ).cursor()

    def querySQL(self):
        # Run the stored SQL and return all rows.
        self.CURSOR.execute(self.sql)
        result = self.CURSOR.fetchall()
        return result

    def getSql(self):
        print(self.sql)
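# A small convenience sketch: build a DataFrame directly from a connectHiv instance.
# This assumes the presto_client cursor follows the DB-API and exposes .description
# (column metadata); query_to_df is an illustrative helper, not part of the original script.
def query_to_df(conn):
    rows = conn.querySQL()
    columns = [col[0] for col in conn.CURSOR.description]  # first field of each descriptor is the column name
    return pd.DataFrame(rows, columns=columns)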
# Get a database connection and run the query
cursor = connectHiv(sql=vsql)
result = cursor.querySQL()
# Store the result set as a DataFrame
df11111 = pd.DataFrame(result)
df11111.columns = ['apply_id', 'name', 'dt', 'created_at', 'score', 'var_data']
df_to_use = df11111[df11111['apply_id'] != 'apply_id']  # drop any stray header rows
df_to_use = df_to_use.reset_index(drop=True)
# Define bare true/false/null so JSON-like strings can be eval()'d without NameError
# (json.loads below handles these literals natively, so this is only a safety net).
global false, null, true
false = null = true = ''
all_df = None
order_id_list = df_to_use['apply_id'].values.tolist()  # loop over order_id / apply_id
for ai in range(len(order_id_list)):
    try:
        # Parse the var_data JSON payload and flatten nested fields into columns
        data_dump = [json.loads(df_to_use['var_data'][ai])]
        jsonformat = json.dumps(data_dump, sort_keys=True, indent=4, separators=(',', ': '))  # pretty-printed copy (not used downstream)
        df1 = pd.DataFrame.from_dict(json_normalize(data_dump), orient='columns')
        dforder = pd.DataFrame()
        dforder['apply_id'] = [df_to_use['apply_id'][ai]]
        #dforder['business_code'] = [df_to_use['business_code'][ai]]
        df = pd.merge(dforder, df1, left_index=True, right_index=True)
        if all_df is None:
            all_df = df
        else:
            all_df = pd.concat([all_df, df])
        print(ai)
    except Exception:
        # Log apply_ids whose var_data could not be parsed
        failed_order = str(df_to_use['apply_id'][ai])
        print(failed_order)
        with open("failed_orders.txt", "a") as file:  # 失败订单记录 (failed-order log)
            file.write(failed_order + "\n")
all_df = all_df.drop_duplicates()
# Join the flattened var_data columns back onto the original query output
final_df = pd.merge(df_to_use, all_df, how='left', on='apply_id')
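# A minimal follow-up sketch: cast the extracted score to numeric and persist the merged
# frame. The output filename is illustrative, not from the original script.
final_df['score'] = pd.to_numeric(final_df['score'], errors='coerce')
final_df.to_csv('bank_reject_cfm_c2c_v1_vars.csv', index=False, encoding='utf-8-sig')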