2023-06-30 17:18:31 +08:00
|
|
|
|
import datetime
|
2023-04-04 14:28:53 +08:00
|
|
|
|
import os.path
|
2023-07-04 16:44:51 +08:00
|
|
|
|
import re
|
2023-04-04 14:28:53 +08:00
|
|
|
|
import time
|
2023-07-05 14:13:30 +08:00
|
|
|
|
from ahocorasick import Automaton
|
2023-07-04 16:44:51 +08:00
|
|
|
|
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT
|
|
|
|
|
from docx.oxml.ns import qn
|
|
|
|
|
from docx.shared import Pt, Length, Inches
|
2023-06-30 17:18:31 +08:00
|
|
|
|
from docxtpl import DocxTemplate
|
2023-02-28 16:28:48 +08:00
|
|
|
|
from fastapi import APIRouter, Depends, HTTPException
|
2023-07-06 15:13:19 +08:00
|
|
|
|
from sqlalchemy import cast, DATE, func, or_, and_
|
2023-02-28 16:28:48 +08:00
|
|
|
|
from sqlalchemy.orm import Session
|
2023-04-04 14:28:53 +08:00
|
|
|
|
from starlette.responses import FileResponse
|
2023-07-04 16:44:51 +08:00
|
|
|
|
from docx import Document
|
2023-06-30 17:18:31 +08:00
|
|
|
|
from Crud.UserCrud import get_department_config
|
2023-04-04 14:28:53 +08:00
|
|
|
|
from Models.DepartmentModel import Department
|
2023-06-30 17:18:31 +08:00
|
|
|
|
from Models.UserModel import User
|
2023-06-30 13:39:24 +08:00
|
|
|
|
from Mods.Notice.Utils import DailyNotice, daily_notice
|
2023-03-06 14:48:41 +08:00
|
|
|
|
from Schemas.DailySchemas import DailyTypeEnum
|
2023-03-01 16:04:43 +08:00
|
|
|
|
from Schemas.UserSchemas import TokenData
|
2023-03-06 09:45:17 +08:00
|
|
|
|
from Utils.AuthUtils import token_data_depend, check_auth, registered_depend
|
2023-02-28 16:28:48 +08:00
|
|
|
|
from Models.DailyModel import Daily
|
|
|
|
|
from Utils.CrudUtils import auto_create_crud
|
2023-03-01 16:04:43 +08:00
|
|
|
|
import json
|
|
|
|
|
from Schemas import DailySchemas
|
|
|
|
|
from Crud import DailyCrud
|
2023-04-04 14:28:53 +08:00
|
|
|
|
from Utils.SqlAlchemyUtils import get_db, QueryParams, query_common
|
|
|
|
|
import pandas as pd
|
2023-02-28 16:28:48 +08:00
|
|
|
|
|
|
|
|
|
# Router for all daily-report endpoints; every route requires a
# registered user via the registered_depend dependency.
router = APIRouter(
    tags=["日报"],
    prefix="/api/daily/daily",
    dependencies=[Depends(registered_depend)],
)
|
|
|
|
|
|
2023-03-01 16:04:43 +08:00
|
|
|
|
|
|
|
|
|
# crud = auto_create_crud(Daily, 'daily', "日报", auto_create_keys=['create_time', 'update_time', 'id'],
|
|
|
|
|
# array_keys=['department', 'post'])
|
|
|
|
|
# crud.mount(router)
|
|
|
|
|
|
|
|
|
|
|
2023-03-02 15:19:14 +08:00
|
|
|
|
@router.post("/daily_get", response_model=DailySchemas.DailyGetRes, summary="获取日报")
def daily_get(req: DailySchemas.DailyGetReq, db: Session = Depends(get_db),
              # token_data: TokenData = Depends(registered_depend)
              ):
    """Fetch a single daily report by id, with its comments attached."""
    item = DailyCrud.daily_get(db, req.id)
    # BUG FIX: check existence before touching attributes — the original
    # read item.comments first, raising AttributeError when item is None.
    if not item:
        # BUG FIX: HTTPException requires status_code; omitting it (as the
        # original did) raises TypeError instead of returning an error.
        raise HTTPException(detail="未取到信息", status_code=404)
    comments = None
    if item.comments:
        comments = [DailySchemas.CommentInfo(**comment.to_with_user_dict()) for comment in item.comments]
    return DailySchemas.DailyGetRes(**item.to_dict(), comments=comments)
|
2023-03-01 16:04:43 +08:00
|
|
|
|
|
|
|
|
|
|
2023-07-06 00:52:27 +08:00
|
|
|
|
# Load the sensitive-word list shipped with the application config.
# `words` feeds the module-level checker built below.
json_filepath = os.path.join(os.getcwd(), 'Config', 'sensitive_word.json')
with open(json_filepath, 'r', encoding='utf-8') as json_file:
    words = json.load(json_file)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class SensitiveWordChecker:
    """Multi-pattern sensitive-word matcher backed by an Aho-Corasick automaton."""

    def __init__(self, sensitive_words):
        """Build the automaton once from *sensitive_words* (iterable of str)."""
        self.automaton = Automaton()
        for idx, word in enumerate(sensitive_words):
            # Payload (idx, word) lets hits be mapped back to the word itself.
            self.automaton.add_word(word, (idx, word))
        self.automaton.make_automaton()

    def check(self, text):
        """Return the set of (index, word) payloads found anywhere in *text*."""
        hits = set()
        for _, payload in self.automaton.iter(text):
            hits.add(payload)
        return hits
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Module-level matcher, built once at import time from the config word list.
checker = SensitiveWordChecker(words)
|
|
|
|
|
|
|
|
|
|
|
2023-03-02 15:19:14 +08:00
|
|
|
|
@router.post("/daily_add", response_model=DailySchemas.DailyAddRes, summary="添加日报")
def daily_add(req: DailySchemas.DailyAddReq, db: Session = Depends(get_db),
              token_data: TokenData = Depends(token_data_depend)):
    """Create a daily report.

    Rejects: a second 部门子公司日报 from the same user on the same day,
    content containing sensitive words, and a fill_user that does not match
    the authenticated user's email. A 运行日报 triggers a notification.
    """
    # One department report per user per calendar day.  The query only runs
    # when the type matches (short-circuit), as in the original code.
    if req.type == DailyTypeEnum.部门子公司日报 and db.query(Daily).filter(
            and_(Daily.type == DailyTypeEnum.部门子公司日报,
                 Daily.fill_user == token_data.email),
            cast(Daily.daily_time, DATE) == datetime.date.today()
    ).first():
        raise HTTPException(detail="今日日报已提交,若需修改请在原日报上修改", status_code=305)
    if req.content:
        bad_words = [item[1] for item in checker.check(req.content)]
        if bad_words:
            # Deliberately does not echo the matched words back to the client.
            # FIX: removed the extraneous f-prefix (no placeholders, F541);
            # the runtime string is unchanged.
            raise HTTPException(detail="填报内容包含敏感词", status_code=305)
    if req.fill_user != token_data.email:
        raise HTTPException(detail="填报人与email不符", status_code=305)
    new_daily = DailyCrud.daily_add(db, req.dict())
    # Operations daily reports additionally push a PDF-upload notice.
    if req.type == DailyTypeEnum.运行日报:
        daily_notice.send_on_daily_pdf_upload()
    return DailySchemas.DailyAddRes(**new_daily.to_dict())
|
2023-03-01 16:04:43 +08:00
|
|
|
|
|
|
|
|
|
|
2023-03-02 15:19:14 +08:00
|
|
|
|
@router.post("/daily_change", response_model=DailySchemas.DailyChangeRes, summary="修改日报")
def daily_change(req: DailySchemas.DailyChangeReq, db: Session = Depends(get_db),
                 token_data: TokenData = Depends(registered_depend)):
    """Update an existing daily report after sensitive-word screening.

    Raises 404 when the target report no longer exists.
    """
    if req.content:
        bad_words = [item[1] for item in checker.check(req.content)]
        if bad_words:
            # FIX: removed the extraneous f-prefix (no placeholders, F541);
            # the runtime string is unchanged.
            raise HTTPException(detail="填报内容包含敏感词", status_code=305)
    new_daily = DailyCrud.daily_change(db, req.id, req.dict())
    if not new_daily:
        raise HTTPException(detail="原日报不存在或已删除", status_code=404)
    return DailySchemas.DailyChangeRes(**new_daily.to_dict())
|
2023-03-01 16:04:43 +08:00
|
|
|
|
|
|
|
|
|
|
2023-03-02 15:19:14 +08:00
|
|
|
|
@router.post("/daily_query", response_model=DailySchemas.DailyQueryRes, summary="查询日报")
def daily_query(req: DailySchemas.DailyQuery, db: Session = Depends(get_db),
                token_data: TokenData = Depends(registered_depend)):
    """Query daily reports.

    Permission filtering is delegated entirely to DailyCrud.daily_query,
    which receives the caller's token data.
    """
    count, rows = DailyCrud.daily_query(db, req, token_data)
    infos = [DailySchemas.DailyInfo(**row.to_dict()) for row in rows]
    return DailySchemas.DailyQueryRes(count=count, items=infos)
|
|
|
|
|
|
|
|
|
|
|
2023-03-02 15:19:14 +08:00
|
|
|
|
@router.post("/daily_delete", response_model=DailySchemas.DailyDeleteRes, summary="删除日报")
def daily_delete(req: DailySchemas.DailyDeleteReq, db: Session = Depends(get_db), ):
    """Delete the daily report identified by req.id and confirm success."""
    DailyCrud.daily_delete(db, req.id)
    response = DailySchemas.DailyDeleteRes(msg="删除成功", state=1)
    return response
|
2023-04-03 17:04:48 +08:00
|
|
|
|
|
|
|
|
|
|
2023-04-04 14:28:53 +08:00
|
|
|
|
@router.post("/daily_export", summary="日报导出")
def daily_export(req: QueryParams, db: Session = Depends(get_db)):
    """Export the daily reports matching *req* to an .xlsx file.

    Returns a dict with the URL of the generated file under
    static_data/daily_out_temp.
    """
    count, query = query_common(db, Daily, req)
    query = query.order_by(Daily.daily_time.desc())
    # id -> name lookup; .limit(1000) caps an otherwise unbounded scan.
    department_name_dic = {item.id: item.name for item in db.query(Department).limit(1000).all()}
    daily_items = []
    for item in query:
        daily_items.append({
            "日报类型": item.type.name,
            "填报人": item.user_info.name,
            "部门": department_name_dic.get(item.department),
            "标题": item.title,
            "内容": item.content,
            "填报时间": item.daily_time,
        })
    temp_path = "static_data/daily_out_temp"
    # FIX: exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...): os.makedirs(...)` pair.
    os.makedirs(temp_path, exist_ok=True)
    file_name = f"{time.strftime('%Y%m%d%H%M%S')}.xlsx"
    file_path = f"{temp_path}/{file_name}"
    pd.DataFrame(daily_items).to_excel(file_path, index=False)
    return {"url": "/" + file_path}
    # return FileResponse(
    #     path=file_path, headers={'Content-Disposition': f'attachment; filename="{file_name}"'}, filename=file_name)
|
2023-04-04 14:28:53 +08:00
|
|
|
|
# return FileResponse(
|
|
|
|
|
# path=file_path, headers={'Content-Disposition': f'attachment; filename="{file_name}"'}, filename=file_name)
|
2023-06-29 17:24:38 +08:00
|
|
|
|
|
|
|
|
|
|
2023-06-30 17:18:31 +08:00
|
|
|
|
# @router.post("/daily_export_to_pdf", summary="每日运行日报导出")
|
|
|
|
|
# def daily_export_to_pdf(req: DailySchemas.DailyExportToPdfReq, db: Session = Depends(get_db)):
|
|
|
|
|
# query = db.query(Daily).filter(cast(Daily.daily_time, DATE) == cast(req.day, DATE))
|
|
|
|
|
# query = query.order_by(Daily.daily_time.desc())
|
|
|
|
|
# department_name_dic = {item.id: item.name for item in db.query(Department).all()}
|
|
|
|
|
# daily_items = []
|
|
|
|
|
# for item in query:
|
|
|
|
|
# daily_item = {"日报类型": item.type.name,
|
|
|
|
|
# "填报人": item.user_info.name,
|
|
|
|
|
# "部门": department_name_dic.get(item.department), "标题": item.title,
|
|
|
|
|
# "内容": item.content, "填报时间": item.daily_time}
|
|
|
|
|
# daily_items.append(daily_item)
|
|
|
|
|
# temp_path = "static_data/daily_out_temp"
|
|
|
|
|
# if not os.path.exists(temp_path):
|
|
|
|
|
# os.makedirs(temp_path)
|
|
|
|
|
# file_name = f"{time.strftime('%Y%m%d%H%M%S')}.xlsx"
|
|
|
|
|
# file_path = f"{temp_path}/{file_name}"
|
|
|
|
|
# pd.DataFrame(daily_items).to_excel(file_path, index=False)
|
|
|
|
|
# return {"url": "/" + file_path}
|
|
|
|
|
# return FileResponse(
|
|
|
|
|
# path=file_path, headers={'Content-Disposition': f'attachment; filename="{file_name}"'}, filename=file_name)
|
|
|
|
|
|
|
|
|
|
|
2023-06-29 17:24:38 +08:00
|
|
|
|
@router.post("/daily_export_to_pdf", summary="每日运行日报导出")
def daily_export_to_pdf(req: DailySchemas.DailyExportToPdfReq, db: Session = Depends(get_db)):
    """Build the operations daily report (.docx) for req.day and return its URL.

    Steps: bucket the day's reports by type, regenerate the docx template
    from config, render it with docxtpl, and save the result under
    static_data/daily_out_temp.
    """
    # Shift forward one day; presumably the client sends the previous day —
    # TODO(review) confirm against the caller.
    req.day = req.day + datetime.timedelta(days=1)
    query = db.query(Daily).filter(cast(Daily.daily_time, DATE) == cast(req.day, DATE))
    query = query.order_by(Daily.daily_time.desc())
    department_name_dic = {item.id: item.name for item in db.query(Department).all()}

    # Bucket the day's reports by report type.
    daily_items = dict()
    daily_items['董监高日报'] = list()
    daily_items['部门子公司日报'] = list()
    daily_items['监管和同业动态'] = list()
    daily_items['行业信息专题分析'] = list()
    for item in query:
        daily_item = {"日报类型": item.type.name,
                      "填报人": item.user_info.name,
                      "部门": department_name_dic.get(item.department),
                      "内容": item.content,
                      "分类": item.sub_type,
                      "填报时间": item.daily_time.strftime('%Y-%m-%d %H:%M:%S')}
        if item.type.name == '董监高日报':
            daily_items['董监高日报'].append(daily_item)
        elif item.type.name == '部门子公司日报':
            daily_items['部门子公司日报'].append(daily_item)
        elif item.type.name == '监管和同业动态':
            daily_items['监管和同业动态'].append(daily_item)
        elif item.type.name == '行业信息专题分析':
            daily_items['行业信息专题分析'].append(daily_item)

    # FIX: os.path.join instead of the Windows-only "\\Config\\..." string,
    # consistent with the other config paths in this function.
    with open(os.path.join(os.getcwd(), 'Config', 'word_data_temp.json'), encoding='utf-8') as f:
        temp = json.load(f)
    department_id = [item.id for item in db.query(Department).filter(Department.type == '董监高')]
    leaders = db.query(User).filter(or_(*[func.find_in_set(str(d_id), User.department) for d_id in department_id]))
    # NOTE(review): `config` is built but never used below; kept for parity
    # with the original — confirm before removing.
    config = {
        '领导动态': [item.name for item in leaders],
        '部门分类': get_department_config(db)
    }

    def generate_template():
        """Regenerate the docx report template from leader/department config."""
        # FIX: portable path join (was os.getcwd() + "\\Config\\...").
        with open(os.path.join(os.getcwd(), 'Config', 'word_data_template.json'), encoding='utf-8') as f:
            template = json.load(f)

        def list_write_docx(write_list, tep_doc):
            # write_list alternates anchor text and the paragraph to insert
            # after it; IndexError on the final anchor ends that pass.
            for inx in range(len(write_list)):
                for para in tep_doc.paragraphs:
                    try:
                        # Does this paragraph contain the anchor text?
                        if write_list[inx] in para.text:
                            # Insert a styled paragraph right after `para`.
                            new_paragraph = Document().add_paragraph()
                            new_run = new_paragraph.add_run(write_list[inx + 1])

                            # Font: FangSong; eastAsia name must be set on the
                            # rPr element for CJK text to pick it up.
                            new_run.font.name = '仿宋'
                            new_run._element.rPr.rFonts.set(qn('w:eastAsia'), '仿宋')
                            new_run.font.size = Pt(16)  # Chinese "三号" ≈ 16pt

                            new_paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.LEFT
                            # First-line indent: two characters ≈ 0.56 cm each.
                            new_paragraph.paragraph_format.first_line_indent = Inches(2 * 0.56 / 2.54)
                            # Left indent: 1.09 cm converted to inches.
                            new_paragraph.paragraph_format.left_indent = Inches(1.09 / 2.54)
                            # Fixed 28pt line spacing.
                            new_paragraph.paragraph_format.line_spacing = Pt(28)

                            para._element.addnext(new_paragraph._element)
                    except IndexError:
                        break

        # Leader placeholders, e.g. 董事长杨秋岭:{{杨秋岭}}
        position = template.get('领导职位')
        leader = template.get('公司领导动态')
        leader_temp = ['公司领导动态']
        for le in leader:
            leader_temp.append(position.get(le) + le + ':' + '{{{{{}}}}}'.format(le))

        # Write the placeholders into the docx template.
        dt_path = os.path.join(os.getcwd(), 'Config', 'daily_out_temp.docx')
        dt_doc = Document(dt_path)
        list_write_docx(write_list=leader_temp, tep_doc=dt_doc)

        # Department / subsidiary placeholders.
        trends = template.get('各部门和分子公司动态')
        for key, val in trends.items():
            val_list = [key]
            for index, item in enumerate(val):
                idx = str(index + 1) + '、' + item
                val_list.append(idx)
                val_list.append('{{{{{}}}}}'.format(item))
            list_write_docx(write_list=val_list, tep_doc=dt_doc)

        dt_doc.save(os.path.join(os.getcwd(), 'Config', 'daily_out_temps.docx'))

    generate_template()

    def format_conversion(string):
        """Strip '1.'-style numbering and 。 from a ;-separated content string."""
        if not string:
            return ""
        string = string.replace(";", ";")  # normalise full-width separator
        parts = string.split(";")
        formatted_parts = []
        for part in parts:
            part = part.replace("。", "")
            split_part = part.split(".", 1)
            if len(split_part) > 1:
                formatted_parts.append(split_part[1].strip())
        formatted_string = ";".join(formatted_parts)
        return formatted_string

    # Company leader updates: fill each leader's template slot.
    lead = temp.get('公司领导动态')
    for k1 in lead.keys():
        lead_data = daily_items.get('董监高日报')
        for item in lead_data:
            if item.get('填报人') == k1:
                content = item.get('内容')
                res = format_conversion(content)
                lead[k1] = res + "。"

    # Template keys whose spelling differs from the DB department names.
    part_dict = {
        "业务发展部上海一部": "业务发展部(上海)一部",
        "业务发展部上海二部": "业务发展部(上海)二部",
        "业务发展部上海三部": "业务发展部(上海)三部",
        "业务发展部西南部": "业务发展部(西南)部"
    }
    # Department / subsidiary updates.
    department = temp.get('各部门和分子公司动态')
    for k2 in department.keys():
        department[k2] = list()
        depart_data = daily_items.get('部门子公司日报')
        for item in depart_data:
            if k2 in part_dict:
                copy_k2 = part_dict.get(k2)
                if item.get('部门') == copy_k2:
                    content = item.get('内容')
                    res = format_conversion(content)
                    department[k2].append(res + '。' + '({}报送)'.format(item.get('填报人')))
            else:
                if item.get('部门') == k2:
                    content = item.get('内容')
                    res = format_conversion(content)
                    department[k2].append(res + '。' + '({}报送)'.format(item.get('填报人')))
        if department[k2]:
            if len(department[k2]) == 1:
                department[k2] = department[k2][0]
            else:
                # Keep the first entry unindented; indent the continuation lines.
                first = department[k2].pop(0)
                department[k2] = "\n".join(department[k2])
                department[k2] = "\n".join([" " + line for line in department[k2].split("\n")])
                department[k2] = first + '\n' + department[k2]
        else:
            department[k2] = '未报送'

    # Regulatory / peer updates, matched on 分类.
    supervise = temp.get('监管和同业动态')
    for k3 in supervise.keys():
        supervise[k3] = list()
        supervise_data = daily_items.get('监管和同业动态')
        for item in supervise_data:
            if item.get('分类') == k3:
                content = item.get('内容').replace("\n", "")
                content = content.replace(' ', '')
                content = ' '.join(content.split())
                content = content + '({}报送)'.format(item.get('填报人'))
                supervise[k3].append(content)
        if supervise[k3]:
            if len(supervise[k3]) == 1:
                supervise[k3] = supervise[k3][0]
            else:
                # Number the entries 1、2、... and indent continuation lines.
                first = supervise[k3].pop(0)
                first = '1、' + first
                for i in range(len(supervise[k3])):
                    num = i + 2
                    supervise[k3][i] = f'{num}、' + supervise[k3][i]
                supervise[k3] = "\n".join(supervise[k3])
                supervise[k3] = "\n".join([" " + line for line in supervise[k3].split("\n")])
                supervise[k3] = first + '\n' + supervise[k3]
        else:
            supervise[k3] = '未报送'

    # Industry analysis items.  NOTE(review): unlike the supervise section,
    # the original has no 分类 filter here — every item lands in every key;
    # preserved as-is, confirm whether a filter is intended.
    subject = temp.get('行业信息专题分析')
    for k4 in subject.keys():
        subject[k4] = list()
        subject_data = daily_items.get('行业信息专题分析')
        for item in subject_data:
            # BUG FIX: the original replaced the literal two-character
            # string "/n"; strip real newlines like the section above.
            content = item.get('内容').replace("\n", "")
            content = content.replace(' ', '')
            content = ' '.join(content.split())
            content = content + '({}报送)'.format(item.get('填报人'))
            subject[k4].append(content)

        if subject[k4]:
            if len(subject[k4]) == 1:
                subject[k4] = subject[k4][0]
            else:
                first = subject[k4].pop(0)
                first = '1、' + first
                for i in range(len(subject[k4])):
                    num = i + 2
                    subject[k4][i] = f'{num}、' + subject[k4][i]
                subject[k4] = "\n".join(subject[k4])
                subject[k4] = "\n".join([" " + line for line in subject[k4].split("\n")])
                subject[k4] = first + '\n' + subject[k4]
        else:
            subject[k4] = '未报送'

    # Template context: date parts plus every section's rendered text.
    merged_dict = {
        "年": req.day.year,
        "月": req.day.month,
        "日": req.day.day
    }
    merged_dict.update(department)
    merged_dict.update(lead)
    merged_dict.update(supervise)
    merged_dict.update(subject)

    # Render the regenerated template.
    doc_path = os.path.join(os.getcwd(), 'Config', 'daily_out_temps.docx')
    doc = DocxTemplate(doc_path)
    doc.render(merged_dict)

    date_string = req.day.strftime('%Y%m%d')
    temp_path = "static_data/daily_out_temp"
    # BUG FIX: create the output directory BEFORE saving into it — the
    # original called doc.save() first and created the directory afterwards,
    # so the first export on a clean deployment failed.
    os.makedirs(temp_path, exist_ok=True)
    file_name = "运行日报{}.docx".format(date_string)
    file_path = os.path.join(os.getcwd(), 'static_data', 'daily_out_temp', file_name)
    doc.save(file_path)

    file_path = f"{temp_path}/{file_name}"
    return {"url": "/" + file_path}
|
2023-07-05 14:13:30 +08:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@router.post("/sensitive_word_verification", summary="敏感词校验")
def daily_export_to_pdf(word_str: str):
    """Check *word_str* against the sensitive-word list; return the matches.

    Reloads the word list from config on every request so edits to the JSON
    file take effect without a restart (intentionally NOT reusing the
    module-level `checker`, which is built once at import time).

    NOTE(review): this handler's name shadows the export endpoint above;
    kept to preserve the module's public names — consider renaming to
    sensitive_word_verification.
    """
    json_filepath = os.path.join(os.getcwd(), 'Config', 'sensitive_word.json')
    with open(json_filepath, 'r', encoding='utf-8') as json_file:
        fresh_words = json.load(json_file)
    # FIX: reuse the module-level SensitiveWordChecker class instead of
    # redefining an identical class on every request.
    fresh_checker = SensitiveWordChecker(fresh_words)
    sensitive_words_in_text = fresh_checker.check(word_str)
    return sensitive_words_in_text
|