# -*- coding: utf-8 -*-
import asyncio
import io
import json
from datetime import datetime
from typing import Any

from fastapi import UploadFile
import pandas as pd
from redis.asyncio.client import Redis

from app.core.database import async_db_session
from app.core.redis_crud import RedisCURD
from app.common.enums import RedisInitKeyConfig
from app.core.base_schema import BatchSetAvailable
from app.core.exceptions import CustomException
from app.utils.excel_util import ExcelUtil
from app.core.logger import log
from app.api.v1.module_system.auth.schema import AuthSchema
from .schema import BizVarDictCreateSchema, BizVarDictUpdateSchema, BizVarDictOutSchema, BizVarDictQueryParam
from ..crane.crud import BizCraneCRUD
from ..crane.model import BizCraneModel
from ..gateway.crud import GatewayCRUD
from ..gateway.model import GatewayModel
from ..mec.crud import BizMecCRUD
from ..vardict.crud import BizVarDictCRUD
from ..vardict.schema import VarDictMecGroupSchema
from app.utils.tdengine_util import tdengine_rest_query, format_rest_result, get_table_total_count


class BizVarDictService:
    """变量信息服务层 (variable-dictionary service layer).

    Provides CRUD, Excel import/export, Redis caching of per-crane variable
    groups, and TDengine history queries for the variable dictionary.
    """

    @classmethod
    async def detail_vardict_service(cls, auth: AuthSchema, id: int) -> dict:
        """Return one var-dict row enriched with crane/gateway display names.

        Raises:
            CustomException: if the row does not exist.
        """
        obj = await BizVarDictCRUD(auth).get_by_id_vardict_crud(id=id)
        if not obj:
            raise CustomException(msg="该数据不存在")
        crane = await BizCraneCRUD(auth).get_by_id_crane_crud_for_no(obj.crane_no)
        gateway = await GatewayCRUD(auth).get_by_id_gateway_crud(obj.gateway_id)
        res = BizVarDictOutSchema.model_validate(obj).model_dump()
        # FIX: crane may be None just like gateway; previously this raised
        # AttributeError when the referenced crane was missing/disabled.
        res['crane_name'] = crane.crane_name if crane else ""
        res['gateway_name'] = gateway.gateway_name if gateway else ""
        return res

    @classmethod
    async def list_vardict_service(cls, auth: AuthSchema, search: BizVarDictQueryParam | None = None, order_by: list[dict] | None = None) -> list[dict]:
        """列表查询 — full (non-paged) list matching the optional filter."""
        search_dict = search.__dict__ if search else None
        obj_list = await BizVarDictCRUD(auth).list_vardict_crud(search=search_dict, order_by=order_by)
        return [BizVarDictOutSchema.model_validate(obj).model_dump() for obj in obj_list]

    @classmethod
    async def page_vardict_service(cls, auth: AuthSchema, page_no: int, page_size: int, search: BizVarDictQueryParam | None = None, order_by: list[dict] | None = None) -> dict:
        """分页查询(数据库分页) — DB-side paging, items enriched with names."""
        search_dict = search.__dict__ if search else {}
        order_by_list = order_by or [{'id': 'asc'}]
        offset = (page_no - 1) * page_size
        result = await BizVarDictCRUD(auth).page_vardict_crud(
            offset=offset, limit=page_size, order_by=order_by_list, search=search_dict
        )
        for item in result.get('items'):
            crane_model: BizCraneModel | None = await BizCraneCRUD(auth).get_by_id_crane_crud_for_no(crane_no=item['crane_no'])
            gateway_model: GatewayModel | None = await GatewayCRUD(auth).get_by_id_gateway_crud(id=item['gateway_id'])
            # FIX: crane_model is typed Optional but was dereferenced
            # unconditionally; guard it the same way gateway_model is guarded.
            item['crane_name'] = crane_model.crane_name if crane_model else ""
            item['gateway_name'] = gateway_model.gateway_name if gateway_model else ""
        return result

    @classmethod
    async def _list_vardict_joined(cls, auth: AuthSchema, extra_cond: str, err_msg: str, crane_no: str | None) -> list[dict]:
        """Shared crane-joined var-dict query used by the alarm/analog lists.

        ``extra_cond`` is a trusted, code-supplied SQL fragment (never user
        input); ``crane_no`` is bound as a parameter.
        """
        sql_parts = [
            "SELECT a.*,b.crane_name FROM biz_var_dict as a "
            "LEFT JOIN biz_crane as b ON a.crane_no = b.crane_no "
            "WHERE a.`status` = :status AND b.`status` = :status "
            + extra_cond
        ]
        business_params: dict[str, Any] = {"status": 1}
        if crane_no and isinstance(crane_no, str) and crane_no.strip():
            sql_parts.append("AND a.crane_no = :crane_no")
            business_params["crane_no"] = crane_no.strip()
        sql_parts.append("ORDER BY b.`order` asc,a.mec_type asc,a.var_sort asc")
        final_sql = " ".join(sql_parts)
        try:
            obj_list = await BizVarDictCRUD(auth).list_sql(final_sql, business_params)
            return [BizVarDictOutSchema.model_validate(obj).model_dump() for obj in obj_list]
        except Exception as e:
            raise CustomException(msg=f"{err_msg}:{str(e)}")

    @classmethod
    async def vardict_alarms_list(cls, auth: AuthSchema, crane_no: str = None) -> list[dict]:
        """Alarm variables (switch_type >= 2), optionally filtered by crane."""
        return await cls._list_vardict_joined(
            auth, "AND a.switch_type >= 2", "查询变量字典报警列表失败", crane_no)

    @classmethod
    async def vardict_analog_list(cls, auth: AuthSchema, crane_no: str = None) -> list[dict]:
        """Analog variables (data_type >= 2), optionally filtered by crane."""
        return await cls._list_vardict_joined(
            auth, "AND a.data_type >= 2", "查询变量字典模拟量列表失败", crane_no)

    @classmethod
    async def create_vardict_service(cls, auth: AuthSchema, data: BizVarDictCreateSchema, redis: Redis) -> dict:
        """创建 — create a var-dict row and invalidate the crane's cache."""
        obj = await BizVarDictCRUD(auth).create_vardict_crud(data=data)
        if obj:
            # 更新缓存中数据: drop the per-crane cache so it is rebuilt lazily
            await RedisCURD(redis).clear(f"{RedisInitKeyConfig.VAR_DICT.key}:{data.crane_no}")
            return BizVarDictOutSchema.model_validate(obj).model_dump()

    @classmethod
    async def update_vardict_service(cls, auth: AuthSchema, id: int, data: BizVarDictUpdateSchema, redis: Redis) -> dict:
        """更新 — update an existing row and invalidate the crane's cache.

        Raises:
            CustomException: if the row does not exist.
        """
        obj = await BizVarDictCRUD(auth).get_by_id_vardict_crud(id=id)
        if not obj:
            raise CustomException(msg='更新失败,该数据不存在')
        obj = await BizVarDictCRUD(auth).update_vardict_crud(id=id, data=data)
        if obj:
            # 更新缓存中数据
            await RedisCURD(redis).clear(f"{RedisInitKeyConfig.VAR_DICT.key}:{obj.crane_no}")
            return BizVarDictOutSchema.model_validate(obj).model_dump()

    @classmethod
    async def delete_vardict_service(cls, auth: AuthSchema, ids: list[int], redis: Redis) -> None:
        """删除 — delete rows by id, then invalidate each affected crane cache."""
        if len(ids) < 1:
            raise CustomException(msg='删除失败,删除对象不能为空')
        crane_nos = []
        for id in ids:
            obj = await BizVarDictCRUD(auth).get_by_id_vardict_crud(id=id)
            if not obj:
                raise CustomException(msg=f'删除失败,ID为{id}的数据不存在')
            crane_nos.append(obj.crane_no)
        await BizVarDictCRUD(auth).delete_vardict_crud(ids=ids)
        # 更新缓存中数据 (deduplicated: one clear per crane, not per row)
        for crane_no in dict.fromkeys(crane_nos):
            await RedisCURD(redis).clear(f"{RedisInitKeyConfig.VAR_DICT.key}:{crane_no}")

    @classmethod
    async def set_availale_vardict_service(cls, auth: AuthSchema, data: BatchSetAvailable, redis: Redis) -> None:
        """批量设置状态 — batch-toggle status, then invalidate crane caches.

        (Name keeps the historical 'availale' spelling: it is public API.)
        """
        crane_nos = []
        for id in data.ids:
            obj = await BizVarDictCRUD(auth).get_by_id_vardict_crud(id=id)
            if not obj:
                raise CustomException(msg=f'批量设置失败,ID为{id}的数据不存在')
            crane_nos.append(obj.crane_no)
        await BizVarDictCRUD(auth).set_available_vardict_crud(ids=data.ids, status=data.status)
        # 更新缓存中数据 (deduplicated)
        for crane_no in dict.fromkeys(crane_nos):
            await RedisCURD(redis).clear(f"{RedisInitKeyConfig.VAR_DICT.key}:{crane_no}")

    @classmethod
    async def batch_export_vardict_service(cls, obj_list: list[dict]) -> bytes:
        """批量导出 — render the given rows to an Excel workbook (bytes)."""
        # Field → column-header mapping. Some headers intentionally carry
        # trailing spaces / are empty to mirror the import template exactly.
        mapping_dict = {
            'id': 'id ',
            'crane_no': '',
            'var_code': '变量code',
            'var_name': '变量名称',
            'mec_type': '所属机构',
            'data_type': '数据类型',
            'switch_type': '变量类型',
            'addr': 'modbus地址',
            'gateway_id': '网关',
            'var_sort': '排序',
            'var_group': '变量分组',
            'var_category': '变量分类',
            'translate': '绑定公式',
            'device_no': '关联设备编号 ',
            'is_reverse': '是否取反',
            'is_top_show': '是否重点显示',
            'is_save': '是否生成',
            'is_calibration': '是否标定',
            'is_overview_top_show': '是否首页重点显示',
            'is_home_page_show': '是否首页显示',
            'is_diagnose': '是否启用诊断专家',
            'is_upload': '是否上传云平台',
            'diagnosis_id': '关联诊断专家',
            'status': '是否启用',
            'description': '备注/描述',
            'create_time': '创建时间 ',
            'updated_time': '更新时间',
            'created_id': '创建人ID',
            'updated_id': '更新人ID',
        }
        data = obj_list.copy()
        for item in data:
            # 状态转换
            # NOTE(review): '0' → '启用' looks inverted relative to the rest of
            # this module, where status '1' is treated as enabled — confirm.
            if 'status' in item:
                item['status'] = '启用' if item.get('status') == '0' else '停用'
            # 创建者转换: flatten creator dict to its display name
            creator_info = item.get('creator')
            if isinstance(creator_info, dict):
                item['creator'] = creator_info.get('name', '未知')
            elif creator_info is None:
                item['creator'] = '未知'
        return ExcelUtil.export_list2excel(list_data=data, mapping_dict=mapping_dict)

    @classmethod
    async def batch_import_vardict_service(cls, auth: AuthSchema, file: UploadFile, update_support: bool = False) -> str:
        """批量导入 — import var-dict rows from an uploaded Excel file.

        Returns a human-readable summary string; row-level failures are
        collected rather than aborting the whole import.
        (``update_support`` is currently unused; kept for interface stability.)
        """
        # Column-header → field mapping; must mirror the export/template headers
        # (including the trailing-space and empty-string headers).
        header_dict = {
            'id ': 'id',
            '': 'crane_no',
            '变量code': 'var_code',
            '变量名称': 'var_name',
            '所属机构': 'mec_type',
            '数据类型': 'data_type',
            '变量类型': 'switch_type',
            'modbus地址': 'addr',
            '网关': 'gateway_id',
            '排序': 'var_sort',
            '变量分组': 'var_group',
            '变量分类': 'var_category',
            '绑定公式': 'translate',
            '关联设备编号 ': 'device_no',
            '是否取反': 'is_reverse',
            '是否重点显示': 'is_top_show',
            '是否生成': 'is_save',
            '是否标定': 'is_calibration',
            '是否首页重点显示': 'is_overview_top_show',
            '是否首页显示': 'is_home_page_show',
            '是否启用诊断专家': 'is_diagnose',
            '是否上传云平台': 'is_upload',
            '关联诊断专家': 'diagnosis_id',
            '是否启用': 'status',
            '备注/描述': 'description',
            '创建时间 ': 'create_time',
            '更新时间': 'updated_time',
            '创建人ID': 'created_id',
            '更新人ID': 'updated_id',
        }
        try:
            contents = await file.read()
            df = pd.read_excel(io.BytesIO(contents))
            await file.close()
            if df.empty:
                raise CustomException(msg="导入文件为空")
            missing_headers = [header for header in header_dict.keys() if header not in df.columns]
            if missing_headers:
                raise CustomException(msg=f"导入文件缺少必要的列: {', '.join(missing_headers)}")
            df.rename(columns=header_dict, inplace=True)
            # 验证必填字段 — per-row: validate via CreateSchema, then insert
            error_msgs = []
            success_count = 0
            count = 0
            field_names = list(header_dict.values())
            for index, row in df.iterrows():
                count += 1
                try:
                    # Same fields, same order as the explicit per-key dict before.
                    data = {name: row[name] for name in field_names}
                    # 使用CreateSchema做校验后入库
                    create_schema = BizVarDictCreateSchema.model_validate(data)
                    # 检查唯一性约束
                    await BizVarDictCRUD(auth).create_vardict_crud(data=create_schema)
                    success_count += 1
                except Exception as e:
                    error_msgs.append(f"第{count}行: {str(e)}")
                    continue
            result = f"成功导入 {success_count} 条数据"
            if error_msgs:
                result += "\n错误信息:\n" + "\n".join(error_msgs)
            return result
        except Exception as e:
            log.error(f"批量导入失败: {str(e)}")
            raise CustomException(msg=f"导入失败: {str(e)}")

    @classmethod
    async def import_template_download_vardict_service(cls) -> bytes:
        """下载导入模板 — build the empty Excel import template."""
        header_list = [
            'id ', '', '变量code', '变量名称', '所属机构', '数据类型', '变量类型',
            'modbus地址', '网关', '排序', '变量分组', '变量分类', '绑定公式',
            '关联设备编号 ', '是否取反', '是否重点显示', '是否生成', '是否标定',
            '是否首页重点显示', '是否首页显示', '是否启用诊断专家', '是否上传云平台',
            '关联诊断专家', '是否启用', '备注/描述', '创建时间 ', '更新时间',
            '创建人ID', '更新人ID',
        ]
        # 下拉选项列 (option values are currently left empty)
        selector_header_list = ['所属机构', '数据类型', '变量类型', '变量分类']
        option_list = [{'所属机构': []}, {'数据类型': []}, {'变量类型': []}, {'变量分类': []}]
        return ExcelUtil.get_excel_template(
            header_list=header_list,
            selector_header_list=selector_header_list,
            option_list=option_list
        )

    @classmethod
    async def get_vardict_group_service(cls, auth: AuthSchema, redis: Redis, crane_no: str):
        """从缓存获取变量分组数据列表信息service

        参数:
        - redis (Redis): Redis客户端
        - crane_no (str): 行车编号

        返回:
        - list[dict]: 变量分组数据列表

        Cache-aside: on a miss or a corrupt entry, re-initialises the cache
        for this crane and reads it back once.
        """
        try:
            redis_key = f"{RedisInitKeyConfig.VAR_DICT.key}:{crane_no}"
            obj_list_dict = await RedisCURD(redis).get(redis_key)
            # 确保返回数据正确序列化
            if obj_list_dict:
                if isinstance(obj_list_dict, str):
                    try:
                        return json.loads(obj_list_dict)
                    except json.JSONDecodeError:
                        log.warning(f"变量分组数据反序列化失败,尝试重新初始化缓存: {'行车:'+crane_no}")
                elif isinstance(obj_list_dict, list):
                    return obj_list_dict
            # 缓存不存在或格式错误时重新初始化
            await cls.init_vardict_service(redis, crane_no=crane_no)
            obj_list_dict = await RedisCURD(redis).get(redis_key)
            if not obj_list_dict:
                raise CustomException(msg="变量分组数据不存在")
            # 再次确保返回数据正确序列化
            if isinstance(obj_list_dict, str):
                try:
                    return json.loads(obj_list_dict)
                except json.JSONDecodeError:
                    raise CustomException(msg="变量分组数据格式错误")
            return obj_list_dict
        except CustomException:
            raise
        except Exception as e:
            log.error(f"获取变量分组数据缓存失败: {str(e)}")
            raise CustomException(msg=f"获取变量分组数据失败: {str(e)}")

    @classmethod
    async def get_vardict_alarms_service(cls, auth: AuthSchema, redis: Redis):
        """从缓存获取所有行车报警变量数据列表信息service

        参数:
        - redis (Redis): Redis客户端

        返回:
        - list[dict]: 报警变量数据列表
        """
        try:
            # Key intentionally contains literal single quotes — it must match
            # the writer in init_vardict_service byte-for-byte.
            redis_key = f"{RedisInitKeyConfig.VAR_DICT.key}:'alarms_all'"
            obj_list_dict = await RedisCURD(redis).get(redis_key)
            # 确保返回数据正确序列化
            if obj_list_dict:
                if isinstance(obj_list_dict, str):
                    try:
                        return json.loads(obj_list_dict)
                    except json.JSONDecodeError:
                        log.warning("变量报警数据反序列化失败,尝试重新初始化缓存")
                elif isinstance(obj_list_dict, list):
                    return obj_list_dict
            # 缓存不存在或格式错误时重新初始化
            await cls.init_vardict_service(redis)
            obj_list_dict = await RedisCURD(redis).get(redis_key)
            if not obj_list_dict:
                raise CustomException(msg="变量报警数据不存在")
            # 再次确保返回数据正确序列化
            if isinstance(obj_list_dict, str):
                try:
                    return json.loads(obj_list_dict)
                except json.JSONDecodeError:
                    raise CustomException(msg="变量报警数据格式错误")
            return obj_list_dict
        except CustomException:
            raise
        except Exception as e:
            log.error(f"获取变量报警数据缓存失败: {str(e)}")
            raise CustomException(msg=f"获取变量报警数据失败: {str(e)}")

    @classmethod
    async def init_vardict_service(cls, redis: Redis, crane_no: str = None):
        """应用初始化: 获取所有天车变量数据信息并缓存service

        参数:
        - redis (Redis): Redis客户端
        - crane_no (str | None): 只初始化指定行车; None 表示全部行车

        返回:
        - None

        Per-crane failures are logged and counted but do not abort the run;
        only a top-level failure raises.
        """
        try:
            async with async_db_session() as session:
                async with session.begin():
                    # 在初始化过程中,不需要检查数据权限
                    auth = AuthSchema(db=session, check_data_scope=False)
                    # 初始化行车机构分组变量数据
                    if crane_no:
                        search = {'status': '1', 'crane_no': crane_no}
                    else:
                        search = {'status': '1'}
                    crane_list = await BizCraneCRUD(auth).list(search=search, order_by=[{'order': 'asc'}])
                    success_count = 0
                    fail_count = 0
                    for crane in crane_list:
                        # (deliberately shadows the crane_no parameter per-iteration)
                        crane_no = crane.crane_no
                        crane_name = crane.crane_name
                        try:
                            varDictMecGroupSchemaList: list[VarDictMecGroupSchema] = []
                            mec_list = await BizMecCRUD(auth).list(search={'crane_no': crane_no, 'status': '1'}, order_by=[{'sort': 'asc'}])
                            for mec in mec_list:
                                # 获取分组数据 — all vars for this mechanism; skip empty mechanisms
                                mecVarDicts = await BizVarDictCRUD(auth).list(
                                    search={'crane_no': crane_no, 'mec_type': mec.mec_type, 'status': '1'},
                                    order_by=[{'var_sort': 'asc'}])
                                if not mecVarDicts:
                                    continue
                                alarmVarList = await BizVarDictCRUD(auth).list(
                                    search={'crane_no': crane_no, 'mec_type': mec.mec_type, 'switch_type': ('>=', '2'), 'status': '1'},
                                    order_by=[{'var_sort': 'asc'}])
                                digitalVarList = await BizVarDictCRUD(auth).list(
                                    search={'crane_no': crane_no, 'mec_type': mec.mec_type, 'data_type': '1', 'status': '1'},
                                    order_by=[{'var_sort': 'asc'}])
                                analogVarList = await BizVarDictCRUD(auth).list(
                                    search={'crane_no': crane_no, 'mec_type': mec.mec_type, 'data_type': ('!=', '1'), 'status': '1'},
                                    order_by=[{'var_sort': 'asc'}])
                                varDictMecGroupSchemaList.append(
                                    VarDictMecGroupSchema(
                                        mec_type=mec.mec_type,
                                        mecVarList_simple=mecVarDicts,
                                        digital_varList=digitalVarList,
                                        analog_varList=analogVarList,
                                        alarm_varList=alarmVarList))
                            # 保存到Redis并设置过期时间
                            redis_key = f"{RedisInitKeyConfig.VAR_DICT.key}:{crane_no}"
                            var_dict_list = [item.model_dump() for item in varDictMecGroupSchemaList]
                            value = json.dumps(var_dict_list, ensure_ascii=False)
                            await RedisCURD(redis).set(
                                key=redis_key,
                                value=value,
                            )
                            success_count += 1
                            log.info(f"✅ 机构分组变量数据缓存成功: {crane_name}")
                        except Exception as e:
                            fail_count += 1
                            log.error(f"❌ 初始化机构分组变量数据失败 [{crane_name}]: {e}")
                    log.info(f"机构分组变量数据初始化完成 - 成功: {success_count}, 失败: {fail_count}")
                    # 初始化所有行车报警变量数据
                    try:
                        varDicts = await cls.vardict_alarms_list(auth=auth)
                        redis_key = f"{RedisInitKeyConfig.VAR_DICT.key}:'alarms_all'"
                        value = json.dumps(varDicts, ensure_ascii=False)
                        await RedisCURD(redis).set(
                            key=redis_key,
                            value=value,
                        )
                        log.info("✅ 报警变量数据缓存成功")
                    except Exception as e:
                        log.error(f"❌ 初始化报警变量数据失败: {e}")
        except Exception as e:
            log.error(f"变量数据初始化过程发生错误: {e}")
            # 只在严重错误时抛出异常,允许单个字典加载失败
            raise CustomException(msg=f"变量数据初始化失败: {str(e)}")

    @classmethod
    async def get_tdengine_data(cls, auth: AuthSchema, page_no: int, page_size: int, stable_name: str, search: BizVarDictQueryParam | None = None) -> dict:
        """Paged query of TDengine history data for one super-table.

        Builds the WHERE clause from crane, mec_type (via var_code list) and
        an optional ('between', (start, end)) created_time tuple; page_size
        == 1000 is a magic value meaning "no paging" (history-curve mode).
        """
        var_dict_search_dict = {'crane_no': search.crane_no, 'data_type': search.data_type, 'mec_type': search.mec_type}
        offset = (page_no - 1) * page_size
        base_sql = "SELECT * FROM " + stable_name
        filter_conditions = []
        crane_no = search.crane_no
        mec_type = search.mec_type
        if crane_no:
            # 防SQL注入:转义单引号 (TDengine REST has no bind parameters here)
            safe_crane_no = crane_no.strip().replace("'", "''")
            filter_conditions.append(f"crane_no = '{safe_crane_no}'")
        if mec_type:
            # Restrict to the var_codes belonging to this mechanism
            mec_var_dict = await BizVarDictCRUD(auth).list(search=var_dict_search_dict)
            var_codes = [item.var_code for item in mec_var_dict if item.var_code]
            if var_codes:
                var_codes_str = "','".join(var_codes)
                filter_conditions.append(f"var_code IN ('{var_codes_str}')")
        # 过滤条件2:created_time时间范围 — ('between', (start, end)) tuple
        created_time = search.created_time
        if created_time and isinstance(created_time, tuple) and len(created_time) == 2:
            condition_type, time_range = created_time
            if condition_type == "between" and isinstance(time_range, (list, tuple)) and len(time_range) == 2:
                start_time, end_time = time_range
                if isinstance(start_time, datetime) and isinstance(end_time, datetime):
                    # 格式化时间为"YYYY-MM-DD HH:MM:SS"(匹配TDengine的时间格式)
                    start_str = start_time.strftime("%Y-%m-%d %H:%M:%S")
                    end_str = end_time.strftime("%Y-%m-%d %H:%M:%S")
                    # 防SQL注入:转义单引号(兜底,时间格式本身不含引号)
                    safe_start = start_str.replace("'", "''")
                    safe_end = end_str.replace("'", "''")
                    filter_conditions.append(f"ts BETWEEN '{safe_start}' AND '{safe_end}'")
        where_clause = " WHERE " + " AND ".join(filter_conditions) if filter_conditions else ""
        if page_size == 1000:
            # 历史曲线用:不分页
            query_sql = f"{base_sql}{where_clause} ORDER BY ts DESC"
        else:
            query_sql = f"{base_sql}{where_clause} ORDER BY ts DESC LIMIT {offset}, {page_size}"
        rest_result = await tdengine_rest_query(query_sql)
        formatted_data = await format_rest_result(rest_result)
        # 查找var_name — one pass to build the lookup instead of an O(n*m) scan;
        # setdefault keeps the FIRST match, same as the old break-on-first loop.
        varDicts = await BizVarDictCRUD(auth).list(search=var_dict_search_dict)
        name_by_code: dict = {}
        for var_dict in varDicts:
            name_by_code.setdefault(var_dict.var_code, var_dict.var_name)
        if formatted_data:
            for item in formatted_data:
                ts_val = item.get('ts')
                # FIX: guard against a missing/None ts (previously AttributeError)
                if ts_val:
                    item['ts'] = ts_val.replace('T', ' ').replace('+08:00', '')
                code = item.get('var_code')
                if code in name_by_code:
                    item['var_name'] = name_by_code[code]
        total = await get_table_total_count(stable_name, where_clause)
        return {
            "page_no": page_no,
            "page_size": page_size,
            "total": total,
            "has_next": offset + page_size < total,
            "items": formatted_data,
        }

    @classmethod
    async def get_tdengine_data_test(cls, auth: AuthSchema, page_no: int, page_size: int, stable_name: str, search: BizVarDictQueryParam | None = None) -> dict:
        """Experimental trigger/recover event query against st_digital.

        NOTE(review): the CTE selects ``ts`` but downstream references
        ``ts_cn``, and the final ORDER BY uses ``t.var_name`` which is not
        selected — the SQL as written likely fails on TDengine; confirm the
        intended column names before relying on this method.
        """
        var_dict_search_dict = {'crane_no': search.crane_no, 'data_type': search.data_type}
        offset = (page_no - 1) * page_size
        base_sql = """
            WITH target_data AS (
                SELECT var_code, ts, val,
                       LAG(val) OVER (PARTITION BY var_code ORDER BY ts) AS prev_val,
                       LAST_VALUE(val) OVER (PARTITION BY var_code ORDER BY ts
                           ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS latest_val
                FROM st_digital
                WHERE val IN (0, 1)
            ),
            trigger_events AS (
                SELECT var_code, ts_cn AS trigger_time, latest_val,
                       ROW_NUMBER() OVER (PARTITION BY var_code ORDER BY ts_cn) AS trigger_batch_id
                FROM target_data
                WHERE prev_val = 0 AND val = 1
            ),
            recover_events AS (
                SELECT var_code, ts_cn AS recover_time,
                       ROW_NUMBER() OVER (PARTITION BY var_code ORDER BY ts_cn) AS recover_batch_id
                FROM target_data
                WHERE prev_val = 1 AND val = 0
            )
            SELECT t.var_code,
                   CASE t.latest_val WHEN 1 THEN '触发中' WHEN 0 THEN '已恢复' ELSE '无数据' END AS current_status,
                   t.trigger_time,
                   IFNULL(r.recover_time, '未恢复') AS recover_time
            FROM trigger_events t
            LEFT JOIN recover_events r
              ON t.var_code = r.var_code AND t.trigger_batch_id = r.recover_batch_id
            ORDER BY t.var_name ASC, t.trigger_time ASC;
        """
        rest_result = await tdengine_rest_query(base_sql)
        formatted_data = await format_rest_result(rest_result)
        # 查找var_name
        varDicts = await BizVarDictCRUD(auth).list(search=var_dict_search_dict)
        if formatted_data:
            for item in formatted_data:
                ts_val = item.get('ts')
                # FIX: this result set has no 'ts' column, so .get('ts') is None;
                # previously .replace was called on None and crashed.
                if ts_val:
                    item['ts'] = ts_val.replace('T', ' ').replace('+08:00', '')
                for varDict in varDicts:
                    if item.get('var_code') == varDict.var_code:
                        item['var_name'] = varDict.var_name
                        break
        # FIX: `where_clause` was undefined here (NameError at runtime);
        # this query applies no extra filter, so count the whole table.
        where_clause = ""
        total = await get_table_total_count(stable_name, where_clause)
        return {
            "page_no": page_no,
            "page_size": page_size,
            "total": total,
            "has_next": offset + page_size < total,
            "items": formatted_data,
        }