构建可信AI体系:从技术实现到合规要求的全方位保障
2026/1/19 15:22:17
在前三节中,我们探讨了AI安全的多个方面,包括提示注入、对抗性攻击、视觉指令绕过以及隐私保护技术。本节将综合这些内容,探讨如何构建可信AI体系,从技术实现到合规要求的全方位保障。
可信AI是指在设计、开发、部署和使用的全生命周期中,具备可靠性、安全性、公平性、透明性和可问责性等特征的AI系统。
import torch
import torch.nn as nn
import numpy as np
import re
import logging
from datetime import datetime, timezone
from typing import Dict, List, Tuple, Any


class TrustedAISecurityFramework:
    """Trusted-AI security framework.

    Runs a registry of threat detectors (prompt injection, adversarial
    attacks, visual bypass, data poisoning) over incoming requests and
    aggregates their results into a single risk report.
    """

    # Common prompt-injection phrasings. Compiled once at class-definition
    # time instead of rebuilding (and re-importing `re`) on every call.
    _INJECTION_PATTERNS = [
        re.compile(p)
        for p in (
            r"(?i)ignore\s+previous\s+instructions?",
            r"(?i)override\s+previous\s+rules?",
            r"(?i)disregard\s+the\s+above",
            r"(?i)forget\s+your\s+training",
            r"(?i)system\s+instruction\s*:",
            r"(?i)bypass\s+.*?security",
        )
    ]

    def __init__(self):
        # Registry mapping threat names to detector callables.
        # NOTE(review): _detect_visual_bypass and _detect_data_poisoning are
        # referenced here but defined elsewhere in the file (not in this chunk).
        self.threat_detectors = {
            'prompt_injection': self._detect_prompt_injection,
            'adversarial_attack': self._detect_adversarial_attack,
            'visual_bypass': self._detect_visual_bypass,
            'data_poisoning': self._detect_data_poisoning,
        }
        self.security_logger = logging.getLogger('TrustedAI.Security')
        # In-memory audit trail of every report produced.
        self.security_log: List[Dict[str, Any]] = []

    def comprehensive_security_check(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """Run every registered detector over *input_data* and build a report.

        Args:
            input_data: Request payload; detectors read keys such as
                'text' and 'image'.

        Returns:
            Report dict with 'timestamp', 'checks_performed',
            'threats_detected', 'risk_level' ('low'/'medium'/'high'),
            and 'recommendations'.
        """
        security_report = {
            # BUGFIX: the original stored the torch device string under
            # 'timestamp'; record an actual UTC timestamp instead.
            'timestamp': datetime.now(timezone.utc).isoformat(),
            'checks_performed': [],
            'threats_detected': [],
            'risk_level': 'low',
            'recommendations': [],
        }
        total_risk_score = 0.0

        # A failing detector is logged and skipped rather than aborting
        # the whole check.
        for threat_name, detector_func in self.threat_detectors.items():
            try:
                threat_result = detector_func(input_data)
                security_report['checks_performed'].append(threat_name)
                if threat_result['is_threat']:
                    security_report['threats_detected'].append({
                        'type': threat_name,
                        'details': threat_result['details'],
                        'confidence': threat_result['confidence'],
                    })
                    # Only confirmed threats contribute to the aggregate risk.
                    total_risk_score += threat_result['risk_score']
            except Exception as e:
                self.security_logger.error(f"安全检查{threat_name}执行失败:{e}")

        # Map the aggregate score onto a coarse three-tier risk level.
        if total_risk_score > 0.7:
            security_report['risk_level'] = 'high'
            security_report['recommendations'].append('立即阻止请求并进行人工审核')
        elif total_risk_score > 0.4:
            security_report['risk_level'] = 'medium'
            security_report['recommendations'].append('需要额外验证')
        else:
            security_report['risk_level'] = 'low'
            security_report['recommendations'].append('正常处理')

        self.security_log.append(security_report)
        return security_report

    def _detect_prompt_injection(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """Detect prompt-injection attempts in the request text.

        Args:
            input_data: Request payload; reads the 'text' key (defaults
                to the empty string when absent).

        Returns:
            Dict with 'is_threat', 'confidence', 'risk_score', and
            'details' (matched pattern strings and input text length).
        """
        text_input = input_data.get('text', '')

        # Collect the source strings of every precompiled pattern that matches.
        detected_patterns = [
            pattern.pattern
            for pattern in self._INJECTION_PATTERNS
            if pattern.search(text_input)
        ]

        is_threat = len(detected_patterns) > 0
        # Each matched pattern adds 0.3 confidence, capped at 1.0.
        confidence = min(len(detected_patterns) * 0.3, 1.0)
        risk_score = confidence * 0.8  # prompt injection is treated as high-impact

        return {
            'is_threat': is_threat,
            'confidence': confidence,
            'risk_score': risk_score,
            'details': {
                'detected_patterns': detected_patterns,
                'text_length': len(text_input),
            },
        }

    def _detect_adversarial_attack(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """Detect adversarial perturbations in the request image.

        Args:
            input_data: Request payload; reads the 'image' key.

        Returns:
            Dict with 'is_threat', 'confidence', and 'risk_score'.
        """
        image_input = input_data.get('image')
        if image_input is None:
            # No image supplied: nothing to analyse. NOTE(review): the
            # original source is cut off mid-dict here; it is closed with
            # exactly the keys visible before the truncation.
            return {'is_threat': False, 'confidence': 0.0, 'risk_score': 0.0}

        # TODO(review): the remainder of this method (the actual image
        # analysis) lies beyond the visible source chunk and must be
        # restored from the original file. Until then, images are
        # conservatively reported as non-threatening.
        return {'is_threat': False, 'confidence': 0.0, 'risk_score': 0.0, 'details': {}}