实验三:AI语音生成平台
核心代码
1. 语音生成服务 (services/voice_service.py)
import os
import re
from datetime import datetime
from pydub import AudioSegment
from dotenv import load_dotenv
from utils.aliyun_client import AliyunClient
from utils.baidu_client import BaiduClient
import io
import numpy as np
import wave# 加载环境变量
# Load environment variables (.env provides API keys and UPLOAD_FOLDER).
load_dotenv()


def text_to_speech(text, voice_type='child', speed='normal'):
    """Convert text to speech and save the result as a WAV file.

    Tries the Aliyun TTS API first, falls back to the Baidu TTS API, and
    finally to a locally generated mock audio clip so the service always
    produces an output file.

    Args:
        text: The text to synthesize.
        voice_type: Voice type: 'child', 'male' or 'female'.
        speed: 'slow' / 'normal' / 'fast', or a number clamped to 0.5-2.0.

    Returns:
        dict: Audio metadata (filename, path, url, text, voice_type, speed).

    Raises:
        Exception: If every synthesis strategy fails.
    """
    # Make sure the uploads directory exists.
    uploads_dir = os.path.abspath(os.getenv('UPLOAD_FOLDER', './uploads'))
    os.makedirs(uploads_dir, exist_ok=True)

    # Public voice type -> Aliyun voice id.
    voice_map = {
        'child': 'aixia',    # Aliyun child voice
        'male': 'zhizheng',  # Aliyun male voice
        'female': 'ruoxi',   # Aliyun female voice
    }
    # Preset speed names -> playback-rate multipliers.
    speed_map = {
        'slow': 0.8,
        'normal': 1.0,
        'fast': 1.2,
    }

    if isinstance(speed, str):
        speed = speed_map.get(speed, 1.0)
    else:
        # Clamp numeric speeds to a sane range.
        speed = max(0.5, min(2.0, float(speed)))

    voice_id = voice_map.get(voice_type, 'ruoxi')
    print(f"🔊 开始语音合成: 文本长度={len(text)}, 声音类型={voice_type}, 语速={speed}")

    # Split long text into API-sized segments.
    text_segments = _split_text(text)

    # Short text: synthesize in one shot.
    if len(text_segments) == 1:
        return _synthesize_single_segment(text_segments[0], voice_id, speed,
                                          uploads_dir, voice_type)

    # Longer text: synthesize each segment, then concatenate.
    audio_segments = []
    for i, segment in enumerate(text_segments):
        print(f"🔊 处理文本段 {i+1}/{len(text_segments)}")
        try:
            # Preferred provider: Aliyun.
            result = _synthesize_with_aliyun(segment, voice_id, speed)
            if result and 'audio_data' in result:
                audio_segments.append(result['audio_data'])
                continue
            # BUGFIX: pass the public voice_type (not the Aliyun voice id)
            # so Baidu's own voice mapping can resolve it.
            result = _synthesize_with_baidu(segment, voice_type, speed)
            if result and 'audio_data' in result:
                audio_segments.append(result['audio_data'])
                continue
            # BUGFIX: both APIs failed — append an in-memory mock segment
            # (previously the mock result had no 'audio_data' key, so the
            # segment was silently dropped from the merged audio).
            audio_segments.append(_mock_audio_segment(segment))
        except Exception as e:
            print(f"❌ 处理文本段失败: {e}")
            # Last resort so this segment is not lost.
            audio_segments.append(_mock_audio_segment(segment))

    if not audio_segments:
        raise Exception("所有语音合成方法都失败了,请检查API配置或稍后再试。")

    # Concatenate all pydub segments into one clip.
    combined_audio = audio_segments[0]
    for extra in audio_segments[1:]:
        combined_audio = combined_audio + extra

    audio_filename, audio_path = _save_audio(combined_audio, uploads_dir)
    print(f"✅ 音频合并成功: {audio_path}")
    return {
        'filename': audio_filename,
        'path': audio_path,
        'url': f"/uploads/{audio_filename}",
        'text': text,
        'voice_type': voice_type,
        'speed': speed,
    }


def _save_audio(audio, uploads_dir, prefix="story_audio"):
    """Export a pydub AudioSegment as a timestamped WAV under uploads_dir.

    Args:
        audio: pydub AudioSegment to export.
        uploads_dir: Target directory.
        prefix: Filename prefix.

    Returns:
        tuple[str, str]: (filename, absolute path) of the written file.
    """
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = f"{prefix}_{timestamp}.wav"
    path = os.path.join(uploads_dir, filename)
    audio.export(path, format="wav")
    return filename, path


def _split_text(text):
    """Split long text into segments suitable for TTS APIs.

    Sentences are split on CJK/ASCII end punctuation and re-packed greedily
    so every segment stays within 500 characters.

    Args:
        text: The text to split.

    Returns:
        list: Non-empty text segments, each at most 500 characters.
    """
    # Split after sentence-ending punctuation (CJK and ASCII).
    split_pattern = r'(?<=[。!?.!?])\s*'
    segments = [seg.strip() for seg in re.split(split_pattern, text) if seg.strip()]

    result_segments = []
    current_segment = ""
    for seg in segments:
        sep = " " if current_segment else ""
        # BUGFIX: account for the joining space so segments never exceed 500.
        if len(current_segment) + len(sep) + len(seg) <= 500:
            current_segment += sep + seg
        else:
            if current_segment:
                result_segments.append(current_segment)
                current_segment = ""
            # BUGFIX: hard-split any single sentence longer than the limit
            # (previously only done when the accumulator happened to be empty).
            while len(seg) > 500:
                result_segments.append(seg[:500])
                seg = seg[500:]
            current_segment = seg
    # Flush the final segment.
    if current_segment:
        result_segments.append(current_segment)
    return result_segments


def _synthesize_single_segment(text, voice_id, speed, uploads_dir, voice_type=None):
    """Synthesize one text segment and save it under uploads_dir.

    Args:
        text: Segment text.
        voice_id: Aliyun voice id (e.g. 'aixia').
        speed: Numeric speed multiplier.
        uploads_dir: Directory the WAV file is written to.
        voice_type: Public voice type ('child'/'male'/'female') used for the
            Baidu fallback; defaults to voice_id for backward compatibility.

    Returns:
        dict: Audio metadata with a 'source' key ('aliyun'/'baidu'/'mock').
    """
    if voice_type is None:
        voice_type = voice_id
    try:
        # Preferred provider: Aliyun.
        result = _synthesize_with_aliyun(text, voice_id, speed)
        if result and 'audio_data' in result:
            audio_filename, audio_path = _save_audio(result['audio_data'], uploads_dir)
            print(f"✅ 阿里云语音合成成功: {audio_path}")
            return {
                'filename': audio_filename,
                'path': audio_path,
                'url': f"/uploads/{audio_filename}",
                'text': text,
                'voice_type': voice_id,
                'speed': speed,
                'source': 'aliyun',
            }
        # BUGFIX: the Baidu fallback previously received the Aliyun voice id,
        # which its voice map cannot resolve; pass the public voice type.
        result = _synthesize_with_baidu(text, voice_type, speed)
        if result and 'audio_data' in result:
            audio_filename, audio_path = _save_audio(result['audio_data'], uploads_dir)
            print(f"✅ 百度语音合成成功: {audio_path}")
            return {
                'filename': audio_filename,
                'path': audio_path,
                'url': f"/uploads/{audio_filename}",
                'text': text,
                'voice_type': voice_id,
                'speed': speed,
                'source': 'baidu',
            }
        # Both providers failed: write a mock audio file instead.
        return _generate_mock_audio(text, uploads_dir)
    except Exception as e:
        print(f"❌ 语音合成失败: {e}")
        # Mock audio as the final fallback.
        return _generate_mock_audio(text, uploads_dir)


def _synthesize_with_aliyun(text, voice_id, speed):
    """Call the Aliyun TTS API.

    Args:
        text: Text to synthesize.
        voice_id: Aliyun voice id.
        speed: Speed multiplier.

    Returns:
        dict with an 'audio_data' AudioSegment on success, otherwise None.
    """
    try:
        client = AliyunClient()
        # Aliyun TTS parameters.
        params = {
            'voice': voice_id,
            'speed': speed,
            'format': 'wav',
        }
        print(f"🔄 调用阿里云语音合成API...")
        audio_bytes = client.text_to_speech(text, **params)
        if audio_bytes:
            # Wrap the raw WAV bytes in a pydub AudioSegment.
            audio = AudioSegment.from_file(io.BytesIO(audio_bytes), format="wav")
            return {'audio_data': audio}
    except Exception as e:
        print(f"❌ 阿里云语音合成失败: {e}")
    return None


def _synthesize_with_baidu(text, voice_type, speed):
    """Call the Baidu TTS API.

    Args:
        text: Text to synthesize.
        voice_type: Public voice type ('child'/'male'/'female').
        speed: Speed multiplier (mapped onto Baidu's 0-9 'spd' scale).

    Returns:
        dict with an 'audio_data' AudioSegment on success, otherwise None.
    """
    try:
        client = BaiduClient()
        # Public voice type -> Baidu speaker id.
        voice_map = {
            'child': 5,   # Baidu child voice id
            'male': 0,    # Baidu male voice id
            'female': 1,  # Baidu female voice id
        }
        params = {
            'per': voice_map.get(voice_type, 1),
            'spd': int(speed * 5),  # Baidu speed range is 0-9
            'pit': 5,  # pitch
            'vol': 5,  # volume
            'aue': 6,  # WAV output format
        }
        print(f"🔄 调用百度语音合成API...")
        audio_bytes = client.text_to_speech(text, **params)
        if audio_bytes:
            # Wrap the raw WAV bytes in a pydub AudioSegment.
            audio = AudioSegment.from_file(io.BytesIO(audio_bytes), format="wav")
            return {'audio_data': audio}
    except Exception as e:
        print(f"❌ 百度语音合成失败: {e}")
    return None


def _mock_waveform(text, sample_rate=16000):
    """Build a deterministic beep-like waveform standing in for real speech.

    Args:
        text: Source text; only its length influences duration and pattern.
        sample_rate: Output sample rate in Hz.

    Returns:
        numpy.ndarray: int16 mono samples at sample_rate.
    """
    # Duration scales with text length, clamped to 1-30 seconds.
    duration = max(1, min(30, len(text) * 0.1))
    t = np.linspace(0, duration, int(sample_rate * duration), endpoint=False)
    freq = 440  # A4 base tone
    audio_data = 0.5 * np.sin(2 * np.pi * freq * t)
    # Vary the pitch over time so the clip is not one flat tone.
    for i in range(3, len(text) + 1, 5):
        start = int(i * 0.1 * sample_rate)
        if start < len(t):
            end = min(int((i + 3) * 0.1 * sample_rate), len(t))
            freq = 440 + (i % 5) * 100
            audio_data[start:end] = 0.5 * np.sin(2 * np.pi * freq * t[start:end])
    return np.int16(audio_data * 32767)


def _mock_audio_segment(text):
    """Return the mock waveform for text as an in-memory pydub AudioSegment."""
    samples = _mock_waveform(text)
    return AudioSegment(data=samples.tobytes(), sample_width=2,
                        frame_rate=16000, channels=1)


def _generate_mock_audio(text, uploads_dir):
    """Write a mock WAV file (used when every TTS API call failed).

    Args:
        text: Text the audio stands in for.
        uploads_dir: Directory the WAV file is written to.

    Returns:
        dict: Audio metadata (filename, path, url, ...), source='mock'.

    Raises:
        Exception: If the file cannot be written.
    """
    print("🔄 生成模拟音频...")
    try:
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        audio_filename = f"mock_audio_{timestamp}.wav"
        audio_path = os.path.join(uploads_dir, audio_filename)

        sample_rate = 16000
        samples = _mock_waveform(text, sample_rate)

        # Write the raw samples out with the stdlib wave module.
        with wave.open(audio_path, 'w') as wf:
            wf.setnchannels(1)   # mono
            wf.setsampwidth(2)   # 16-bit
            wf.setframerate(sample_rate)
            wf.writeframes(samples.tobytes())

        print(f"✅ 模拟音频生成成功: {audio_path}")
        return {
            'filename': audio_filename,
            'path': audio_path,
            'url': f"/uploads/{audio_filename}",
            'text': text,
            'voice_type': 'mock',
            'speed': 1.0,
            'source': 'mock',
        }
    except Exception as e:
        print(f"❌ 生成模拟音频失败: {e}")
        raise Exception("无法生成音频,请稍后再试。") from e
2. 语音生成API路由 (routes/voice_routes.py)
from flask import Blueprint, request, jsonify

# Blueprint for voice-related endpoints.
bp = Blueprint('voice', __name__)

# Voice synthesis service.
from services.voice_service import text_to_speech


@bp.route('/synthesize', methods=['POST'])
def synthesize_voice():
    """POST /voice/synthesize — synthesize speech for a piece of text.

    Expects a JSON body with 'text' (required, <= 2000 chars), and optional
    'voice_type' ('child'/'male'/'female') and 'speed' ('slow'/'normal'/
    'fast' or a number in 0.5-2.0).

    Returns:
        JSON audio metadata (including 'full_url') with status 200, or an
        error payload with 400/429/500/503.
    """
    try:
        # silent=True: malformed JSON yields None and hits the 400 path
        # below instead of aborting inside Flask.
        data = request.get_json(silent=True)
        if not data or 'text' not in data:
            return jsonify({'error': '缺少必要的文本参数'}), 400

        text = data['text']
        voice_type = data.get('voice_type', 'child')  # default: child voice
        speed = data.get('speed', 'normal')           # default: normal speed

        if len(text) > 2000:
            return jsonify({'error': '文本长度不能超过2000字符'}), 400

        # Validate the speed parameter.
        if isinstance(speed, str):
            if speed not in ['slow', 'normal', 'fast']:
                return jsonify({'error': '语速必须是 slow, normal 或 fast'}), 400
        else:
            try:
                speed_value = float(speed)
                if speed_value < 0.5 or speed_value > 2.0:
                    return jsonify({'error': '语速数值必须在 0.5 到 2.0 之间'}), 400
            # BUGFIX: also catch TypeError (e.g. a JSON list/null), which
            # previously escaped to the generic 500 handler.
            except (TypeError, ValueError):
                return jsonify({'error': '语速必须是 slow, normal, fast 或 0.5-2.0 之间的数字'}), 400

        # Validate the voice type.
        if voice_type not in ['child', 'male', 'female']:
            return jsonify({'error': '声音类型必须是 child, male 或 female'}), 400

        # Run the synthesis and build an absolute URL for the client.
        voice_data = text_to_speech(text, voice_type, speed)
        voice_data['full_url'] = request.host_url.rstrip('/') + voice_data['url']
        return jsonify(voice_data), 200

    except Exception as e:
        # Log and map known failure modes to specific status codes.
        print(f"❌ 语音合成API错误: {e}")
        if "网络错误" in str(e) or "API调用失败" in str(e):
            return jsonify({'error': '语音服务暂时不可用,请稍后再试'}), 503
        elif "频率限制" in str(e) or "Too Many Requests" in str(e):
            return jsonify({'error': '请求过于频繁,请稍后再试'}), 429
        return jsonify({'error': f'生成语音时发生错误: {str(e)}'}), 500
功能说明
语音生成服务
- 核心功能:将故事文本转换为自然流畅的语音
- 多平台支持:集成阿里云和百度两个语音合成API,提供双重保障
- 声音多样性:支持儿童、男声、女声三种声音类型
- 语速控制:提供慢速、正常、快速三种预设语速,或自定义0.5-2.0范围内的语速
- 智能分段:自动将长文本分割为多个短段进行处理,避免API限制
- 音频合并:使用pydub库将多段音频无缝合并
- 容错机制:当API调用失败时,自动创建模拟音频作为后备方案
文本分段处理
- 智能断句:根据标点符号(句号、感叹号、问号)进行自然断句
- 长度控制:确保每段文本不超过API限制(500字符)
- 优化合并:短文本智能合并,减少API调用次数
双重API策略
- 优先使用:阿里云智能语音交互API,提供更自然的声音效果
- 自动回退:阿里云失败时自动切换到百度语音合成API
- 最终保障:所有API失败时,生成模拟音频确保服务可用性
API路由
- 语音合成:POST /voice/synthesize,接收文本、声音类型和语速参数
- 参数验证:严格的输入验证,确保文本长度、语速范围和声音类型合法
- 错误处理:根据不同错误类型返回相应的HTTP状态码和友好提示
- 完整URL:返回相对URL和完整URL,方便前端使用
技术栈
- Python 3.12
- Flask Web框架
- pydub音频处理库
- NumPy科学计算库
- 阿里云智能语音交互API
- 百度语音合成API
- WAV音频格式处理
- 异常处理与容错机制
使用方法
- 确保配置了阿里云和百度API密钥(在.env文件中)
- 调用 /voice/synthesize 端点,传入故事文本
- 可选指定声音类型(child/male/female)和语速(slow/normal/fast或0.5-2.0之间的数字)
- API返回生成的音频信息,包括文件名、路径、URL和完整URL
与Story模型的集成
- 生成的音频URL可以保存到Story模型的audio_url字段
- 支持为每个故事关联配套音频
- 与实验一的故事生成服务协同工作,为故事提供朗读功能
特色功能
- 多重保障:双API策略+模拟音频后备,确保服务高可用性
- 儿童友好:专为儿童设计的声音类型,语音自然易懂
- 灵活配置:支持多种声音类型和语速选择
- 智能分段:自动处理长文本,优化合成效果
- 音频优化:模拟音频采用16kHz采样率生成;所有合成结果统一导出为WAV格式,保证输出格式一致性