fix: prevent duplicate explanation caching with a cache queue

冯琪 2025-03-13 14:00:15 +08:00
parent 3645fa6159
commit ab8dbe8132
4 changed files with 36 additions and 8 deletions


@@ -10,7 +10,8 @@ export class AITeacherApp {
         this.pageNumPending = null;
         this.scale = 1.0;
         this.canvas = document.getElementById('pdf-canvas');
-        this.ctx = this.canvas.getContext('2d');
+        // this.ctx = this.canvas.getContext('2d');
+        this.ctx = this.canvas.getContext('2d', { willReadFrequently: true });
         this.messageTimeout = null;
         // Live2D controller
@@ -790,6 +791,24 @@ export class AITeacherApp {
         }
     }

+    showMessage(message, isError = false) {
+        const statusMessage = document.getElementById('status-message');
+        statusMessage.textContent = message;
+        statusMessage.className = isError ? 'error' : 'success';
+        statusMessage.style.display = 'block';
+
+        // Clear any previous timeout
+        if (this.messageTimeout) {
+            clearTimeout(this.messageTimeout);
+        }
+
+        // Hide the message after 3 seconds
+        this.messageTimeout = setTimeout(() => {
+            statusMessage.style.display = 'none';
+        }, 3000);
+    }
+
     async loadVoices() {
         try {
             const response = await fetch(`http://${this.api_host}/api/voices`);

public/favicon.ico (new binary file, 15 KiB; content not shown)


@@ -40,10 +40,12 @@ if not openai_api_key:
     logger.warning("OpenAI API key not found. AI explanation will use fallback mode.")

 # Load settings
+# Keep keys, server addresses, ports, and similar settings in setting.json as much as possible
 try:
     with open('setting.json', 'r') as f:
         settings = json.load(f)
     port = settings.get('websocket_port', 6006)
+    TTS_BASE_URL = settings.get('TTS_BASE_URL', TTS_BASE_URL)
 except Exception as e:
     logger.error(f"Error loading settings: {e}")
     port = 6006
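
The hunk above extends an existing fallback pattern: module-level defaults stay in effect unless setting.json overrides them, with dict.get supplying the fallback. A minimal, self-contained sketch of the pattern; the default values here are assumptions, not the project's real ones:

import json
import logging

logger = logging.getLogger(__name__)

# Module-level defaults, used when setting.json lacks a key (values assumed)
TTS_BASE_URL = "http://localhost:8880"
port = 6006

try:
    with open('setting.json', 'r') as f:
        settings = json.load(f)
    port = settings.get('websocket_port', 6006)
    # Passing the current value as the default keeps it when the key is absent
    TTS_BASE_URL = settings.get('TTS_BASE_URL', TTS_BASE_URL)
except Exception as e:
    # A missing or malformed file leaves the defaults above in place
    logger.error(f"Error loading settings: {e}")
    port = 6006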
@@ -264,11 +266,14 @@ async def tts():
             'error': result["error"]
         })
-cache_explanation = {}
+cache_explanation = {"is_caching_flag":[]}
 # Run asynchronously here to preload cached explanations ahead of time
 async def generate_cache_explanation(page_num,voice,speed):
     global cache_explanation
-    if page_num not in cache_explanation and page_num > 0 and page_num <= pdfpages:
+    global pdfpages
+    global current_pdf_path
+    if page_num not in cache_explanation and page_num > 0 and page_num <= pdfpages and page_num not in cache_explanation["is_caching_flag"]:
+        cache_explanation["is_caching_flag"].append(page_num)
         text = extract_page_text(current_pdf_path, page_num)["page_text"]
         result = []
         result.append(generate_explanation(page_num, text))
@@ -276,7 +281,8 @@ async def generate_cache_explanation(page_num,voice,speed):
         cache_explanation[page_num] = result
         logger.info(f"Cached explanation: {page_num}")
-    if page_num+1 not in cache_explanation and page_num+1 > 0 and page_num+1 <= pdfpages:
+    if page_num+1 not in cache_explanation and page_num+1 > 0 and page_num+1 <= pdfpages and page_num+1 not in cache_explanation["is_caching_flag"]:
+        cache_explanation["is_caching_flag"].append(page_num+1)
         text = extract_page_text(current_pdf_path, page_num+1)["page_text"]
         result = []
         result.append(generate_explanation(page_num+1, text))
@@ -284,7 +290,8 @@ async def generate_cache_explanation(page_num,voice,speed):
         cache_explanation[page_num+1] = result
         logger.info(f"Cached explanation: {page_num+1}")
-    if page_num-1 not in cache_explanation and page_num-1 > 0 and page_num-1 <= pdfpages:
+    if page_num-1 not in cache_explanation and page_num-1 > 0 and page_num-1 <= pdfpages and page_num-1 not in cache_explanation["is_caching_flag"]:
+        cache_explanation["is_caching_flag"].append(page_num-1)
         text = extract_page_text(current_pdf_path, page_num-1)["page_text"]
         result = []
         result.append(generate_explanation(page_num-1, text))
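
The three hunks above are the cache queue named in the commit message: is_caching_flag records page numbers whose explanations are already being generated, so overlapping calls skip the page instead of generating it twice. A runnable sketch of the same in-flight guard, reduced to its essentials; expensive_explanation and cache_page are illustrative names, and unlike the diff this sketch also drops the flag once generation finishes:

import asyncio

# Cache maps page number -> result; "is_caching_flag" lists in-flight pages
cache = {"is_caching_flag": []}

async def expensive_explanation(page):
    await asyncio.sleep(0.1)  # stand-in for the real explanation/TTS work
    return f"explanation for page {page}"

async def cache_page(page):
    # Skip pages already cached or currently being generated
    if page in cache or page in cache["is_caching_flag"]:
        return
    cache["is_caching_flag"].append(page)  # mark in-flight before awaiting
    try:
        cache[page] = await expensive_explanation(page)
    finally:
        # Unlike the diff, remove the flag afterwards so a failure can retry
        cache["is_caching_flag"].remove(page)

async def main():
    # Two overlapping requests for page 3; only one generation runs
    await asyncio.gather(cache_page(3), cache_page(3))
    print(cache[3])

asyncio.run(main())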
@@ -355,14 +362,15 @@ async def load_pdf():
     global cache_explanation
     global pdfpages
     # Clear the cache
-    cache_explanation = {}
+    # cache_explanation = {"is_caching_flag":[]}
     chat_history = []
     data = await request.json
     logger.info(f"Loading PDF: {data}")
     pdf_path = data.get('path', './public/pdf/test.pdf')
+    if pdf_path != current_pdf_path:
+        cache_explanation = {"is_caching_flag":[]}
     try:
         # Check whether the PDF exists
         if not os.path.exists(pdf_path):
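
The new guard clears the cache only when a different document is loaded, so reloading the current PDF keeps its cached explanations. The same logic in miniature; variable names follow the diff, the second path is illustrative:

# Sketch of the path guard in load_pdf
cache_explanation = {"is_caching_flag": [], 1: "cached page 1"}
current_pdf_path = './public/pdf/test.pdf'

def load_pdf(pdf_path):
    global cache_explanation, current_pdf_path
    # Only a different document invalidates the cache
    if pdf_path != current_pdf_path:
        cache_explanation = {"is_caching_flag": []}
    current_pdf_path = pdf_path

load_pdf('./public/pdf/test.pdf')    # same file: cached pages survive
assert 1 in cache_explanation
load_pdf('./public/pdf/other.pdf')   # different file: cache is reset
assert 1 not in cache_explanation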
@@ -384,7 +392,7 @@ async def load_pdf():
         voice = 'zf_xiaoxiao'
         speed = 1.0
         start_time = time.time()
-        await generate_cache_explanation(0,voice,speed)
+        asyncio.create_task(generate_cache_explanation(0,voice,speed))
         logger.info(f"Explanation preload took: {time.time()-start_time}")
         return jsonify({
             'success': True,
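
Swapping await for asyncio.create_task makes the preload fire-and-forget: load_pdf responds immediately while explanations generate in the background, which also means the start_time log now measures scheduling time rather than generation time. A small sketch of the difference; preload and handler are illustrative names:

import asyncio
import time

async def preload():
    await asyncio.sleep(1.0)  # stand-in for generate_cache_explanation
    print("preload finished")

async def handler():
    start_time = time.time()
    task = asyncio.create_task(preload())  # schedules and returns at once
    # With create_task this logs ~0s; with `await preload()` it would log ~1s
    print(f"scheduling took {time.time() - start_time:.4f}s")
    return task  # keep a reference so the pending task is not garbage-collected

async def main():
    task = await handler()
    await task  # a real server's event loop would keep running instead

asyncio.run(main())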


@@ -1,6 +1,7 @@
 {
     "OPENAI_API_KEY":"your_openai_api_key_here",
     "VOICERSS_API_KEY":"your_voicerss_api_key_here",
+    "TTS_BASE_URL":"https://server.feng-arch.cn:52861",
     "websocket_port": 6006,
     "port": 6007
 }