diff --git a/js/ai-teacher-app.js b/js/ai-teacher-app.js
index ad53a17..12af1f9 100644
--- a/js/ai-teacher-app.js
+++ b/js/ai-teacher-app.js
@@ -10,7 +10,8 @@ export class AITeacherApp {
         this.pageNumPending = null;
         this.scale = 1.0;
         this.canvas = document.getElementById('pdf-canvas');
-        this.ctx = this.canvas.getContext('2d');
+        // this.ctx = this.canvas.getContext('2d');
+        this.ctx = this.canvas.getContext('2d', { willReadFrequently: true });
         this.messageTimeout = null;
 
         // Live2D controller
@@ -790,6 +791,24 @@ export class AITeacherApp {
         }
     }
 
+
+    showMessage(message, isError = false) {
+        const statusMessage = document.getElementById('status-message');
+        statusMessage.textContent = message;
+        statusMessage.className = isError ? 'error' : 'success';
+        statusMessage.style.display = 'block';
+
+        // Clear any previous timeout
+        if (this.messageTimeout) {
+            clearTimeout(this.messageTimeout);
+        }
+
+        // Hide the message after 3 seconds
+        this.messageTimeout = setTimeout(() => {
+            statusMessage.style.display = 'none';
+        }, 3000);
+    }
+
     async loadVoices() {
         try {
             const response = await fetch(`http://${this.api_host}/api/voices`);
diff --git a/public/favicon.ico b/public/favicon.ico
new file mode 100644
index 0000000..5a57ce7
Binary files /dev/null and b/public/favicon.ico differ
diff --git a/server.py b/server.py
index dbfa85c..0ed0b5c 100644
--- a/server.py
+++ b/server.py
@@ -40,10 +40,12 @@ if not openai_api_key:
     logger.warning("OpenAI API key not found. AI explanation will use fallback mode.")
 
 # Load settings
+# Try to keep keys, server addresses, ports, and similar settings all in setting.json
 try:
     with open('setting.json', 'r') as f:
         settings = json.load(f)
     port = settings.get('websocket_port', 6006)
+    TTS_BASE_URL = settings.get('TTS_BASE_URL', TTS_BASE_URL)
 except Exception as e:
     logger.error(f"Error loading settings: {e}")
     port = 6006
@@ -264,11 +266,14 @@ async def tts():
             'error': result["error"]
         })
 
-cache_explanation = {}
+cache_explanation = {"is_caching_flag":[]}
 # Runs asynchronously to preload and cache explanations ahead of time
 async def generate_cache_explanation(page_num,voice,speed):
     global cache_explanation
-    if page_num not in cache_explanation and page_num > 0 and page_num <= pdfpages:
+    global pdfpages
+    global current_pdf_path
+    if page_num not in cache_explanation and page_num > 0 and page_num <= pdfpages and page_num not in cache_explanation["is_caching_flag"]:
+        cache_explanation["is_caching_flag"].append(page_num)
         text = extract_page_text(current_pdf_path, page_num)["page_text"]
         result = []
         result.append(generate_explanation(page_num, text))
@@ -276,7 +281,8 @@
         cache_explanation[page_num] = result
         logger.info(f"已缓存讲解: {page_num}")
 
-    if page_num+1 not in cache_explanation and page_num+1 > 0 and page_num+1 <= pdfpages:
+    if page_num+1 not in cache_explanation and page_num+1 > 0 and page_num+1 <= pdfpages and page_num+1 not in cache_explanation["is_caching_flag"]:
+        cache_explanation["is_caching_flag"].append(page_num+1)
         text = extract_page_text(current_pdf_path, page_num+1)["page_text"]
         result = []
         result.append(generate_explanation(page_num+1, text))
@@ -284,7 +290,8 @@
         cache_explanation[page_num+1] = result
         logger.info(f"已缓存讲解: {page_num+1}")
 
-    if page_num-1 not in cache_explanation and page_num-1 > 0 and page_num-1 <= pdfpages:
+    if page_num-1 not in cache_explanation and page_num-1 > 0 and page_num-1 <= pdfpages and page_num-1 not in cache_explanation["is_caching_flag"]:
+        cache_explanation["is_caching_flag"].append(page_num-1)
         text = extract_page_text(current_pdf_path, page_num-1)["page_text"]
         result = []
         result.append(generate_explanation(page_num-1, text))
@@ -355,14 +362,15 @@ async def load_pdf():
     global cache_explanation
     global pdfpages
     # Clear the cache
-    cache_explanation = {}
+    # cache_explanation = {"is_caching_flag":[]}
     chat_history = []
 
     data = await request.json
     logger.info(f"加载PDF: {data}")
     pdf_path = data.get('path', './public/pdf/test.pdf')
-    
+    if pdf_path != current_pdf_path:
+        cache_explanation = {"is_caching_flag":[]}
 
     try:
         # Check that the PDF exists
         if not os.path.exists(pdf_path):
@@ -384,7 +392,7 @@
         voice = 'zf_xiaoxiao'
         speed = 1.0
         start_time = time.time()
-        await generate_cache_explanation(0,voice,speed)
+        asyncio.create_task(generate_cache_explanation(0,voice,speed))
         logger.info(f"预加载讲解耗时: {time.time()-start_time}")
         return jsonify({
             'success': True,
diff --git a/setting.json b/setting.json
index a38ee6e..f7af4bb 100644
--- a/setting.json
+++ b/setting.json
@@ -1,6 +1,7 @@
 {
     "OPENAI_API_KEY":"your_openai_api_key_here",
     "VOICERSS_API_KEY":"your_voicerss_api_key_here",
+    "TTS_BASE_URL":"https://server.feng-arch.cn:52861",
     "websocket_port": 6006,
     "port": 6007
 }
\ No newline at end of file
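The server.py changes above all serve one pattern: preload explanations for the current page and its neighbours in the background, and use the "is_caching_flag" list so a page is never generated twice while an earlier preload is still running. The sketch below is a minimal, self-contained illustration of that pattern under those assumptions only; fake_generate_explanation, handle_load_pdf, the page count, and the sleep times are made-up stand-ins, not the project's actual functions.

    # Minimal sketch of the preloading pattern in server.py above.
    # Illustrative only: fake_generate_explanation, handle_load_pdf,
    # the page count, and the sleep times are stand-ins.
    import asyncio

    cache_explanation = {"is_caching_flag": []}   # page_num -> explanation, plus an in-progress list
    pdfpages = 3                                  # pretend the PDF has 3 pages

    async def fake_generate_explanation(page_num):
        await asyncio.sleep(0.5)                  # stands in for the LLM + TTS round trip
        return f"explanation for page {page_num}"

    async def generate_cache_explanation(page_num):
        # Preload the requested page and its neighbours, skipping any page that
        # is already cached or already being cached (the "is_caching_flag" guard).
        for p in (page_num, page_num + 1, page_num - 1):
            already = p in cache_explanation or p in cache_explanation["is_caching_flag"]
            if 0 < p <= pdfpages and not already:
                cache_explanation["is_caching_flag"].append(p)
                cache_explanation[p] = await fake_generate_explanation(p)

    async def handle_load_pdf(page_num):
        # Fire-and-forget: schedule the preload instead of awaiting it, mirroring
        # the switch from "await ..." to "asyncio.create_task(...)" in load_pdf().
        asyncio.create_task(generate_cache_explanation(page_num))
        return {"success": True}                  # returns before the preload finishes

    async def main():
        print(await handle_load_pdf(1))           # -> {'success': True}, immediately
        await asyncio.sleep(2)                    # let the background task run
        print(sorted(k for k in cache_explanation if k != "is_caching_flag"))  # -> [1, 2]

    if __name__ == "__main__":
        asyncio.run(main())

Scheduling the preload with asyncio.create_task instead of awaiting it means the load_pdf response no longer waits for the LLM and TTS round trips; the trade-off is that the preload may still be running when the client asks for the first page, so that request can no longer assume the cache is already warm.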