diff --git a/src/audio_processor.js b/src/audio_processor.js
new file mode 100644
index 0000000..4734432
--- /dev/null
+++ b/src/audio_processor.js
@@ -0,0 +1,322 @@
+// Audio processing module - advanced audio handling extracted from new_app.js
+
+class AudioProcessor {
+    constructor(options = {}) {
+        this.audioContext = null;
+        this.mediaStream = null; // MediaStream from getUserMedia, kept so the tracks can be stopped later
+        this.isRecording = false;
+        this.audioChunks = [];
+        
+        // Voice activity detection (VAD) state
+        this.isSpeaking = false;
+        this.silenceThreshold = options.silenceThreshold || 0.01;
+        this.silenceTimeout = options.silenceTimeout || 1000;
+        this.minSpeechDuration = options.minSpeechDuration || 300;
+        this.silenceTimer = null;
+        this.speechStartTime = null;
+        this.audioBuffer = [];
+        
+        // ASR API configuration
+        this.apiConfig = {
+            url: 'https://openspeech.bytedance.com/api/v3/auc/bigmodel/recognize/flash',
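+            // NOTE: hardcoded demo credentials below; in a real deployment, load keys from environment/config instead of committing them.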
+            headers: {
+                'X-Api-App-Key': '1988591469',
+                'X-Api-Access-Key': 'mdEyhgZ59on1-NK3GXWAp3L4iLldSG0r',
+                'X-Api-Resource-Id': 'volc.bigasr.auc_turbo',
+                'X-Api-Request-Id': this.generateUUID(),
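+                // NOTE: the Request-Id above is generated once per instance; regenerate it per call if the API expects a unique id for every request.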
+                'X-Api-Sequence': '-1',
+                'Content-Type': 'application/json'
+            }
+        };
+        
+        // Callbacks
+        this.onSpeechStart = options.onSpeechStart || (() => {});
+        this.onSpeechEnd = options.onSpeechEnd || (() => {});
+        this.onRecognitionResult = options.onRecognitionResult || (() => {});
+        this.onError = options.onError || (() => {});
+        this.onStatusUpdate = options.onStatusUpdate || (() => {});
+    }
+    
+    // Generate an RFC 4122 version 4 UUID
+    generateUUID() {
+        return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
+            const r = Math.random() * 16 | 0;
+            const v = c === 'x' ? r : (r & 0x3 | 0x8);
+            return v.toString(16);
+        });
+    }
+    
+    // Compute the audio level as the root-mean-square (RMS) of the sample block
+    calculateAudioLevel(audioData) {
+        let sum = 0;
+        for (let i = 0; i < audioData.length; i++) {
+            sum += audioData[i] * audioData[i];
+        }
+        return Math.sqrt(sum / audioData.length);
+    }
+    
+    // Energy-based voice activity detection; returns true while audio should be buffered
+    detectVoiceActivity(audioData) {
+        const audioLevel = this.calculateAudioLevel(audioData);
+        const currentTime = Date.now();
+        
+        if (audioLevel > this.silenceThreshold) {
+            if (!this.isSpeaking) {
+                this.isSpeaking = true;
+                this.speechStartTime = currentTime;
+                this.audioBuffer = [];
+                this.onSpeechStart();
+                this.onStatusUpdate('Speech detected, recording...', 'speaking');
+                console.log('Speech started');
+            }
+            
+            if (this.silenceTimer) {
+                clearTimeout(this.silenceTimer);
+                this.silenceTimer = null;
+            }
+            
+            return true;
+        } else {
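+            // Possible pause: arm the silence timer; speech only ends if silence persists for silenceTimeout ms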
+            if (this.isSpeaking && !this.silenceTimer) {
+                this.silenceTimer = setTimeout(() => {
+                    this.handleSpeechEnd();
+                }, this.silenceTimeout);
+            }
+            
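+            // While the timer runs we still report "speaking" so trailing audio keeps being buffered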
+            return this.isSpeaking;
+        }
+    }
+    
+    // Handle end of speech: flush the buffer if the utterance was long enough
+    async handleSpeechEnd() {
+        if (this.isSpeaking) {
+            const speechDuration = Date.now() - this.speechStartTime;
+            
+            if (speechDuration >= this.minSpeechDuration) {
+                console.log(`Speech ended, duration: ${speechDuration}ms`);
+                // Set the status before awaiting, so the completion status is not overwritten afterwards
+                this.onStatusUpdate('Recognizing speech...', 'processing');
+                await this.processAudioBuffer();
+            } else {
+                console.log('Speech too short, ignoring');
+                this.onStatusUpdate('Waiting for speech input...', 'ready');
+            }
+            
+            this.isSpeaking = false;
+            this.speechStartTime = null;
+            this.audioBuffer = [];
+            this.onSpeechEnd();
+        }
+        
+        if (this.silenceTimer) {
+            clearTimeout(this.silenceTimer);
+            this.silenceTimer = null;
+        }
+    }
+    
+    // Merge the buffered audio, encode it as WAV, and send it to the ASR API
+    async processAudioBuffer() {
+        if (this.audioBuffer.length === 0) {
+            return;
+        }
+        
+        try {
+            // Concatenate all buffered chunks into one Float32Array
+            const totalLength = this.audioBuffer.reduce((sum, buffer) => sum + buffer.length, 0);
+            const combinedBuffer = new Float32Array(totalLength);
+            let offset = 0;
+            
+            for (const buffer of this.audioBuffer) {
+                combinedBuffer.set(buffer, offset);
+                offset += buffer.length;
+            }
+            
+            // Encode as 16 kHz mono WAV, then base64 for the JSON payload
+            const wavBuffer = this.encodeWAV(combinedBuffer, 16000);
+            const base64Audio = this.arrayBufferToBase64(wavBuffer);
+            
+            // Call the ASR API
+            await this.callASRAPI(base64Audio);
+            
+        } catch (error) {
+            console.error('Failed to process audio data:', error);
+            this.onError('Failed to process audio data: ' + error.message);
+        }
+    }
+    
+    // Send base64-encoded WAV audio to the speech recognition endpoint
+    async callASRAPI(base64AudioData) {
+        try {
+            const requestBody = {
+                user: {
+                    uid: "1988591469"
+                },
+                audio: {
+                    data: base64AudioData
+                },
+                request: {
+                    model_name: "bigmodel"
+                }
+            };
+            
+            const response = await fetch(this.apiConfig.url, {
+                method: 'POST',
+                headers: this.apiConfig.headers,
+                body: JSON.stringify(requestBody)
+            });
+            
+            if (!response.ok) {
+                throw new Error(`HTTP error! status: ${response.status}`);
+            }
+            
+            const result = await response.json();
+            this.handleASRResponse(result);
+            
+        } catch (error) {
+            console.error('ASR API call failed:', error);
+            this.onError('ASR API call failed: ' + error.message);
+        }
+    }
+    
+    // Handle the ASR response
+    handleASRResponse(response) {
+        console.log('ASR response:', response);
+        
+        if (response && response.result) {
+            const recognizedText = response.result.text;
+            this.onRecognitionResult(recognizedText);
+            this.onStatusUpdate('Recognition complete', 'completed');
+        } else {
+            console.log('No text recognized');
+            this.onStatusUpdate('No text recognized', 'ready');
+        }
+    }
+    
+    // Encode Float32 samples as a 16-bit PCM mono WAV file
+    encodeWAV(samples, sampleRate) {
+        const length = samples.length;
+        const buffer = new ArrayBuffer(44 + length * 2);
+        const view = new DataView(buffer);
+        
+        // WAV/RIFF header (44 bytes)
+        const writeString = (offset, string) => {
+            for (let i = 0; i < string.length; i++) {
+                view.setUint8(offset + i, string.charCodeAt(i));
+            }
+        };
+        
+        writeString(0, 'RIFF');
+        view.setUint32(4, 36 + length * 2, true);
+        writeString(8, 'WAVE');
+        writeString(12, 'fmt ');
+        view.setUint32(16, 16, true);
+        view.setUint16(20, 1, true);
+        view.setUint16(22, 1, true);
+        view.setUint32(24, sampleRate, true);
+        view.setUint32(28, sampleRate * 2, true);
+        view.setUint16(32, 2, true);
+        view.setUint16(34, 16, true);
+        writeString(36, 'data');
+        view.setUint32(40, length * 2, true);
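+        // Header recap: RIFF chunk size = 36 + data bytes; "fmt " subchunk is 16 bytes
+        // (PCM format 1, mono, sample rate, byte rate = sampleRate * 2, block align = 2,
+        // 16 bits per sample); "data" subchunk length = samples * 2.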
+        
+        // Write the samples, clamped to [-1, 1] and scaled to signed 16-bit
+        let offset = 44;
+        for (let i = 0; i < length; i++) {
+            const sample = Math.max(-1, Math.min(1, samples[i]));
+            view.setInt16(offset, sample * 0x7FFF, true);
+            offset += 2;
+        }
+        
+        return buffer;
+    }
+    
+    // Convert an ArrayBuffer to a base64 string
+    arrayBufferToBase64(buffer) {
+        let binary = '';
+        const bytes = new Uint8Array(buffer);
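+        // Byte-by-byte string building is fine for short utterances; chunk the conversion for very large buffers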
+        for (let i = 0; i < bytes.byteLength; i++) {
+            binary += String.fromCharCode(bytes[i]);
+        }
+        return btoa(binary);
+    }
+    
+    // Start capturing microphone audio
+    async startRecording() {
+        try {
+            const stream = await navigator.mediaDevices.getUserMedia({
+                audio: {
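+                    // NOTE: browsers may ignore this sampleRate constraint; the 16 kHz AudioContext below pins the processing rate (where supported)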
+                    sampleRate: 16000,
+                    channelCount: 1,
+                    echoCancellation: true,
+                    noiseSuppression: true
+                }
+            });
+            this.mediaStream = stream; // keep a reference so stopRecording() can release the microphone
+            
+            this.audioContext = new (window.AudioContext || window.webkitAudioContext)({
+                sampleRate: 16000
+            });
+            
+            const source = this.audioContext.createMediaStreamSource(stream);
+            const processor = this.audioContext.createScriptProcessor(4096, 1, 1);
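+            // NOTE: createScriptProcessor is deprecated in the Web Audio API; AudioWorkletNode is the modern replacement.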
+            
+            processor.onaudioprocess = (event) => {
+                const inputBuffer = event.inputBuffer;
+                const inputData = inputBuffer.getChannelData(0);
+                
+                // Run voice activity detection on this block
+                if (this.detectVoiceActivity(inputData)) {
+                    // Speech (or its trailing hangover) is active: buffer a copy of this block
+                    this.audioBuffer.push(new Float32Array(inputData));
+                }
+            };
+            
+            source.connect(processor);
+            processor.connect(this.audioContext.destination);
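+            // Connecting to the destination is required in some browsers for onaudioprocess to fire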
+            
+            this.isRecording = true;
+            this.onStatusUpdate('Waiting for speech input...', 'ready');
+            
+            return true;
+            
+        } catch (error) {
+            console.error('Failed to start recording:', error);
+            this.onError('Failed to start recording: ' + error.message);
+            return false;
+        }
+    }
+    
+    // Stop recording and release the microphone
+    stopRecording() {
+        if (this.mediaStream) {
+            // Stop the tracks so the browser releases the microphone
+            this.mediaStream.getTracks().forEach(track => track.stop());
+            this.mediaStream = null;
+        }
+        
+        if (this.audioContext) {
+            this.audioContext.close();
+            this.audioContext = null;
+        }
+        
+        if (this.silenceTimer) {
+            clearTimeout(this.silenceTimer);
+            this.silenceTimer = null;
+        }
+        
+        // If speech is still in progress, flush the final audio
+        if (this.isSpeaking) {
+            this.handleSpeechEnd();
+        }
+        
+        this.isRecording = false;
+        this.isSpeaking = false;
+        this.audioBuffer = [];
+        
+        this.onStatusUpdate('Recording stopped', 'stopped');
+        console.log('Recording stopped');
+    }
+    
+    // Report the current recording state
+    getRecordingStatus() {
+        return {
+            isRecording: this.isRecording,
+            isSpeaking: this.isSpeaking,
+            hasAudioContext: !!this.audioContext
+        };
+    }
+}
+
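+// Example usage (illustrative sketch; the option names match the constructor above):
+//
+//   const processor = new AudioProcessor({
+//       silenceThreshold: 0.01,
+//       onRecognitionResult: (text) => console.log('Recognized:', text),
+//       onError: (msg) => console.error(msg),
+//   });
+//   await processor.startRecording();
+//   // ...speak; recognition results arrive via the callback...
+//   processor.stopRecording();
+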
+// Module export
+export { AudioProcessor };
\ No newline at end of file
diff --git a/src/chat_with_audio.js b/src/chat_with_audio.js
index bbe0d4d..6a0e6f8 100644
--- a/src/chat_with_audio.js
+++ b/src/chat_with_audio.js
@@ -6,6 +6,9 @@ import { getLLMConfig, getMinimaxiConfig, getAudioConfig, validateConfig } from
 
 // Flag to prevent overlapping playback
 let isPlaying = false;
+// Audio playback queue
+let audioQueue = [];
+let isProcessingQueue = false;
 
 async function chatWithAudioStream(userInput) {
   // 验证配置
@@ -20,7 +23,48 @@ async function chatWithAudioStream(userInput) {
   const minimaxiConfig = getMinimaxiConfig();
   const audioConfig = getAudioConfig();
   
-  // 1. Request the LLM answer
+  // Reset the audio queue for this conversation turn
+  audioQueue = [];
+  
+  // Handler invoked for each complete text segment streamed by the LLM
+  const handleSegment = async (segment) => {
+    console.log('\n=== Processing text segment ===');
+    console.log('Segment content:', segment);
+    
+    try {
+      // Synthesize audio for this segment
+      const audioResult = await requestMinimaxi({
+        apiKey: minimaxiConfig.apiKey,
+        groupId: minimaxiConfig.groupId,
+        body: {
+          model: audioConfig.model,
+          text: segment,
+          stream: audioConfig.stream,
+          language_boost: audioConfig.language_boost,
+          output_format: audioConfig.output_format,
+          voice_setting: audioConfig.voiceSetting,
+          audio_setting: audioConfig.audioSetting,
+        },
+        stream: true,
+      });
+      
+      // Append the synthesized audio to the playback queue
+      if (audioResult && audioResult.data && audioResult.data.audio) {
+        audioQueue.push({
+          text: segment,
+          audioHex: audioResult.data.audio
+        });
+        console.log('Audio queued, queue length:', audioQueue.length);
+        
+        // Kick off (or continue) draining the queue
+        processAudioQueue();
+      }
+    } catch (error) {
+      console.error('Audio synthesis failed:', error);
+    }
+  };
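+  // NOTE (assumption): if segments stream in faster than TTS completes, responses may
+  // resolve out of order; the queue preserves arrival order, not necessarily segment order.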
+  
+  // 1. Request the LLM answer, processing each segment as it streams in
   console.log('\n=== Requesting LLM answer ===');
   const llmResponse = await requestLLMStream({
     apiKey: llmConfig.apiKey,
@@ -29,55 +73,45 @@ async function chatWithAudioStream(userInput) {
       { role: 'system', content: 'You are a helpful assistant.' },
       { role: 'user', content: userInput },
     ],
+    onSegment: handleSegment // per-segment processing callback
   });
   
-  // Extract the LLM answer content (now returned directly)
-  const llmContent = llmResponse;
-  
-  console.log('\n=== LLM answer ===');
-  console.log("llmResponse: ", llmContent);
-  
-  // 2. Synthesize audio
-  console.log('\n=== Starting audio synthesis ===');
-  const audioResult = await requestMinimaxi({
-    apiKey: minimaxiConfig.apiKey,
-    groupId: minimaxiConfig.groupId,
-    body: {
-      model: audioConfig.model,
-      text: llmContent,
-      stream: audioConfig.stream,
-      language_boost: audioConfig.language_boost,
-      output_format: audioConfig.output_format,
-      voice_setting: audioConfig.voiceSetting,
-      audio_setting: audioConfig.audioSetting,
-    },
-    stream: true,
-  });
-  
-  // 3. Stream audio playback
-  console.log('\n=== Starting streaming audio playback ===');
-  // console.log('Audio data length:', audioResult.data.audio.length);
-  await playAudioStream(audioResult.data.audio);
+  console.log('\n=== Full LLM answer ===');
+  console.log("llmResponse: ", llmResponse);
   
   return {
     userInput,
-    llmResponse: llmContent,
-    audioResult,
+    llmResponse,
+    audioQueue: audioQueue.map(item => ({ text: item.text, hasAudio: !!item.audioHex }))
   };
 }
 
+// Drain the playback queue, playing items sequentially
+async function processAudioQueue() {
+  if (isProcessingQueue) return;
+  
+  isProcessingQueue = true;
+  
+  while (audioQueue.length > 0) {
+    const audioItem = audioQueue.shift();
+    console.log('\n=== Playing queued audio ===');
+    console.log('Text:', audioItem.text);
+    
+    try {
+      await playAudioStream(audioItem.audioHex);
+    } catch (error) {
+      console.error('Audio playback failed:', error);
+    }
+  }
+  
+  isProcessingQueue = false;
+}
+
 // Stream audio playback
 async function playAudioStream(audioHex) {
-  if (isPlaying) {
-    console.log('Audio already playing, skipping duplicate playback');
-    return;
-  }
-  
   console.log('=== Starting audio playback ===');
   console.log('Audio data length:', audioHex.length);
   
-  isPlaying = true;
-  
   // Convert the hex string to an ArrayBuffer
   const audioBuffer = hexToArrayBuffer(audioHex);
   
@@ -102,13 +136,11 @@ async function playAudioStream(audioHex) {
     return new Promise((resolve) => {
       source.onended = () => {
         console.log('Audio playback finished');
-        isPlaying = false;
         resolve();
       };
     });
   } catch (error) {
     console.error('Audio playback failed:', error);
-    isPlaying = false;
     throw error;
   }
 }
@@ -175,4 +207,6 @@ async function playAudioStreamNode(audioHex) {
   }
 }
 
-export { chatWithAudioStream, playAudioStream, playAudioStreamNode }; 
\ No newline at end of file
+export { chatWithAudioStream, playAudioStream, playAudioStreamNode };
\ No newline at end of file
diff --git a/src/config.js b/src/config.js
index 8cb236c..9d4b477 100644
--- a/src/config.js
+++ b/src/config.js
@@ -16,11 +16,11 @@ export const config = {
   audio: {
     model: 'speech-02-hd',
     voiceSetting: {
-      voice_id: 'yantu-qinggang',
+      voice_id: 'yantu-qinggang-2',
       speed: 1,
       vol: 1,
       pitch: 0,
-      emotion: 'happy',
+      // emotion: 'happy',
     },
     audioSetting: {
       sample_rate: 32000,
diff --git a/src/index - 副本.html b/src/index - 副本.html
new file mode 100644
index 0000000..c8bb39e
--- /dev/null
+++ b/src/index - 副本.html	
@@ -0,0 +1,139 @@
+<!-- Page content (markup not recoverable from this capture; the commit adds 139 lines):
+     a standalone "实时语音识别" (real-time speech recognition) demo page with a page
+     heading, a start-recording button, a connection-status line ("未连接" / "Not
+     connected"), usage instructions (1. click "开始录音" (start recording) to enable the
+     microphone; 2. the system detects speech automatically and records only while you
+     are speaking; 3. when you stop speaking, the audio is sent for recognition
+     automatically; 4. results appear in the area below), and a recognition-results
+     container. -->