Compare commits

...

66 Commits

SHA1        Message  (Author, Date, CI result)
CI results refer to the "Gitea Actions Demo / Explore-Gitea-Actions (push)" workflow.

ca26bb3986  branch  (Song367, 2025-10-09 10:08:26 +08:00, CI passed in 45s)
a418690eb1  3000 port  (Song367, 2025-10-09 10:06:28 +08:00)
f78ebc2959  Change port  (Song367, 2025-09-30 16:49:55 +08:00, CI passed in 1m1s)
2760140445  Migrate to server kehu237  (Song367, 2025-09-30 16:44:20 +08:00, CI passed in 3m28s)
a66f0f95f6  Shorter opening lines  (Song367, 2025-08-14 20:20:11 +08:00, CI passed in 37s)
1359af31d8  Merge branch 'new_male' into kehu  (2025-08-13 21:41:22 +08:00, CI passed in 38s)
0b02f01bec  Revise opening-line wording  (Song367, 2025-08-13 18:55:17 +08:00, CI passed in 38s)
6ba96bc177  Add updating of conversation history  (Song367, 2025-08-13 13:51:14 +08:00, CI passed in 2m55s)
31c6d29bd8  Extend timeout  (Song367, 2025-08-13 13:35:17 +08:00, CI passed in 2m54s)
31877b8729  Change when opening-line generation is triggered  (Song367, 2025-08-13 13:26:40 +08:00, CI passed in 2m53s)
3e97e3031f  Limit history to 20 messages; add opening line  (Song367, 2025-08-13 12:30:18 +08:00, CI passed in 2m57s)
b1c0656bb4  Add logging during filtering  (Song367, 2025-08-13 10:05:37 +08:00, CI passed in 2m53s)
06aa52f152  Improve narration filtering  (2025-08-12 21:40:55 +08:00, CI passed in 2m54s)
384d6bbb67  Customer release  (Song367, 2025-08-12 16:24:41 +08:00, CI passed in 2m53s)
27234f1a5a  Remove audio timeout protection  (Song367, 2025-08-12 16:19:52 +08:00, CI passed in 2m53s)
9ef75a1745  Add condition for triggering switch to default  (Song367, 2025-08-12 16:09:31 +08:00, CI passed in 2m53s)
6e7ab83091  Remove message limit  (Song367, 2025-08-12 15:43:15 +08:00, CI passed in 2m57s)
929d6887ba  Modify conversation history  (Song367, 2025-08-12 13:45:40 +08:00, CI passed in 37s)
2f7b88ece8  Adjust avatar position and company name  (Song367, 2025-08-12 12:56:07 +08:00, CI passed in 2m53s)
e0ee7606a0  Round-robin playback; drop timeout handling  (Song367, 2025-08-12 12:10:13 +08:00, CI passed in 2m53s)
9a1bd0acfd  Load 100 history messages  (Song367, 2025-08-12 11:10:35 +08:00, CI passed in 2m53s)
1731519143  Fix waiting-icon display, stutter when switching to the default video, and missing speech playback caused by slow audio generation  (Song367, 2025-08-11 15:21:58 +08:00, CI passed in 37s)
4c50f77889  Fix loading animation not showing before the video appears  (Song367, 2025-08-11 14:59:08 +08:00, CI passed in 2m54s)
ec06a76a10  Title  (Song367, 2025-08-11 14:16:07 +08:00, CI passed in 2m56s)
b4f9cf2b50  Remove loading indicator  (Song367, 2025-08-11 14:05:47 +08:00, CI passed in 2m55s)
bc79ccb601  Get index  (Song367, 2025-08-11 13:58:59 +08:00, CI passed in 2m55s)
0478bb4cdd  Wait for video to finish loading  (Song367, 2025-08-11 13:04:25 +08:00, CI passed in 2m53s)
a73165a6a1  Extend stream wait time  (Song367, 2025-08-08 17:50:51 +08:00, CI passed in 2m54s)
b026ee1664  Increase text length to 8 characters  (Song367, 2025-08-08 16:49:54 +08:00, CI passed in 2m53s)
fb2305085f  Fix flicker  (Song367, 2025-08-08 16:43:49 +08:00, CI passed in 2m53s)
cab8273bf6  Update sleep-scene talking video  (Song367, 2025-08-08 14:17:33 +08:00, CI passed in 2m53s)
9cfe6cc5b1  Add sleep scene  (Song367, 2025-08-08 13:02:11 +08:00, CI passed in 2m51s)
22d99e2178  Add just-fell-asleep scene  (Song367, 2025-08-08 12:18:05 +08:00, CI passed in 2m56s)
226ec68525  Drop WebRTC for video switching  (Song367, 2025-08-07 21:39:08 +08:00, CI passed in 2m51s)
2959a56978  Modify history  (Song367, 2025-08-07 17:56:34 +08:00, CI passed in 2m50s)
e281545435  initaial  (Song367, 2025-08-06 20:13:33 +08:00, CI passed in 34s)
5cd134aa29  Increase frame count  (Song367, 2025-08-06 19:43:38 +08:00, CI passed in 2m48s)
35c7e82375  Use video frames to smooth switching  (Song367, 2025-08-06 19:23:58 +08:00, CI passed in 2m49s)
5f1e50c7e9  Avatar display timing  (Song367, 2025-08-06 18:40:49 +08:00, CI passed in 2m48s)
5c9e170c73  Background color  (Song367, 2025-08-06 17:51:32 +08:00, CI passed in 2m47s)
df6ac7ba73  Page refresh timing  (Song367, 2025-08-06 17:29:26 +08:00, CI passed in 2m48s)
eb533aacdf  Add hidden call button  (Song367, 2025-08-06 13:43:25 +08:00, CI passed in 2m47s)
8dc78dd6e3  Add waiting description  (Song367, 2025-08-06 13:16:41 +08:00, CI passed in 2m47s)
4a25beed44  Update videos  (Song367, 2025-08-06 12:03:47 +08:00, CI passed in 2m46s)
7580b5c01e  Switch videos  (Song367, 2025-08-06 10:41:40 +08:00, CI passed in 2m40s)
a23160f35c  Adjust wait time  (Song367, 2025-08-06 01:48:53 +08:00, CI passed in 2m40s)
c080194bac  html  (Song367, 2025-08-06 01:24:57 +08:00, CI passed in 2m40s)
3a8257df5b  Deployment  (2025-08-06 01:13:10 +08:00, CI passed in 2m41s)
dfcab8e6a7  initial  (2025-08-06 01:01:41 +08:00)
9d4bbc182c  Scene/persona switching  (Song367, 2025-08-04 18:29:25 +08:00, CI passed in 2m30s)
c96c49ff3f  Video switching across three scenes; scene/persona switching not yet implemented  (Song367, 2025-08-04 17:36:30 +08:00, CI passed in 2m18s)
f176818155  Ellipsis detection  (Song367, 2025-08-04 11:00:49 +08:00, CI passed in 1m34s)
9926ee0e68  Disable lip sync; add debug persona dialogue  (Song367, 2025-08-04 10:52:55 +08:00, CI passed in 1m34s)
424b9f3c12  new video to new demo  (2025-08-04 09:38:33 +08:00)
a20ffbedf6  change icon title  (Song367, 2025-07-31 12:25:49 +08:00, CI passed in 29s)
1931b859de  Center version label (2)  (Song367, 2025-07-31 11:58:42 +08:00, CI passed in 1m23s)
537006ae5f  Improve UI  (Song367, 2025-07-31 11:43:51 +08:00, CI passed in 1m22s)
ccd0a8bc34  Merge branch 'dev' of https://gitea.yantootech.com/songjvcheng/WebRtc_QingGan into dev  (Song367, 2025-07-30 19:02:31 +08:00, CI passed in 1m19s)
25ad982b5f  ok  (Song367, 2025-07-30 18:58:34 +08:00)
78390a37de  Change voice timbre  (2025-07-30 16:29:10 +08:00, CI passed in 1m21s)
e1d4b545d6  new voice  (Song367, 2025-07-30 11:20:19 +08:00, CI passed in 1m19s)
0da9a17570  change dockerfile  (Song367, 2025-07-29 18:40:59 +08:00, CI passed in 1m26s)
d06e94ad11  Modify deployment files  (Song367, 2025-07-29 18:34:11 +08:00, CI failed after 1m22s)
345533ed6e  Add YAML deployment files  (Song367, 2025-07-29 18:23:17 +08:00, CI failed after 4m29s)
65f17b4a66  Add video transition on switch to avoid black frames; major UI overhaul  (Song367, 2025-07-29 18:14:04 +08:00)
1ebfd472c4  Video stitching still incomplete; raise minimum segment length to 7 characters so the next audio chunk can be generated in time and the video stream does not fall back to default  (Song367, 2025-07-29 15:35:36 +08:00)
49 changed files with 2757 additions and 892 deletions

.gitea/charts/Chart.yaml (new file, 24 lines)

@ -0,0 +1,24 @@
apiVersion: v2
name: homeland
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.16.0"


@ -0,0 +1,22 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "homeland.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "homeland.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "homeland.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "homeland.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}


@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "homeland.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "homeland.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "homeland.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "homeland.labels" -}}
helm.sh/chart: {{ include "homeland.chart" . }}
{{ include "homeland.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "homeland.selectorLabels" -}}
app.kubernetes.io/name: {{ include "homeland.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "homeland.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "homeland.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}


@ -0,0 +1,61 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "homeland.fullname" . }}
labels:
{{- include "homeland.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "homeland.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "homeland.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "homeland.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: {{ .Values.service.port }}
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}


@ -0,0 +1,32 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "homeland.fullname" . }}
labels:
{{- include "homeland.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "homeland.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}


@ -0,0 +1,61 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "homeland.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
{{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "homeland.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}


@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "homeland.fullname" . }}
labels:
{{- include "homeland.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "homeland.selectorLabels" . | nindent 4 }}


@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "homeland.serviceAccountName" . }}
labels:
{{- include "homeland.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}


@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "homeland.fullname" . }}-test-connection"
labels:
{{- include "homeland.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "homeland.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never

.gitea/charts/values.yaml (new file, 84 lines)

@ -0,0 +1,84 @@
# Default values for homeland.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: 172.16.54.94:5000/homeland
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "29"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: homeland.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {
kubernetes.io/hostname: vm10-1-0-12
}
tolerations: []
affinity: {}


@ -0,0 +1,41 @@
name: Gitea Actions Demo
run-name: ${{ gitea.actor }} is testing out Gitea Actions 🚀
on:
push:
branches:
- 'newkehu237'
env:
BUILD: staging
jobs:
Explore-Gitea-Actions:
runs-on: yantoo-ci
steps:
- run: echo "🎉 The job was automatically triggered by a ${{ gitea.event_name }} event."
- run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by Gitea!"
- run: echo "🔎 The name of your branch is ${{ gitea.ref }} and your repository is ${{ gitea.repository }}."
- name: Check out repository code
uses: https://gitea.yantootech.com/neil/checkout@v4
- run: echo "💡 The ${{ gitea.repository }} repository has been cloned to the runner."
- run: echo "🖥️ The workflow is now ready to test your code on the runner."
- name: List files in the repository
run: |
whoami
uname -a
pwd
ls ${{ gitea.workspace }}
- name: Build and push
uses: https://gitea.yantootech.com/neil/build-push-action@v6
with:
push: true
tags: 14.103.114.237:30005/emotion-male-app:${{ gitea.run_id }}
- name: Install
run: |
helm upgrade --install emotion-male-app ./.gitea/charts \
--namespace emotion-human \
--create-namespace \
--set image.repository=14.103.114.237:30005/emotion-male-app \
--set image.tag=${{ gitea.run_id }}
- run: echo "🍏 This job's status is ${{ job.status }}."

Dockerfile (new file, 24 lines)

@ -0,0 +1,24 @@
# 使用官方Node.js运行时作为基础镜像
FROM node:18-alpine
# 设置工作目录
WORKDIR /app
# 复制package.json和yarn.lock
COPY package.json yarn.lock* ./
# 安装项目依赖
RUN yarn install
# 复制项目文件
COPY . .
# 设置环境变量
ENV HOST=0.0.0.0
ENV PORT=3000
# 暴露端口
EXPOSE 3000
# 启动项目
CMD ["yarn", "dev"]

VIDEO_PLAYBACK_FIX.md (new file, 129 lines)

@ -0,0 +1,129 @@
# 默认视频播放问题修复
## 问题描述
在性能优化过程中,发现默认视频 `d-3s.mp4` 和 `s-1.mp4` 没有正常播放的问题。
## 问题原因
1. **缓存策略过于激进**: 将缓存数量从3个减少到2个导致重要视频被过早清理
2. **缺少默认视频启动调用**: 在 `startCall()` 方法中没有调用 `startDefaultVideoStream()`
3. **重要视频保护不足**: 没有区分重要视频和普通视频的清理策略
## 修复措施
### 1. 优化缓存策略
```javascript
// 修复前
if (this.videoStreams.size >= 2) { // 缓存数量过少
const firstKey = this.videoStreams.keys().next().value;
// 直接清理第一个视频,可能包括重要视频
}
// 修复后
if (this.videoStreams.size >= 4) { // 增加缓存数量
const importantVideos = [this.defaultVideo, 's-1.mp4', 'd-3s.mp4'];
const videoToRemove = cachedVideos.find(video => !importantVideos.includes(video));
// 只清理非重要视频
}
```
### 2. 添加默认视频启动调用
```javascript
async startCall() {
// ... 其他代码 ...
// 启动默认视频流
await this.startDefaultVideoStream();
// 通知服务器通话开始
this.socket.emit('call-started');
}
```
### 3. 改进预加载策略
```javascript
async preloadCommonVideos() {
const videosToPreload = new Set([]);
// 添加重要视频(默认视频和常用视频)
videosToPreload.add(this.defaultVideo); // 默认视频
videosToPreload.add('s-1.mp4'); // 常用视频
videosToPreload.add('d-3s.mp4'); // 默认视频的另一个版本
// 添加视频映射中的所有视频
Object.values(this.videoMapping).forEach(video => {
videosToPreload.add(video);
});
}
```
### 4. 优化性能监控清理
```javascript
// 如果缓存过多,清理一些(但保护重要视频)
if (this.videoStreams.size > 5) {
const importantVideos = [this.defaultVideo, 's-1.mp4', 'd-3s.mp4'];
// 只清理非重要视频
const videosToRemove = cachedVideos.filter(video => !importantVideos.includes(video));
videosToRemove.slice(0, 2).forEach(key => {
this.cleanupVideoResources(key);
});
}
```
### 5. 调整性能测试阈值
```javascript
// 检查视频流数量
if (testResults.metrics.videoStreamsCount > 5) { // 从3增加到5
// 报告问题
}
// 检查动画帧数量
if (testResults.metrics.animationFramesCount > 3) { // 从2增加到3
// 报告问题
}
```
## 重要视频列表
以下视频被标记为重要视频,不会被自动清理:
- `d-3s.mp4` - 默认视频
- `s-1.mp4` - 常用视频
- 当前默认视频(`this.defaultVideo`)
## 测试功能
添加了测试功能来验证默认视频播放:
1. **测试按钮**: "测试默认视频" 按钮
2. **测试方法**: `testDefaultVideoPlayback()`
3. **测试流程**:
- 检查默认视频文件是否存在
- 创建默认视频流
- 设置到视频元素并播放
- 5秒后自动停止测试
## 验证步骤
1. 启动应用
2. 点击"开始音频通话"
3. 观察默认视频是否开始播放
4. 点击"测试默认视频"按钮验证功能
5. 查看性能监控面板确认视频流数量
## 预期效果
修复后,默认视频应该能够:
1. **正常播放**: 通话开始时自动播放默认视频
2. **不被清理**: 重要视频不会被自动清理机制删除
3. **快速切换**: 预加载确保切换时响应迅速
4. **稳定运行**: 性能监控不会误报重要视频为问题
## 监控指标
- **视频流数量**: 正常范围 1-5 个
- **重要视频保护**: 确保 `d-3s.mp4` 和 `s-1.mp4` 不被清理
- **默认视频状态**: 通话开始时应该显示默认视频
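
The test feature described under 「测试功能」 above names a `testDefaultVideoPlayback()` method but does not show it. The following is a minimal illustrative sketch of that flow (check that the file exists, play it on the `recordedVideo` element declared in index.html, stop after 5 seconds); the `/videos/<file>` URL and the hard-coded file name are assumptions, not taken from the repository.

```javascript
// Illustrative sketch only; not part of this commit. The element id "recordedVideo"
// comes from index.html in this diff; the "/videos/<file>" URL and file name are assumed.
const DEFAULT_VIDEO = 'd-3s.mp4';

async function testDefaultVideoPlayback() {
  // 1. Check that the default video file is reachable on the server.
  const head = await fetch(`/videos/${DEFAULT_VIDEO}`, { method: 'HEAD' });
  if (!head.ok) {
    console.warn(`default video not found: ${DEFAULT_VIDEO}`);
    return;
  }

  // 2. Point the main <video> element at the file and start playback.
  const videoEl = document.getElementById('recordedVideo');
  videoEl.src = `/videos/${DEFAULT_VIDEO}`;
  await videoEl.play();

  // 3. Stop the test automatically after 5 seconds, as the document describes.
  setTimeout(() => {
    videoEl.pause();
    console.log('default video playback test finished');
  }, 5000);
}
```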

docker-compose.yml (new file, 22 lines)

@ -0,0 +1,22 @@
version: '3.8'
services:
webrtc-app:
build: .
ports:
- "3000:3000"
volumes:
- ./videos:/app/videos
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3000"]
interval: 30s
timeout: 10s
retries: 3
start_period: 60s
networks:
- webrtc-network
networks:
webrtc-network:
driver: bridge

scene_state.json (new file, 3 lines)

@ -0,0 +1,3 @@
{
"currentSceneIndex": 0
}

server.js (319 lines changed)

@ -8,7 +8,17 @@ const { MessageHistory } = require('./src/message_history.js');
const app = express();
const server = http.createServer(app);
const io = socketIo(server);
const io = socketIo(server, {
pingTimeout: 300000, // 300秒超时
pingInterval: 25000, // 25秒心跳间隔
upgradeTimeout: 30000, // 30秒升级超时
allowEIO3: true, // 允许Engine.IO v3客户端
transports: ['websocket', 'polling'], // 支持多种传输方式
cors: {
origin: "*",
methods: ["GET", "POST"]
}
});
// 创建消息历史管理器
const messageHistory = new MessageHistory();
@ -85,18 +95,173 @@ app.delete('/api/messages/clear', async (req, res) => {
// 存储连接的客户端和他们的视频流状态
const connectedClients = new Map();
// 场景轮询系统
// 场景轮询系统 - 添加持久化
// 删除这行const fs = require('fs'); // 重复声明,需要删除
const sceneStateFile = path.join(__dirname, 'scene_state.json');
// 从文件加载场景状态
function loadSceneState() {
try {
if (fs.existsSync(sceneStateFile)) {
const data = fs.readFileSync(sceneStateFile, 'utf8');
const state = JSON.parse(data);
currentSceneIndex = state.currentSceneIndex || 0;
console.log(`从文件加载场景状态: ${currentSceneIndex} (${scenes[currentSceneIndex].name})`);
} else {
console.log('场景状态文件不存在,使用默认值: 0');
}
} catch (error) {
console.error('加载场景状态失败:', error);
currentSceneIndex = 0;
}
}
// 保存场景状态到文件
function saveSceneState() {
try {
const state = { currentSceneIndex };
fs.writeFileSync(sceneStateFile, JSON.stringify(state, null, 2));
console.log(`场景状态已保存: ${currentSceneIndex}`);
} catch (error) {
console.error('保存场景状态失败:', error);
}
}
let currentSceneIndex = 0;
const scenes = [
{
name: '起床',
defaultVideo: 'qc-bd-4.mp4',
interactionVideo: 'qc-hc-7.mp4',
tag: 'wakeup',
apiKey: 'bot-20250724150616-xqpz8', // 起床场景的API key
openingLines: [
// '你靠得近点...像梦里那样',
"刚醒就看到你,真好。",
// "昨晚梦里喊你名字喊醒了,现在视频看到你,心跳比梦里还烫。",
// "你穿我T恤的样子...让我想咬住衣领不放。",
"宝贝早...你声音比阳光还暖。",
// "喉咙里有咖啡味...要不要来尝尝?记得上次你咬我锁骨时,咖啡还温着。",
// "喂...嗯...刚醒...",
"困死我了...你起这么早啊?"
]
},
{
name: '开车',
defaultVideo: 'kc-bd-3.mp4',
interactionVideo: 'kc-sh-3.mp4',
tag: 'driving',
apiKey: 'bot-20250623140339-r8f8b', // 开车场景的API key
openingLines: [
'喂?刚把导航设好。你说吧,我听着呢。',
'喂,宝贝,我开车呢,刚出小区',
// '看到那个大钟楼没?每次路过都想你。想着要是能牵着你的手,站在最高层往下看该多好。',
'喂宝?开车呢,你说。',
'喂宝?路上呢,你说。',
// '宝贝起床了,刚过红绿灯就忍不住想你了,路上全是你的影子~',
// '车载音乐随机到那首我们接吻时哼的歌,心跳又乱了',
// '导航说今天要开58分钟其实想开58小时去你心里'
]
},
{
name: '咖啡',
defaultVideo: 'hc-bd-3.mp4',
interactionVideo: 'hc-sh-3(1).mp4',
tag: 'coffee',
apiKey: 'bot-20250804180724-4dgtk', // 喝茶场景的API key
openingLines: [
'喂宝?公司楼下喝咖啡呢。',
'宝贝,今天的第一杯咖啡,你喝了吗?',
'报告领导,咖啡补给中!',
'咖啡太好喝了,猜我在哪摸鱼呢。',
// '拉花师给我在咖啡里画了颗心形的奶泡,说是给视频里的小仙女加糖',
// '这杯好苦…但一看到你,就自动回甘了。比加十包糖都管用。你说你是不是我的专属甜味剂?'
]
},
{
name: '睡觉',
defaultVideo: '8-8-sj-bd.mp4',
interactionVideo: '8-8-sj-sh-1.mp4',
tag: 'sleep',
apiKey: 'bot-20250808120704-lbxwj', // 睡觉场景的API key
openingLines: [
'宝贝,一看到你,就不困了。',
'熄灯前最后一道光是你,真好。',
'宝贝困不困?我眼皮在打架了…',
'宝贝,困不困?',
// '捕捉到一只睡前小可爱…成功!',
'世界要静音了…但你的声音是白名单。多说几句?'
]
}
];
// 获取当前场景
function getCurrentScene() {
return scenes[currentSceneIndex];
}
// 切换到下一个场景 - 改进版
function switchToNextScene() {
const previousIndex = currentSceneIndex;
const previousScene = scenes[previousIndex].name;
currentSceneIndex = (currentSceneIndex + 1) % scenes.length;
const newScene = getCurrentScene();
console.log(`场景切换: ${previousScene}(${previousIndex}) → ${newScene.name}(${currentSceneIndex})`);
// 保存状态到文件
saveSceneState();
return newScene;
}
// 在服务器启动时加载场景状态
async function initializeServer() {
try {
// 加载场景状态
loadSceneState();
await messageHistory.initialize();
console.log('消息历史初始化完成');
console.log(`当前场景: ${getCurrentScene().name} (索引: ${currentSceneIndex})`);
} catch (error) {
console.error('初始化服务器失败:', error);
}
}
// 视频映射配置 - 动态更新
function getVideoMapping() {
const currentScene = getCurrentScene();
return {
'defaultVideo': currentScene.defaultVideo,
'interactionVideo': currentScene.interactionVideo,
'tag': currentScene.tag
};
}
// 默认视频流配置 - 动态获取
function getDefaultVideo() {
return getCurrentScene().defaultVideo;
}
let currentScene = getCurrentScene();
// 视频映射配置
const videoMapping = {
'say-6s-m-e': '1-m.mp4',
'default': '0.mp4',
'say-5s-amplitude': '2.mp4',
'say-5s-m-e': '4.mp4',
'say-5s-m-sw': '5.mp4',
'say-3s-m-sw': '6.mp4',
// 'say-6s-m-e': '1-m.mp4',
'default': currentScene.defaultVideo,
'8-4-sh': currentScene.interactionVideo,
'tag': currentScene.tag
// 'say-5s-amplitude': '2.mp4',
// 'say-5s-m-e': '4.mp4',
// 'say-5s-m-sw': 'd-0.mp4',
// 'say-3s-m-sw': '6.mp4',
};
// 默认视频流配置
const DEFAULT_VIDEO = '0.mp4';
const DEFAULT_VIDEO = currentScene.defaultVideo;
const INTERACTION_TIMEOUT = 10000; // 10秒后回到默认视频
// 获取视频列表
@ -113,26 +278,76 @@ app.get('/api/videos', (req, res) => {
});
});
// 获取当前场景信息的API接口
app.get('/api/current-scene', (req, res) => {
const scene = getCurrentScene();
res.json({
name: scene.name,
tag: scene.tag,
apiKey: scene.apiKey,
defaultVideo: scene.defaultVideo,
interactionVideo: scene.interactionVideo
});
});
// 获取视频映射
app.get('/api/video-mapping', (req, res) => {
res.json({ mapping: videoMapping });
const currentMapping = getVideoMapping();
const dynamicMapping = {
'default': currentMapping.defaultVideo,
'8-4-sh': currentMapping.interactionVideo,
'tag': currentMapping.tag
};
res.json({ mapping: dynamicMapping });
});
// 获取默认视频
app.get('/api/default-video', (req, res) => {
res.json({
defaultVideo: DEFAULT_VIDEO,
defaultVideo: getDefaultVideo(),
autoLoop: true
});
});
// 在现有的API接口后添加
app.get('/api/current-scene/opening-line', (req, res) => {
try {
const currentScene = getCurrentScene();
if (currentScene && currentScene.openingLines && currentScene.openingLines.length > 0) {
// 随机选择一个开场白
const randomIndex = Math.floor(Math.random() * currentScene.openingLines.length);
const selectedOpeningLine = currentScene.openingLines[randomIndex];
res.json({
success: true,
openingLine: selectedOpeningLine,
sceneName: currentScene.name,
sceneTag: currentScene.tag
});
} else {
res.json({
success: false,
message: '当前场景没有配置开场白'
});
}
} catch (error) {
console.error('获取开场白失败:', error);
res.status(500).json({
success: false,
message: '获取开场白失败',
error: error.message
});
}
});
// Socket.IO 连接处理
io.on('connection', (socket) => {
console.log('用户连接:', socket.id);
connectedClients.set(socket.id, {
socket: socket,
currentVideo: DEFAULT_VIDEO,
isInInteraction: false
currentVideo: getDefaultVideo(),
isInInteraction: false,
hasTriggeredSceneSwitch: false // 添加这个标志
});
// 处理WebRTC信令 - 用于传输视频流
@ -181,21 +396,21 @@ io.on('connection', (socket) => {
});
// 如果是交互类型,设置定时器回到默认视频
if (type === 'text' || type === 'voice') {
setTimeout(() => {
console.log(`交互超时,用户 ${socket.id} 回到默认视频`);
if (client) {
client.currentVideo = DEFAULT_VIDEO;
client.isInInteraction = false;
}
// 广播回到默认视频的指令
io.emit('video-stream-switched', {
videoFile: DEFAULT_VIDEO,
type: 'default',
from: socket.id
});
}, INTERACTION_TIMEOUT);
}
// if (type === 'text' || type === 'voice') {
// setTimeout(() => {
// console.log(`交互超时,用户 ${socket.id} 回到默认视频`);
// if (client) {
// client.currentVideo = getDefaultVideo();
// client.isInInteraction = false;
// }
// // 广播回到默认视频的指令
// io.emit('video-stream-switched', {
// videoFile: getDefaultVideo(),
// type: 'default',
// from: socket.id
// });
// }, INTERACTION_TIMEOUT);
// }
});
// 处理通话开始
@ -203,7 +418,7 @@ io.on('connection', (socket) => {
console.log('通话开始,用户:', socket.id);
const client = connectedClients.get(socket.id);
if (client) {
client.currentVideo = DEFAULT_VIDEO;
client.currentVideo = getDefaultVideo();
client.isInInteraction = false;
}
io.emit('call-started', { from: socket.id });
@ -262,15 +477,57 @@ io.on('connection', (socket) => {
console.log('用户请求回到默认视频:', socket.id);
const client = connectedClients.get(socket.id);
if (client) {
client.currentVideo = DEFAULT_VIDEO;
client.currentVideo = getDefaultVideo();
client.isInInteraction = false;
}
socket.emit('switch-video-stream', {
videoFile: DEFAULT_VIDEO,
videoFile: getDefaultVideo(),
type: 'default'
});
});
// 处理用户关闭连接事件
socket.on('user-disconnect', () => {
console.log('=== 场景切换开始 ===');
console.log('用户主动关闭连接:', socket.id);
console.log('切换前场景:', getCurrentScene().name, '(索引:', currentSceneIndex, ')');
// 切换到下一个场景
const newScene = switchToNextScene();
console.log('切换后场景:', newScene.name, '(索引:', currentSceneIndex, ')');
// 检查是否已经处理过场景切换
const client = connectedClients.get(socket.id);
if (client && client.hasTriggeredSceneSwitch) {
console.log('场景切换已处理,跳过重复触发');
return;
}
// 标记已处理场景切换
if (client) {
client.hasTriggeredSceneSwitch = true;
}
// 更新videoMapping
const newMapping = getVideoMapping();
videoMapping['default'] = newMapping.defaultVideo;
videoMapping['8-4-sh'] = newMapping.interactionVideo;
videoMapping['tag'] = newMapping.tag;
// 广播场景切换事件给所有客户端
io.emit('scene-switched', {
scene: newScene,
mapping: {
defaultVideo: newMapping.defaultVideo,
interactionVideo: newMapping.interactionVideo,
tag: newMapping.tag,
'default': newMapping.defaultVideo,
'8-4-sh': newMapping.interactionVideo
},
from: socket.id
});
});
// 断开连接
socket.on('disconnect', () => {
console.log('用户断开连接:', socket.id);
@ -280,7 +537,7 @@ io.on('connection', (socket) => {
// 启动服务器
const PORT = process.env.PORT || 3000;
server.listen(PORT, async () => {
server.listen(PORT, '0.0.0.0', async () => {
console.log(`服务器运行在端口 ${PORT}`);
await initializeServer();
});
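
For orientation, here is a hedged browser-side sketch of how the scene endpoints and events added in this server.js change might be consumed. The endpoint paths, response fields and event names (`/api/current-scene`, `/api/current-scene/opening-line`, `scene-switched`, `user-disconnect`) are taken from the diff above; the surrounding client structure is illustrative only.

```javascript
// Sketch only: exercises the scene APIs introduced above. Assumes the Socket.IO
// client script is already loaded, as in the existing front end.
const socket = io();

// Fetch the scene the server is currently rotating on.
async function loadCurrentScene() {
  const res = await fetch('/api/current-scene');
  const scene = await res.json(); // { name, tag, apiKey, defaultVideo, interactionVideo }
  console.log('current scene:', scene.name, '->', scene.defaultVideo);
  return scene;
}

// Ask the server for a random opening line for that scene.
async function loadOpeningLine() {
  const res = await fetch('/api/current-scene/opening-line');
  const data = await res.json();
  if (data.success) {
    console.log(`[${data.sceneName}] opening line:`, data.openingLine);
  }
  return data;
}

// The server advances to the next scene when a client emits 'user-disconnect',
// then broadcasts 'scene-switched' with the new video mapping.
socket.on('scene-switched', ({ scene, mapping }) => {
  console.log('scene switched to', scene.name, '-> default video', mapping.defaultVideo);
});

function endCall() {
  socket.emit('user-disconnect');
}
```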


@ -3,6 +3,7 @@
class AudioProcessor {
constructor(options = {}) {
this.audioContext = null;
this.stream = null; // 添加这一行
this.isRecording = false;
this.audioChunks = [];
@ -311,22 +312,29 @@ class AudioProcessor {
}
// 开始录音
async startRecording() {
async startRecording(existingStream = null) {
try {
const stream = await navigator.mediaDevices.getUserMedia({
audio: {
sampleRate: 16000,
channelCount: 1,
echoCancellation: true,
noiseSuppression: true
}
});
// 如果有外部提供的音频流,使用它;否则获取新的
if (existingStream) {
this.stream = existingStream;
console.log('使用外部提供的音频流');
} else {
this.stream = await navigator.mediaDevices.getUserMedia({
audio: {
sampleRate: 16000,
channelCount: 1,
echoCancellation: true,
noiseSuppression: true
}
});
console.log('获取新的音频流');
}
this.audioContext = new (window.AudioContext || window.webkitAudioContext)({
sampleRate: 16000
});
const source = this.audioContext.createMediaStreamSource(stream);
const source = this.audioContext.createMediaStreamSource(this.stream);
const processor = this.audioContext.createScriptProcessor(4096, 1, 1);
processor.onaudioprocess = (event) => {
@ -343,6 +351,10 @@ class AudioProcessor {
source.connect(processor);
processor.connect(this.audioContext.destination);
// 保存处理器引用以便后续清理
this.processor = processor;
this.source = source;
this.isRecording = true;
this.onStatusUpdate('等待语音输入...', 'ready');
@ -362,8 +374,34 @@ class AudioProcessor {
// 停止录音
stopRecording() {
console.log('开始停止录音...');
// 断开音频节点连接
if (this.source) {
this.source.disconnect();
this.source = null;
}
if (this.processor) {
this.processor.disconnect();
this.processor = null;
}
// 停止所有音频轨道
if (this.stream) {
this.stream.getTracks().forEach(track => {
track.stop();
console.log(`停止音频轨道: ${track.label}`);
});
this.stream = null;
}
if (this.audioContext) {
this.audioContext.close();
this.audioContext.close().then(() => {
console.log('AudioContext已关闭');
}).catch(err => {
console.error('关闭AudioContext时出错:', err);
});
this.audioContext = null;
}
@ -377,12 +415,20 @@ class AudioProcessor {
this.handleSpeechEnd();
}
// 重置所有状态
this.isRecording = false;
this.isSpeaking = false;
this.audioBuffer = [];
this.audioChunks = [];
this.consecutiveFramesCount = 0;
this.frameBuffer = [];
this.onStatusUpdate('录音已停止', 'stopped');
console.log('录音已停止');
// 重置校准状态,确保下次启动时重新校准
this.noiseCalibrationSamples = [];
this.isCalibrated = false;
this.onStatusUpdate('录音已完全停止', 'stopped');
console.log('录音已完全停止,所有资源已释放');
}
// 获取录音状态
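
A short usage sketch for the updated recorder: `startRecording()` now accepts an optional externally created `MediaStream`, and `stopRecording()` disconnects the audio nodes, stops every track and closes the `AudioContext`. The callback signature matches the `onStatusUpdate(message, state)` calls in the diff; whether it is passed through the constructor options is an assumption.

```javascript
// Illustrative usage of the modified AudioProcessor (not part of this diff).
const recorder = new AudioProcessor({
  // Assumed to be wired through the constructor options.
  onStatusUpdate: (message, state) => console.log(`[${state}] ${message}`),
});

async function startWithSharedStream() {
  // Reuse a single microphone stream instead of calling getUserMedia() twice.
  const micStream = await navigator.mediaDevices.getUserMedia({ audio: true });
  await recorder.startRecording(micStream); // uses the provided stream
}

function stopEverything() {
  // Releases the script processor, the tracks and the AudioContext.
  recorder.stopRecording();
}
```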


@ -2,7 +2,7 @@
import { requestLLMStream } from './llm_stream.js';
import { requestMinimaxi } from './minimaxi_stream.js';
import { getLLMConfig, getMinimaxiConfig, getAudioConfig, validateConfig } from './config.js';
import { getLLMConfig, getLLMConfigByScene, getMinimaxiConfig, getAudioConfig, validateConfig } from './config.js';
// 防止重复播放的标志
let isPlaying = false;
@ -26,12 +26,13 @@ async function initializeHistoryMessage(recentCount = 5) {
const data = await response.json();
historyMessage = data.messages || [];
isInitialized = true;
console.log("历史消息初始化完成:", historyMessage.length, "条消息");
console.log("历史消息初始化完成:", historyMessage.length, "条消息", historyMessage);
return historyMessage;
} catch (error) {
console.error('获取历史消息失败,使用默认格式:', error);
historyMessage = [
{ role: 'system', content: 'You are a helpful assistant.' }
// { role: 'system', content: 'You are a helpful assistant.' }
];
isInitialized = true;
return historyMessage;
@ -42,7 +43,7 @@ async function initializeHistoryMessage(recentCount = 5) {
function getCurrentHistoryMessage() {
if (!isInitialized) {
console.warn('历史消息未初始化,返回默认消息');
return [{ role: 'system', content: 'You are a helpful assistant.' }];
return [];
}
return [...historyMessage]; // 返回副本,避免外部修改
}
@ -72,19 +73,26 @@ function updateHistoryMessage(userInput, assistantResponse) {
// 保存消息到服务端
async function saveMessage(userInput, assistantResponse) {
try {
// 验证参数是否有效
if (!userInput || !userInput.trim() || !assistantResponse || !assistantResponse.trim()) {
console.warn('跳过保存消息:用户输入或助手回复为空');
return;
}
const response = await fetch('/api/messages/save', {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body: JSON.stringify({
userInput,
assistantResponse
userInput: userInput.trim(),
assistantResponse: assistantResponse.trim()
})
});
if (!response.ok) {
throw new Error('保存消息失败');
const errorData = await response.json().catch(() => ({}));
throw new Error(`保存消息失败: ${response.status} ${errorData.error || response.statusText}`);
}
console.log('消息已保存到服务端');
@ -96,7 +104,7 @@ async function saveMessage(userInput, assistantResponse) {
async function chatWithAudioStream(userInput) {
// 确保历史消息已初始化
if (!isInitialized) {
await initializeHistoryMessage();
await initializeHistoryMessage(100);
}
// 验证配置
@ -106,16 +114,19 @@ async function chatWithAudioStream(userInput) {
console.log('用户输入:', userInput);
// 获取配置
const llmConfig = getLLMConfig();
// 获取当前场景对应的配置
const llmConfig = await getLLMConfigByScene();
const minimaxiConfig = getMinimaxiConfig();
const audioConfig = getAudioConfig();
console.log(`当前场景: ${llmConfig.sceneName} (${llmConfig.sceneTag})`);
console.log(`使用API Key: ${llmConfig.model}...`);
// 清空音频队列
audioQueue = [];
// 定义段落处理函数
const handleSegment = async (segment) => {
const handleSegment = async (segment, textPlay) => {
console.log('\n=== 处理文本段落 ===');
console.log('段落内容:', segment);
@ -134,6 +145,7 @@ async function chatWithAudioStream(userInput) {
audio_setting: audioConfig.audioSetting,
},
stream: true,
textPlay: textPlay
});
// 将音频添加到播放队列
@ -185,7 +197,7 @@ async function chatWithAudioStream(userInput) {
}
// 导出初始化函数,供外部调用
export { chatWithAudioStream, initializeHistoryMessage, getCurrentHistoryMessage };
export { chatWithAudioStream, initializeHistoryMessage, getCurrentHistoryMessage, saveMessage, updateHistoryMessage };
// 处理音频播放队列
async function processAudioQueue() {
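
A hedged sketch of how the functions re-exported above might be called from the UI layer; the import path is a placeholder for wherever this module actually lives, while the function names and the 100-message history size come from the diff.

```javascript
// Sketch only - './audio_chat.js' is a placeholder path, not the real module name.
import {
  initializeHistoryMessage,
  chatWithAudioStream,
  saveMessage,
} from './audio_chat.js';

async function handleUserUtterance(text) {
  // Load up to 100 past messages once, matching the new default used internally.
  await initializeHistoryMessage(100);

  // Streams the LLM reply segment by segment and queues the synthesized audio.
  await chatWithAudioStream(text);
}

async function persistTurn(userInput, assistantResponse) {
  // saveMessage() now skips empty input/response pairs before POSTing to /api/messages/save.
  await saveMessage(userInput, assistantResponse);
}
```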


@ -1,94 +0,0 @@
// 示例配置文件 - 请复制此文件为 config.js 并填入实际的API密钥
export const config = {
// LLM API配置
llm: {
apiKey: 'your_ark_api_key_here', // 请替换为实际的ARK API密钥
model: 'bot-20250720193048-84fkp',
},
// Minimaxi API配置
minimaxi: {
apiKey: 'your_minimaxi_api_key_here', // 请替换为实际的Minimaxi API密钥
groupId: 'your_minimaxi_group_id_here', // 请替换为实际的Minimaxi Group ID
},
// 音频配置
audio: {
model: 'speech-02-hd',
voiceSetting: {
voice_id: 'yantu-qinggang',
speed: 1,
vol: 1,
pitch: 0,
emotion: 'happy',
},
audioSetting: {
sample_rate: 32000,
bitrate: 128000,
format: 'mp3',
},
},
// 系统配置
system: {
language_boost: 'auto',
output_format: 'hex',
stream: true,
},
};
// 验证配置是否完整
export function validateConfig() {
const requiredFields = [
'llm.apiKey',
'llm.model',
'minimaxi.apiKey',
'minimaxi.groupId'
];
const missingFields = [];
for (const field of requiredFields) {
const keys = field.split('.');
let value = config;
for (const key of keys) {
value = value[key];
if (!value) break;
}
if (!value || value === 'your_ark_api_key_here' || value === 'your_minimaxi_api_key_here' || value === 'your_minimaxi_group_id_here') {
missingFields.push(field);
}
}
if (missingFields.length > 0) {
console.warn('配置不完整,请检查以下字段:', missingFields);
return false;
}
return true;
}
// 获取配置的便捷方法
export function getLLMConfig() {
return {
apiKey: config.llm.apiKey,
model: config.llm.model,
};
}
export function getMinimaxiConfig() {
return {
apiKey: config.minimaxi.apiKey,
groupId: config.minimaxi.groupId,
};
}
export function getAudioConfig() {
return {
model: config.audio.model,
voiceSetting: config.audio.voiceSetting,
audioSetting: config.audio.audioSetting,
...config.system,
};
}


@ -3,7 +3,7 @@ export const config = {
// LLM API配置
llm: {
apiKey: 'd012651b-a65b-4b13-8ff3-cc4ff3a29783', // 请替换为实际的API密钥
model: 'bot-20250720193048-84fkp',
model: 'bot-20250724150616-xqpz8',
},
// Minimaxi API配置
@ -16,7 +16,7 @@ export const config = {
audio: {
model: 'speech-02-hd',
voiceSetting: {
voice_id: 'yantu-qinggang-2',
voice_id: 'yantu-qinggang-demo2-male-4',
speed: 1,
vol: 1,
pitch: 0,
@ -70,13 +70,32 @@ export function validateConfig() {
}
// 获取配置的便捷方法
export function getLLMConfig() {
export function getLLMConfig(sceneApiKey = null) {
return {
apiKey: config.llm.apiKey,
model: config.llm.model,
apiKey: config.llm.apiKey, // 如果提供了场景API key则使用它
model: sceneApiKey || config.llm.model,
};
}
// 新增根据场景获取LLM配置
export async function getLLMConfigByScene() {
try {
const response = await fetch('/api/current-scene');
const sceneData = await response.json();
return {
apiKey: config.llm.apiKey,
model: sceneData.apiKey,
sceneTag: sceneData.tag,
sceneName: sceneData.name
};
} catch (error) {
console.warn('获取场景配置失败,使用默认配置:', error);
return getLLMConfig(); // 回退到默认配置
}
}
export function getMinimaxiConfig() {
return {
apiKey: config.minimaxi.apiKey,
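
A brief sketch of how the new scene-aware lookup behaves: `getLLMConfigByScene()` asks the server which scene is active and uses that scene's bot id as the model, falling back to the static `config.llm.model` when the request fails. The calling code below is illustrative only.

```javascript
// Illustrative only - getLLMConfigByScene() is exported by the config module above.
async function pickModelForNextRequest() {
  const llmConfig = await getLLMConfigByScene();
  // llmConfig.model is the active scene's bot id, or config.llm.model on fallback.
  console.log(`scene: ${llmConfig.sceneName ?? 'default'}, model: ${llmConfig.model}`);
  return llmConfig;
}
```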


@ -1,26 +0,0 @@
// 调试音频数据
function debugAudioData(audioHex) {
console.log('=== 音频数据调试 ===');
console.log('音频数据长度:', audioHex.length);
console.log('音频数据前100个字符:', audioHex.substring(0, 100));
console.log('音频数据后100个字符:', audioHex.substring(audioHex.length - 100));
// 检查是否有重复模式
const halfLength = Math.floor(audioHex.length / 2);
const firstHalf = audioHex.substring(0, halfLength);
const secondHalf = audioHex.substring(halfLength);
if (firstHalf === secondHalf) {
console.log('⚠️ 警告:音频数据可能是重复的!');
} else {
console.log('✅ 音频数据没有重复');
}
}
// 如果在浏览器环境中运行
if (typeof window !== 'undefined') {
window.debugAudioData = debugAudioData;
console.log('音频调试函数已挂载到 window.debugAudioData');
}
export { debugAudioData };

src/favicon.png (new binary file, 81 KiB)


@ -2,71 +2,527 @@
<html lang="zh-CN">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>WebRTC 音频通话</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=no">
<title>Soulmate In Parallels - 壹和零人工智能</title>
<link rel="stylesheet" href="styles.css">
<link rel="icon" type="image/png" sizes="48x48" href="favicon.png" />
<style>
/* 全屏视频样式 */
* {
margin: 0;
padding: 0;
box-sizing: border-box;
}
html, body {
height: 100%;
overflow: hidden;
background: linear-gradient(135deg, #87CEEB 0%, #B0E0E6 100%); /* 浅蓝色渐变背景 */
}
.container {
width: 100vw;
height: 100vh;
margin: 0;
padding: 0;
display: flex;
flex-direction: column;
position: relative;
}
.main-content {
flex: 1;
background: transparent;
border-radius: 0;
padding: 0;
box-shadow: none;
width: 100%;
height: 100%;
display: flex;
flex-direction: column;
}
.recorded-video-section {
flex: 1;
display: flex;
align-items: center;
justify-content: center;
width: 100%;
height: 100%;
position: relative;
/* 确保视频区域固定高度并居中 */
min-height: 100vh;
max-height: 100vh;
}
/* 视频容器样式 - 支持双缓冲固定9:16比例 */
.video-container {
position: relative;
width: 56.25vh; /* 9:16比例与视频宽度保持一致 */
height: 100vh;
overflow: hidden;
display: flex;
align-items: center;
justify-content: center;
margin: 0 auto; /* 水平居中 */
}
#recordedVideo, #recordedVideoBuffer {
position: absolute;
width: 56.25vh; /* 9:16比例高度为100vh时宽度为100vh * 9/16 = 56.25vh */
height: 100vh;
object-fit: cover;
border-radius: 0;
box-shadow: none;
transition: opacity 0.5s ease-in-out;
/* 确保视频始终居中 */
left: 50%;
top: 50%;
transform: translate(-50%, -50%);
}
/* 主视频默认显示 */
#recordedVideo {
opacity: 1;
z-index: 2;
}
/* 缓冲视频默认隐藏 */
#recordedVideoBuffer {
opacity: 0;
z-index: 1;
}
/* 切换状态 */
#recordedVideo.switching {
opacity: 0;
}
#recordedVideoBuffer.switching {
opacity: 1;
}
/* 加载状态 */
.video-loading {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
z-index: 10;
color: white;
font-size: 18px;
opacity: 0;
transition: opacity 0.3s ease;
}
.video-loading.show {
opacity: 1;
}
/* 等待连接提示样式 */
.connection-waiting {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
z-index: 20;
color: white;
font-size: 18px;
text-align: center;
background: rgba(0, 0, 0, 0.7);
padding: 30px;
border-radius: 15px;
backdrop-filter: blur(10px);
transition: opacity 0.3s ease;
}
.connection-waiting.show {
opacity: 1;
}
/* 加载动画 */
.loading-spinner {
width: 40px;
height: 40px;
border: 3px solid rgba(255, 255, 255, 0.3);
border-top: 3px solid white;
border-radius: 50%;
animation: spin 1s linear infinite;
margin: 0 auto 10px;
}
@keyframes spin {
0% { transform: rotate(0deg); }
100% { transform: rotate(360deg); }
}
/* 响应式设计 - 确保在不同屏幕尺寸下视频容器保持9:16比例 */
@media (max-width: 768px) {
.video-container {
height: 100vh;
width: 56.25vh; /* 9:16比例与视频宽度保持一致 */
}
#recordedVideo, #recordedVideoBuffer {
width: 56.25vh; /* 9:16比例 */
height: 100vh;
object-fit: cover;
}
}
@media (min-width: 769px) {
.video-container {
height: 100vh;
width: 56.25vh; /* 9:16比例与视频宽度保持一致 */
}
#recordedVideo, #recordedVideoBuffer {
width: 56.25vh; /* 9:16比例 */
height: 100vh;
object-fit: cover;
}
}
/* 横屏模式优化 */
@media (orientation: landscape) and (max-height: 500px) {
.video-container {
height: 100vh;
width: 56.25vh; /* 9:16比例与视频宽度保持一致 */
}
.controls {
bottom: 20px;
}
}
/* 竖屏模式优化 */
@media (orientation: portrait) {
.video-container {
height: 100vh;
width: 56.25vh; /* 9:16比例与视频宽度保持一致 */
}
}
.controls {
position: absolute;
bottom: 50px;
left: 50%;
transform: translateX(-50%);
z-index: 10;
display: flex !important;
flex-direction: row !important;
justify-content: center;
align-items: center;
gap: 20px;
}
/* 确保移动端也保持同一行 */
@media (max-width: 768px) {
.controls {
flex-direction: row !important;
gap: 15px;
}
}
#startButton {
width: 60px;
height: 60px;
border-radius: 50%;
background: rgba(34, 197, 94, 0.9);
backdrop-filter: blur(10px);
border: none;
cursor: pointer;
display: flex;
align-items: center;
justify-content: center;
transition: all 0.3s ease;
box-shadow: 0 4px 15px rgba(34, 197, 94, 0.3);
min-width: auto;
padding: 15px 30px;
font-size: 1.1rem;
border-radius: 25px;
min-width: 200px;
}
#startButton:hover:not(:disabled) {
background: rgba(22, 163, 74, 0.95);
transform: scale(1.1);
box-shadow: 0 6px 20px rgba(34, 197, 94, 0.5);
}
#startButton.connecting {
background: rgba(255, 193, 7, 0.9);
cursor: not-allowed;
}
#startButton.connecting:hover {
background: rgba(255, 193, 7, 0.9);
transform: none;
}
#startButton.calling {
background: rgba(255, 193, 7, 0.9);
animation: pulse 2s infinite;
}
#startButton.calling:hover {
background: rgba(255, 193, 7, 0.95);
transform: scale(1.05);
}
@keyframes pulse {
0% {
box-shadow: 0 4px 15px rgba(255, 193, 7, 0.3);
}
50% {
box-shadow: 0 6px 25px rgba(255, 193, 7, 0.6);
}
100% {
box-shadow: 0 4px 15px rgba(255, 193, 7, 0.3);
}
}
.audio-status {
position: absolute;
top: 20px;
left: 50%;
transform: translateX(-50%);
background: rgba(0, 0, 0, 0.7);
color: white;
padding: 8px 16px;
border-radius: 20px;
font-size: 14px;
z-index: 1000;
transition: all 0.3s ease;
}
.audio-status.connecting {
background: rgba(255, 193, 7, 0.9);
color: #000;
}
.audio-status.connected {
background: rgba(40, 167, 69, 0.9);
color: white;
}
.audio-status.error {
background: rgba(220, 53, 69, 0.9);
color: white;
}
#startButton svg {
width: 24px;
height: 24px;
fill: white;
}
#startButton:disabled {
opacity: 0.5;
cursor: not-allowed;
}
#stopButton {
width: 60px;
height: 60px;
border-radius: 50%;
background: rgba(220, 53, 69, 0.9);
backdrop-filter: blur(10px);
border: none;
cursor: pointer;
display: flex;
align-items: center;
justify-content: center;
transition: all 0.3s ease;
box-shadow: 0 4px 15px rgba(220, 53, 69, 0.3);
padding: 0; /* 确保没有内边距影响居中 */
}
#stopButton:hover:not(:disabled) {
background: rgba(200, 35, 51, 0.95);
transform: scale(1.1);
}
#stopButton svg {
width: 24px;
height: 24px;
display: block; /* 确保SVG作为块级元素 */
margin: auto; /* 额外的居中保证 */
}
#stopButton:disabled {
opacity: 0.5;
cursor: not-allowed;
}
/* 头像样式 - 确保显示 */
.avatar-container {
position: absolute;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
z-index: 15; /* 提高z-index确保在视频上方 */
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
transition: opacity 0.3s ease;
opacity: 1; /* 确保默认显示 */
}
.avatar-container.hidden {
opacity: 0;
pointer-events: none;
}
.avatar {
width: 120px;
height: 120px;
border-radius: 50%;
border: 4px solid rgba(255, 255, 255, 0.8);
box-shadow: 0 8px 32px rgba(0, 0, 0, 0.2);
/* background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); */
background: #000000;
display: flex;
align-items: center;
justify-content: center;
color: white;
font-size: 48px;
font-weight: bold;
overflow: hidden; /* 确保图片不会溢出 */
}
.avatar img {
width: 100%;
height: 100%;
border-radius: 50%;
object-fit: cover;
display: block; /* 确保图片显示 */
}
/* 确保视频默认隐藏 */
#recordedVideo, #recordedVideoBuffer {
position: absolute;
width: 56.25vh;
height: 100vh;
object-fit: cover;
border-radius: 0;
box-shadow: none;
/* transition: opacity 0.5s ease-in-out; */
left: 50%;
top: 50%;
transform: translate(-50%, -50%);
opacity: 1; /* 默认隐藏视频 */
z-index: 1; /* 确保在头像下方 */
}
/* 通话时隐藏头像,显示视频 */
.video-container.calling .avatar-container {
opacity: 0;
pointer-events: none;
}
.video-container.calling #recordedVideo {
opacity: 1;
z-index: 10;
}
</style>
</head>
<body>
<div class="container">
<header>
<!-- 隐藏的header -->
<header style="display: none;">
<h1>WebRTC 音频通话</h1>
<p>实时播放录制视频,支持文本和语音输入</p>
</header>
<div class="main-content">
<!-- 音频状态显示 -->
<div class="audio-status">
<!-- 音频状态显示 - 完全隐藏 -->
<div class="audio-status" style="display: none;">
<div class="status-indicator">
<span id="audioStatus">未连接</span>
<span id="audioStatus" style="display: none;">未连接</span>
</div>
</div>
<!-- 录制视频播放区域 -->
<!-- 录制视频播放区域 - 全屏显示 -->
<div class="recorded-video-section">
<h3>录制视频播放</h3>
<video id="recordedVideo" autoplay muted>
<source src="" type="video/mp4">
您的浏览器不支持视频播放
</video>
<div class="video-info">
<div class="video-container" id="videoContainer">
<!-- 头像容器 -->
<div class="avatar-container" id="avatarContainer">
<div class="avatar" id="avatar">
<!-- 使用相对路径引用图片 -->
<img src="./tx.png" alt="头像" onerror="this.style.display='none'; this.parentElement.innerHTML='壹和零';">
</div>
<!-- <div class="avatar-name">AI助手</div> -->
</div>
<!-- 主视频元素 -->
<video id="recordedVideo" autoplay muted>
<source src="" type="video/mp4">
您的浏览器不支持视频播放
</video>
<!-- 缓冲视频元素 -->
<video id="recordedVideoBuffer" autoplay muted>
<source src="" type="video/mp4">
您的浏览器不支持视频播放
</video>
<!-- 加载指示器 -->
<div class="video-loading" id="videoLoading">
<div class="loading-spinner"></div>
<!-- <div>正在切换视频...</div> -->
</div>
<!-- 等待连接提示 -->
<div class="connection-waiting" id="connectionWaiting" style="display: none;">
<div class="loading-spinner"></div>
<div style="color: white; font-size: 18px; margin-top: 10px;">等待连接通话中...</div>
</div>
</div>
<div class="video-info" style="display: none;">
<span id="currentVideoName">未选择视频</span>
</div>
</div>
<!-- 控制按钮 -->
<!-- 控制按钮 - 悬浮在视频上方 -->
<div class="controls">
<button id="startButton" class="btn btn-primary">开始音频通话</button>
<button id="stopButton" class="btn btn-danger" disabled>停止通话</button>
<!-- <button id="muteButton" class="btn btn-secondary">静音</button>
<button id="defaultVideoButton" class="btn btn-info">回到默认视频</button>
<button id="testVideoButton" class="btn btn-warning">测试视频文件</button> -->
<button id="startButton" class="btn btn-primary" title="开始通话">
<!-- 默认通话图标 -->
<svg id="callIcon" viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M6.62 10.79c1.44 2.83 3.76 5.14 6.59 6.59l2.2-2.2c.27-.27.67-.36 1.02-.24 1.12.37 2.33.57 3.57.57.55 0 1 .45 1 1V20c0 .55-.45 1-1 1-9.39 0-17-7.61-17-17 0-.55.45-1 1-1h3.5c.55 0 1 .45 1 1 0 1.25.2 2.45.57 3.57.11.35.03.74-.25 1.02l-2.2 2.2z" fill="white"/>
</svg>
<!-- 通话中文字显示(初始隐藏) -->
<span id="callingText" style="display: none; color: white; font-size: 14px;">正在通话中</span>
</button>
<button id="stopButton" class="btn btn-danger" disabled title="结束通话" style="display: none;">
<svg viewBox="0 0 24 24" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M19.23 15.26l-2.54-.29c-.61-.07-1.21.14-1.64.57l-1.84 1.84c-2.83-1.44-5.15-3.75-6.59-6.59l1.85-1.85c.43-.43.64-1.03.57-1.64l-.29-2.52c-.12-1.01-.97-1.77-1.99-1.77H5.03c-1.13 0-2.07.94-2 2.07.53 8.54 7.36 15.36 15.89 15.89 1.13.07 2.07-.87 2.07-2v-1.73c.01-1.01-.75-1.86-1.76-1.98z" fill="white"/>
<line x1="18" y1="6" x2="6" y2="18" stroke="white" stroke-width="2"/>
</svg>
</button>
</div>
<!-- 输入区域 -->
<div class="input-section">
<!-- 隐藏的输入区域 -->
<div class="input-section" style="display: none;">
<div class="text-input-group">
<input type="text" id="textInput" placeholder="输入文本内容..." />
<button id="sendTextButton" class="btn btn-primary">发送文本</button>
</div>
</div>
<div class="voice-input-group">
<button id="startVoiceButton" class="btn btn-success">开始语音输入</button>
<button id="stopVoiceButton" class="btn btn-warning" disabled>停止语音输入</button>
<span id="voiceStatus">点击开始语音输入</span>
<!-- 隐藏的视频选择 -->
<div class="video-selection" style="display: none;">
<h3>选择要播放的视频</h3>
<div id="videoList" class="video-list">
<!-- 视频列表将在这里动态生成 -->
</div>
</div>
<!-- 视频选择 -->
<!-- <div class="video-selection">
<h3>选择要播放的视频</h3>
<div id="videoList" class="video-list">
视频列表将在这里动态生成 -->
<!-- </div>
</div> -->
<!-- 状态显示 -->
<div class="status-section">
<div id="connectionStatus" class="status">未连接</div>
<!-- 隐藏的状态显示 -->
<div class="status-section" style="display: none;">
<div id="connectionStatus" class="status" style="display: none;">未连接</div>
<div id="messageLog" class="message-log"></div>
</div>
</div>

File diff suppressed because it is too large.


@ -1,5 +1,35 @@
// 以流式方式请求LLM大模型接口并打印流式返回内容
// 过滤旁白内容的函数
function filterNarration(text) {
if (!text) return text;
// 匹配各种括号内的旁白内容
// 包括:()、【】、[]、{}、〈〉、《》等
const narrationPatterns = [
/([^)]*)/g, // 中文圆括号
/\([^)]*\)/g, // 英文圆括号
/【[^】]*】/g, // 中文方括号
/\[[^\]]*\]/g, // 英文方括号
/\{[^}]*\}/g, // 花括号
/〈[^〉]*〉/g, // 中文尖括号
/《[^》]*》/g, // 中文书名号
/<[^>]*>/g // 英文尖括号
];
let filteredText = text;
// 逐个应用过滤规则
narrationPatterns.forEach(pattern => {
filteredText = filteredText.replace(pattern, '');
});
// 清理多余的空格和换行
filteredText = filteredText.replace(/\s+/g, ' ').trim();
return filteredText;
}
async function requestLLMStream({ apiKey, model, messages, onSegment }) {
const response = await fetch('https://ark.cn-beijing.volces.com/api/v3/bots/chat/completions', {
method: 'POST',
@@ -29,7 +59,7 @@ async function requestLLMStream({ apiKey, model, messages, onSegment }) {
let pendingText = ''; // text waiting to be segmented
// Segment delimiters
const segmentDelimiters = /[,。:;!?,.:;!?]/;
const segmentDelimiters = /[,。:;!?,.:;!?]|\.{3,}|……|…/;
while (!done) {
const { value, done: doneReading } = await reader.read();
@@ -51,9 +81,17 @@ async function requestLLMStream({ apiKey, model, messages, onSegment }) {
if (jsonStr === '[DONE]') {
console.log('LLM SSE流结束');
// Handle the remaining pending text
// Handle the remaining pending text, regardless of whether it reaches the length threshold
if (pendingText.trim() && onSegment) {
await onSegment(pendingText.trim());
console.log('处理最后的待处理文本:', pendingText.trim());
// Strip narration
const filteredText = filterNarration(pendingText.trim());
if (filteredText.trim()) {
console.log('过滤旁白后的最后文本:', filteredText);
await onSegment(filteredText, true);
} else {
console.log('最后的文本被完全过滤,跳过');
}
}
continue;
}
@@ -64,27 +102,50 @@ async function requestLLMStream({ apiKey, model, messages, onSegment }) {
const deltaContent = obj.choices[0].delta.content;
content += deltaContent;
pendingText += deltaContent;
console.log('LLM内容片段:', deltaContent);
console.log('【未过滤】LLM内容片段:', pendingText);
// Check whether the text contains a segment delimiter
if (segmentDelimiters.test(pendingText)) {
// Split the text on the delimiters
const segments = pendingText.split(segmentDelimiters);
// Filter narration first, then look for segment delimiters
const filteredPendingText = filterNarration(pendingText);
// Check whether the filtered text contains a segment delimiter
if (segmentDelimiters.test(filteredPendingText)) {
// Split the filtered text on the delimiters
const segments = filteredPendingText.split(segmentDelimiters);
// Reassemble: only emit complete segments that are long enough
let accumulatedText = '';
let hasProcessed = false;
// Process the complete segments (all but the last, which may be unfinished)
for (let i = 0; i < segments.length - 1; i++) {
const segment = segments[i].trim();
if (segment && onSegment) {
// Find the matching delimiter
const delimiterMatch = pendingText.match(segmentDelimiters);
const segmentWithDelimiter = segment + (delimiterMatch ? delimiterMatch[0] : '');
console.log('检测到完整段落:', segmentWithDelimiter);
await onSegment(segmentWithDelimiter);
if (segment) {
accumulatedText += segment;
// Find the delimiter
const delimiterMatch = filteredPendingText.match(segmentDelimiters);
if (delimiterMatch) {
accumulatedText += delimiterMatch[0];
}
// Once the accumulated text is longer than 8 characters, process it
if (accumulatedText.length > 8 && onSegment) {
console.log('【已过滤】检测到完整段落:', accumulatedText);
// The text has already been narration-filtered, so use it directly
if (accumulatedText.trim()) {
console.log('处理过滤后的文本:', accumulatedText);
await onSegment(accumulatedText, false);
}
hasProcessed = true;
accumulatedText = ''; // reset
}
}
}
// Keep the last, possibly incomplete segment
pendingText = segments[segments.length - 1] || '';
// Update pendingText - based on the original text, adjusted accordingly
if (hasProcessed) {
// Work out how much of the original text was consumed and update pendingText
const processedLength = pendingText.length - (segments[segments.length - 1] || '').length;
pendingText = pendingText.substring(processedLength);
}
}
}
} catch (e) {
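Taken together, the hunks above make requestLLMStream append each SSE delta to pendingText, strip narration, and only forward text to onSegment once a sentence delimiter has appeared and more than 8 characters have accumulated; whatever remains is flushed with a final flag when the [DONE] marker arrives. A minimal caller might look like the sketch below; the API key, bot id and message content are placeholders rather than values from the repository.

// Hypothetical caller for requestLLMStream (credentials and model are placeholders)
await requestLLMStream({
  apiKey: 'YOUR_ARK_API_KEY',   // placeholder
  model: 'your-bot-id',         // placeholder
  messages: [{ role: 'user', content: '用一句话介绍一下你自己。' }],
  onSegment: async (segment, isFinal) => {
    // Each segment arrives narration-filtered and roughly sentence-sized
    console.log(isFinal ? '[final]' : '[segment]', segment);
  }
});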

View File

@@ -56,12 +56,12 @@ class MessageHistory {
const messages = [];
// Add the system message
if (includeSystem) {
messages.push({
role: 'system',
content: 'You are a helpful assistant.'
});
}
// if (includeSystem) {
// messages.push({
// role: 'system',
// content: 'You are a helpful assistant.'
// });
// }
// Fetch the most recent conversation history
const recentMessages = this.messages.slice(-recentCount * 2); // user and assistant messages come in pairs
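With the default 'You are a helpful assistant.' system message commented out, the context sent to the model is now just the most recent user/assistant pairs. The standalone sketch below mirrors that slicing logic; the class and method names here are illustrative and are not the repository's actual API.

// Illustrative history-trimming logic (names are hypothetical)
class DemoHistory {
  constructor() { this.messages = []; }
  add(role, content) { this.messages.push({ role, content }); }
  recent(recentCount = 20) {
    // user and assistant messages come in pairs, so keep 2 * recentCount entries
    return this.messages.slice(-recentCount * 2);
  }
}
const history = new DemoHistory();
history.add('user', '你好');
history.add('assistant', '你好,很高兴见到你');
console.log(history.recent(20)); // at most the last 20 exchanges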

View File

@@ -52,25 +52,26 @@ async function addAudioToQueue(audioHex) {
console.error('音频解码失败:', error);
}
}
let isFirstChunk = true;
// Queue processor - runs independently and plays audio in order
async function processAudioQueue() {
if (isProcessingQueue) return;
isProcessingQueue = true;
console.log('开始处理音频队列');
let isFirstChunk = true;
while (audioQueue.length > 0 || isPlaying) {
while (audioQueue.length > 0 && !isPlaying) {
console.log('开始处理音频队列');
// If nothing is currently playing and the queue has audio
if (!isPlaying && audioQueue.length > 0) {
const audioItem = audioQueue.shift();
const sayName = 'say-5s-m-sw'
const targetVideo = '5.mp4'
const sayName = '8-4-sh'
const targetVideo = window.webrtcApp.interactionVideo
// If this is the first audio chunk, trigger the video switch
if (sayName != window.webrtcApp.currentVideoTag && window.webrtcApp && window.webrtcApp.handleTextInput) {
if (sayName != window.webrtcApp.currentVideoTag && window.webrtcApp && window.webrtcApp.switchVideoStream) {
try {
console.log('--------------触发视频切换:', sayName);
await window.webrtcApp.switchVideoWithReplaceTrack(targetVideo, 'audio', 'say-5s-m-sw');
window.webrtcApp.switchVideoStream(targetVideo, 'audio', '8-4-sh');
isFirstChunk = false;
window.webrtcApp.currentVideoTag = sayName;
} catch (error) {
@@ -85,12 +86,21 @@
}
isProcessingQueue = false;
const text = 'default'
await window.webrtcApp.socket.emit('voice-input', { text });
if (window.webrtcApp.currentVideoTag != text) {
// Wait for the current audio to finish before switching back to the default video
// while (isPlaying) {
// console.log("触发音频等待")
// await new Promise(resolve => setTimeout(resolve, 1000));
// }
// console.log("触发音频等待")
// await new Promise(resolve => setTimeout(resolve, 300));
const text = 'default'
console.log("音频结束------------------------", window.webrtcApp.currentVideoTag, isPlaying)
if (window.webrtcApp.currentVideoTag != text && !isPlaying) {
isFirstChunk = true
window.webrtcApp.currentVideoTag = text
await window.webrtcApp.switchVideoWithReplaceTrack(window.webrtcApp.defaultVideo, 'audio', text);
window.webrtcApp.switchVideoStream(window.webrtcApp.defaultVideo, 'audio', text);
}
console.log('音频队列处理完成');
}
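The rewritten loop above only drains the queue while nothing is currently playing, switches to the interaction video on the first chunk, and falls back to the default video once the queue is empty and playback has stopped. The stripped-down sketch below shows the same single-consumer queue pattern in isolation; the queue variables and playChunk are stand-ins, not the module's real internals.

// Minimal single-consumer audio queue (illustrative stand-in)
const queue = [];
let playing = false;
let draining = false;

async function playChunk(chunk) {
  // Stand-in for real playback; resolves when the chunk would finish
  await new Promise(resolve => setTimeout(resolve, 100));
}

function enqueue(chunk) {
  queue.push(chunk);
  drain(); // kick the consumer; it is a no-op if already running
}

async function drain() {
  if (draining) return;
  draining = true;
  while (queue.length > 0 && !playing) {
    const chunk = queue.shift();
    playing = true;
    await playChunk(chunk);
    playing = false;
  }
  draining = false;
}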
@@ -113,13 +123,13 @@ function playAudioData(audioData) {
};
// Timeout guard
setTimeout(() => {
if (isPlaying) {
console.log('音频播放超时,强制结束');
isPlaying = false;
resolve();
}
}, (audioData.duration + 0.5) * 1000);
// setTimeout(() => {
// if (isPlaying) {
// console.log('音频播放超时,强制结束');
// isPlaying = false;
// resolve();
// }
// }, (audioData.duration + 0.5) * 1000);
source.start(0);
console.log(`开始播放音频片段,时长: ${audioData.duration}`);
@@ -152,10 +162,10 @@ function getQueueStatus() {
// Removed the waitForCurrentAudioToFinish function; it is no longer needed
async function requestMinimaxi({ apiKey, groupId, body, stream = true }) {
async function requestMinimaxi({ apiKey, groupId, body, stream = true , textPlay = false}) {
const url = `https://api.minimaxi.com/v1/t2a_v2`;
const reqBody = { ...body, stream };
isPlaying = textPlay
// Add these two variable definitions
let isFirstChunk = true;
// const currentText = body.text;
@@ -222,8 +232,8 @@ async function requestMinimaxi({ apiKey, groupId, body, stream = true }) {
// Parse each chunk as it streams in and play the audio immediately
if (obj.data && obj.data.audio && obj.data.status === 1) {
console.log('收到音频数据片段!', obj.data.audio.length);
audioHex += obj.data.audio;
// audioHex += obj.data.audio;
audioHex = obj.data.audio;
// const sayName = 'say-5s-m-sw'
// // 如果是第一个音频片段,触发视频切换
// if (isFirstChunk && sayName != window.webrtcApp.currentVideoName && window.webrtcApp && window.webrtcApp.handleTextInput) {
@@ -244,7 +254,7 @@ async function requestMinimaxi({ apiKey, groupId, body, stream = true }) {
// const text = 'default'
// await window.webrtcApp.socket.emit('text-input', { text });
// await window.webrtcApp.handleTextInput(text);
lastFullResult = obj;
lastFullResult = null;
console.log('收到最终状态');
}
} catch (e) {
@@ -261,7 +271,7 @@ async function requestMinimaxi({ apiKey, groupId, body, stream = true }) {
const obj = JSON.parse(line);
if (obj.data && obj.data.audio) {
console.log('收到无data:音频数据!', obj.data.audio.length);
audioHex += obj.data.audio;
audioHex = obj.data.audio;
// Play this audio chunk immediately
await playAudioChunk(obj.data.audio);
@@ -421,4 +431,4 @@ function generateUUID() {
});
}
export { requestMinimaxi, requestVolcanTTS };
export { requestMinimaxi, requestVolcanTTS, addAudioToQueue };
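The module now also exports addAudioToQueue, so other code can feed hex-encoded TTS audio straight into the shared playback queue. A hedged usage sketch, assuming the module path matches the './minimaxi_stream.js' import seen elsewhere in this diff:

// Hypothetical consumer of the exported helper
import { addAudioToQueue } from './minimaxi_stream.js';

async function playHexChunk(audioHex) {
  // Decodes the hex string and enqueues it for ordered playback
  await addAudioToQueue(audioHex);
}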

View File

@@ -1,346 +0,0 @@
let ASRTEXT = ''
class HttpASRRecognizer {
constructor() {
this.mediaRecorder = null;
this.audioContext = null;
this.isRecording = false;
this.audioChunks = [];
// VAD-related properties
this.isSpeaking = false;
this.silenceThreshold = 0.01;
this.silenceTimeout = 1000;
this.minSpeechDuration = 300;
this.silenceTimer = null;
this.speechStartTime = null;
this.audioBuffer = [];
// API configuration
this.apiConfig = {
url: 'https://openspeech.bytedance.com/api/v3/auc/bigmodel/recognize/flash',
headers: {
'X-Api-App-Key': '1988591469',
'X-Api-Access-Key': 'mdEyhgZ59on1-NK3GXWAp3L4iLldSG0r',
'X-Api-Resource-Id': 'volc.bigasr.auc_turbo',
'X-Api-Request-Id': this.generateUUID(),
'X-Api-Sequence': '-1',
'Content-Type': 'application/json'
}
};
this.recordBtn = document.getElementById('startVoiceButton');
this.statusDiv = document.getElementById('status');
this.resultsDiv = document.getElementById('results');
this.initEventListeners();
}
initEventListeners() {
this.recordBtn.addEventListener('click', () => {
if (this.isRecording) {
this.stopRecording();
} else {
this.startRecording();
}
});
}
// Generate a UUID
generateUUID() {
return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, function(c) {
const r = Math.random() * 16 | 0;
const v = c == 'x' ? r : (r & 0x3 | 0x8);
return v.toString(16);
});
}
// Compute the audio energy (volume)
calculateAudioLevel(audioData) {
let sum = 0;
for (let i = 0; i < audioData.length; i++) {
sum += audioData[i] * audioData[i];
}
return Math.sqrt(sum / audioData.length);
}
// Voice activity detection
detectVoiceActivity(audioData) {
const audioLevel = this.calculateAudioLevel(audioData);
const currentTime = Date.now();
if (audioLevel > this.silenceThreshold) {
if (!this.isSpeaking) {
this.isSpeaking = true;
this.speechStartTime = currentTime;
this.audioBuffer = [];
this.updateStatus('检测到语音,开始录音...', 'speaking');
console.log('开始说话');
}
if (this.silenceTimer) {
clearTimeout(this.silenceTimer);
this.silenceTimer = null;
}
return true;
} else {
if (this.isSpeaking && !this.silenceTimer) {
this.silenceTimer = setTimeout(() => {
this.onSpeechEnd();
}, this.silenceTimeout);
}
return this.isSpeaking;
}
}
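detectVoiceActivity treats a frame as speech when its RMS energy exceeds silenceThreshold (0.01) and only ends the utterance after silenceTimeout (1000 ms) of continuous silence. The short sketch below shows just the RMS calculation on synthetic frames; the numbers are illustrative.

// RMS energy of one audio frame (same formula as calculateAudioLevel)
function rms(frame) {
  let sum = 0;
  for (let i = 0; i < frame.length; i++) sum += frame[i] * frame[i];
  return Math.sqrt(sum / frame.length);
}
const quiet = new Float32Array(4096).fill(0.001);  // well below the 0.01 threshold
const speech = new Float32Array(4096).fill(0.05);  // well above it
console.log(rms(quiet) > 0.01, rms(speech) > 0.01); // false true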
// Handle the end of speech
async onSpeechEnd() {
if (this.isSpeaking) {
const speechDuration = Date.now() - this.speechStartTime;
if (speechDuration >= this.minSpeechDuration) {
console.log(`语音结束,时长: ${speechDuration}ms`);
await this.processAudioBuffer();
// this.updateStatus('语音识别中...', 'processing');
console.log('语音识别中')
} else {
console.log('说话时长太短,忽略');
// this.updateStatus('等待语音输入...', 'ready');
console.log('等待语音输入...')
}
this.isSpeaking = false;
this.speechStartTime = null;
this.audioBuffer = [];
}
if (this.silenceTimer) {
clearTimeout(this.silenceTimer);
this.silenceTimer = null;
}
}
// Process the audio buffer and send it to the API
async processAudioBuffer() {
if (this.audioBuffer.length === 0) {
return;
}
try {
// Merge all buffered audio data
const totalLength = this.audioBuffer.reduce((sum, buffer) => sum + buffer.length, 0);
const combinedBuffer = new Float32Array(totalLength);
let offset = 0;
for (const buffer of this.audioBuffer) {
combinedBuffer.set(buffer, offset);
offset += buffer.length;
}
// Convert to WAV and encode as base64
const wavBuffer = this.encodeWAV(combinedBuffer, 16000);
const base64Audio = this.arrayBufferToBase64(wavBuffer);
// Call the ASR API
await this.callASRAPI(base64Audio);
} catch (error) {
console.error('处理音频数据失败:', error);
this.updateStatus('识别失败', 'error');
}
}
// Call the ASR API
async callASRAPI(base64AudioData) {
try {
const requestBody = {
user: {
uid: "1988591469"
},
audio: {
data: base64AudioData
},
request: {
model_name: "bigmodel"
}
};
const response = await fetch(this.apiConfig.url, {
method: 'POST',
headers: this.apiConfig.headers,
body: JSON.stringify(requestBody)
});
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
}
const result = await response.json();
this.handleASRResponse(result);
} catch (error) {
console.error('ASR API调用失败:', error);
this.updateStatus('API调用失败', 'error');
}
}
// Handle the ASR response
handleASRResponse(response) {
console.log('ASR响应:', response);
if (response && response.data && response.data.result) {
ASRTEXT = response.data.result;
// this.displayResult(text);
// this.updateStatus('识别完成', 'completed');
console.log('识别完成')
} else {
console.log('未识别到文字');
// this.updateStatus('未识别到文字', 'ready');
}
}
// Display the recognition result
displayResult(text) {
const resultElement = document.createElement('div');
resultElement.className = 'result-item';
resultElement.innerHTML = `
<span class="timestamp">${new Date().toLocaleTimeString()}</span>
<span class="text">${text}</span>
`;
this.resultsDiv.appendChild(resultElement);
this.resultsDiv.scrollTop = this.resultsDiv.scrollHeight;
}
// Update the status display
updateStatus(message, status) {
this.statusDiv.textContent = message;
this.statusDiv.className = `status ${status}`;
}
// Encode as WAV
encodeWAV(samples, sampleRate) {
const length = samples.length;
const buffer = new ArrayBuffer(44 + length * 2);
const view = new DataView(buffer);
// WAV file header
const writeString = (offset, string) => {
for (let i = 0; i < string.length; i++) {
view.setUint8(offset + i, string.charCodeAt(i));
}
};
writeString(0, 'RIFF');
view.setUint32(4, 36 + length * 2, true);
writeString(8, 'WAVE');
writeString(12, 'fmt ');
view.setUint32(16, 16, true);
view.setUint16(20, 1, true);
view.setUint16(22, 1, true);
view.setUint32(24, sampleRate, true);
view.setUint32(28, sampleRate * 2, true);
view.setUint16(32, 2, true);
view.setUint16(34, 16, true);
writeString(36, 'data');
view.setUint32(40, length * 2, true);
// Write the audio samples
let offset = 44;
for (let i = 0; i < length; i++) {
const sample = Math.max(-1, Math.min(1, samples[i]));
view.setInt16(offset, sample * 0x7FFF, true);
offset += 2;
}
return buffer;
}
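encodeWAV prepends a standard 44-byte RIFF/WAVE header (PCM, mono, 16-bit at the given sample rate) and writes each float sample as a little-endian int16. A small usage sketch, assuming asr stands for an instance of the class being removed above:

// Hypothetical usage of encodeWAV and arrayBufferToBase64 (asr is assumed to be an HttpASRRecognizer)
const samples = new Float32Array(16000).fill(0);        // 1 second of silence at 16 kHz
const wavBuffer = asr.encodeWAV(samples, 16000);        // 44-byte header + 32000 bytes of PCM
console.log(wavBuffer.byteLength);                      // 32044
const base64Audio = asr.arrayBufferToBase64(wavBuffer); // ready for the audio.data field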
// Convert ArrayBuffer to Base64
arrayBufferToBase64(buffer) {
let binary = '';
const bytes = new Uint8Array(buffer);
for (let i = 0; i < bytes.byteLength; i++) {
binary += String.fromCharCode(bytes[i]);
}
return btoa(binary);
}
async startRecording() {
try {
const stream = await navigator.mediaDevices.getUserMedia({
audio: {
sampleRate: 16000,
channelCount: 1,
echoCancellation: true,
noiseSuppression: true
}
});
this.audioContext = new (window.AudioContext || window.webkitAudioContext)({
sampleRate: 16000
});
const source = this.audioContext.createMediaStreamSource(stream);
const processor = this.audioContext.createScriptProcessor(4096, 1, 1);
processor.onaudioprocess = (event) => {
const inputBuffer = event.inputBuffer;
const inputData = inputBuffer.getChannelData(0);
// Voice activity detection
if (this.detectVoiceActivity(inputData)) {
// If voice activity is detected, buffer the audio data
this.audioBuffer.push(new Float32Array(inputData));
}
};
source.connect(processor);
processor.connect(this.audioContext.destination);
this.isRecording = true;
this.recordBtn.textContent = '停止录音';
this.recordBtn.className = 'btn recording';
// this.updateStatus('等待语音输入...', 'ready');
} catch (error) {
console.error('启动录音失败:', error);
// this.updateStatus('录音启动失败', 'error');
}
}
stopRecording() {
if (this.audioContext) {
this.audioContext.close();
this.audioContext = null;
}
if (this.silenceTimer) {
clearTimeout(this.silenceTimer);
this.silenceTimer = null;
}
// If speech is still in progress, process the remaining audio
if (this.isSpeaking) {
this.onSpeechEnd();
}
this.isRecording = false;
this.isSpeaking = false;
this.audioBuffer = [];
this.recordBtn.textContent = '开始录音';
this.recordBtn.className = 'btn';
console.log('录音已停止');
// this.updateStatus('录音已停止', 'stopped');
}
}
// Initialize the application
document.addEventListener('DOMContentLoaded', () => {
const asrRecognizer = new HttpASRRecognizer();
console.log('HTTP ASR识别器已初始化');
});

View File

@@ -101,6 +101,14 @@ header p {
.recorded-video-section {
margin-bottom: 30px;
text-align: center;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
/* Keep the video area at a fixed height and centered */
min-height: 100vh;
max-height: 100vh;
width: 100%;
}
.recorded-video-section h3 {
@@ -109,14 +117,22 @@ header p {
}
#recordedVideo {
max-width: 100%;
max-height: 100%;
width: 100%;
max-width: 400px; /* Cap the maximum width */
aspect-ratio: 9/16; /* Fixed 9:16 aspect ratio */
border-radius: 10px;
box-shadow: 0 5px 15px rgba(0,0,0,0.2);
object-fit: cover; /* Make the video fill the container */
background: #000; /* Video background color */
height: 100%;
border-radius: 0;
box-shadow: none;
object-fit: cover; /* Cover the entire container */
background: transparent; /* Transparent background */
transition: opacity 0.15s; /* Opacity transition */
margin: 0 auto; /* Center horizontally */
display: block; /* Ensure block-level display */
/* Keep the video centered at all times */
position: absolute;
left: 50%;
top: 50%;
transform: translate(-50%, -50%);
}
/* Styles while the video is loading */
@@ -423,6 +439,50 @@ header p {
.video-list {
grid-template-columns: 1fr;
}
/* Mobile video-container tweaks */
.video-container {
height: 100vh;
width: 100vw;
}
#recordedVideo {
width: 100%;
height: 100%;
object-fit: cover;
}
}
/* Desktop video-container tweaks */
@media (min-width: 769px) {
.video-container {
height: 100vh;
width: 100vw;
}
#recordedVideo {
width: 100%;
height: 100%;
object-fit: cover;
}
}
/* Landscape-orientation tweaks */
@media (orientation: landscape) and (max-height: 500px) {
.video-container {
height: 100vh;
}
.controls {
bottom: 20px;
}
}
/* Portrait-orientation tweaks */
@media (orientation: portrait) {
.video-container {
height: 100vh;
}
}
/* Animation effects */
@@ -448,42 +508,22 @@ header p {
}
#recordedVideo {
transition: opacity 0.2s ease-in-out;
transition: opacity 0.1s ease-in-out; /* Shorter transition */
background-color: #1a1a1a; /* Dark gray background to avoid pure black */
}
#recordedVideo.loading {
opacity: 0.8; /* Slightly reduce opacity while loading without fully hiding the video */
opacity: 0.9; /* Raise the loading opacity to reduce the black-screen effect */
}
#recordedVideo.playing {
opacity: 1;
}
/* Loading indicator */
.video-container {
position: relative;
}
.video-container::before {
content: '';
position: absolute;
top: 50%;
left: 50%;
width: 40px;
height: 40px;
margin: -20px 0 0 -20px;
border: 3px solid #333;
border-top: 3px solid #fff;
border-radius: 50%;
animation: spin 1s linear infinite;
opacity: 0;
z-index: 10;
transition: opacity 0.3s;
}
/* Tweaked loading indicator */
.video-container.loading::before {
opacity: 1;
opacity: 0.8; /* Lower the loading indicator's opacity */
border-top-color: #667eea; /* Use the theme color */
}
@keyframes spin {

BIN  src/tx.png (new file, 6.1 MiB; binary file not shown)

View File

@@ -1,44 +0,0 @@
import { requestMinimaxi } from './minimaxi_stream.js';
import { getMinimaxiConfig } from './config.js';
export async function playVideoWithAudio(videoPath, text) {
// 1. Set up video playback
const video = document.createElement('video');
video.src = videoPath;
document.body.appendChild(video);
// 2. Start the speech-synthesis stream
const minimaxiConfig = getMinimaxiConfig();
const audioStream = await requestMinimaxi({
apiKey: minimaxiConfig.apiKey,
groupId: minimaxiConfig.groupId,
body: {
model: 'speech-02-hd',
text,
output_format: 'hex', // hex is required for streaming
voice_setting: {
voice_id: 'yantu-qinggang',
speed: 1
}
},
stream: true
});
// 3. Convert the hex audio into a playable buffer
const audioCtx = new AudioContext();
const audioBuffer = await audioCtx.decodeAudioData(
hexToArrayBuffer(audioStream.data.audio)
);
// 4. Play video and audio together
const source = audioCtx.createBufferSource();
source.buffer = audioBuffer;
source.connect(audioCtx.destination);
video.play();
source.start(0);
}
function hexToArrayBuffer(hex) {
// ... hex-to-ArrayBuffer implementation
}
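The removed helper above leaves hexToArrayBuffer as a stub. For reference, a minimal implementation of that conversion, assuming plain hex with two characters per byte, could look like the sketch below; it is not code from the repository.

// Minimal hex-string to ArrayBuffer conversion (illustrative only)
function hexToArrayBufferSketch(hex) {
  const bytes = new Uint8Array(hex.length / 2);
  for (let i = 0; i < bytes.length; i++) {
    bytes[i] = parseInt(hex.slice(i * 2, i * 2 + 2), 16);
  }
  return bytes.buffer;
}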

6 binary files not shown.

BIN  videos/8-8-sj-bd.mp4 (new file; binary not shown)
BIN  videos/8-8-sj-sh-1.mp4 (new file; binary not shown)
BIN  videos/8-8-sj-sh.mp4 (new file; binary not shown)
BIN  videos/dj.mp4 (new file; binary not shown)
BIN  videos/hc-bd-3.mp4 (new file; binary not shown)
BIN  videos/hc-sh-3(1).mp4 (new file; binary not shown)
BIN  videos/hc-sh-3.mp4 (new file; binary not shown)
BIN  videos/kc-bd-3.mp4 (new file; binary not shown)
BIN  videos/kc-sh-3.mp4 (new file; binary not shown)
BIN  videos/qc-bd-4.mp4 (new file; binary not shown)
BIN  videos/qc-hc-7.mp4 (new file; binary not shown)
BIN  videos/qc-sh-4.mp4 (new file; binary not shown)