feat: Add a confirmation modal and improve the audio generation flow

- Add a confirmation modal component with multi-language support
- Change the audio duration options to "about 5 minutes" and "8-15 minutes"
- Improve the Docker setup: mount the .env file and the config directory
- Improve the audio generation flow by adding silence trimming
- Update the translation files with the confirmation-related strings
- Fix the overview_content handling in the podcast content component
- Adjust the middleware config to exclude robots.txt and sitemap.xml
- Expand the Docker usage docs with the new mount points
- Strengthen the podcast script prompt to require deeper dialogue
@@ -39,7 +39,7 @@ docker build -t podcast-server -f Dockerfile-Server .
#### 运行 Web 应用容器

```bash
docker run -d -p 3200:3000 -v /opt/audio:/app/server/output --restart always --name podcast-web podcast-web
docker run -d -p 3200:3000 -v /opt/audio:/app/server/output -v /opt/sqlite.db:/app/web/sqlite.db -v /opt/audio/.env:/app/web/.env -v /opt/audio/config:/app/config --restart always --name podcast-web podcast-web
```

#### 命令说明:
@@ -48,6 +48,8 @@ docker run -d -p 3200:3000 -v /opt/audio:/app/server/output --restart always --n
* `-p 3200:3000`:将宿主机的 3200 端口映射到容器的 3000 端口。Next.js 应用程序在容器内部的 3000 端口上运行。
* `-v /opt/audio:/app/server/output`:将宿主机的 `/opt/audio` 目录挂载到容器内的 `/app/server/output` 目录,用于音频文件的持久化存储。
* `-v /opt/sqlite.db:/app/web/sqlite.db`:将宿主机的 `/opt/sqlite.db` 文件挂载到容器内的 `/app/web/sqlite.db` 文件,用于数据库的持久化存储。
* `-v /opt/audio/.env:/app/web/.env`:将宿主机的 `/opt/audio/.env` 文件挂载到容器内的 `/app/web/.env` 文件,用于配置环境变量。
* `-v /opt/audio/config:/app/config`:将宿主机的 `/opt/audio/config` 目录挂载到容器内的 `/app/config` 目录,用于配置文件的持久化存储。
* `--restart always`:设置容器的重启策略,确保容器在意外停止或系统重启后能自动重启。
* `--name podcast-web`:为运行中的容器指定一个名称,方便后续管理。
* `podcast-web`:指定要运行的 Docker 镜像名称。
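Before starting the container, it helps to create the host-side paths referenced by the `-v` options above. A minimal sketch, assuming the default `/opt` locations used in this document (adjust to your own layout):

```bash
# Create the host directory used for the config bind mount
sudo mkdir -p /opt/audio/config

# The database and .env are mounted as files, so they must exist beforehand;
# otherwise Docker would create directories with these names instead
sudo touch /opt/sqlite.db /opt/audio/.env
```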
@@ -78,6 +80,13 @@ docker run -d -p 3100:8000 -v /opt/audio:/app/server/output --restart always --n

项目提供了 `docker-compose.yml` 文件,可以更方便地管理和部署整个应用。

Docker Compose 文件中定义了以下挂载点:

* `/opt/audio/output:/app/server/output`:将宿主机的 `/opt/audio/output` 目录挂载到容器内的 `/app/server/output` 目录,用于音频文件的持久化存储。
* `/opt/audio/sqlite.db:/app/web/sqlite.db`:将宿主机的 `/opt/audio/sqlite.db` 文件挂载到容器内的 `/app/web/sqlite.db` 文件,用于数据库的持久化存储。
* `/opt/audio/.env:/app/web/.env`:将宿主机的 `/opt/audio/.env` 文件挂载到容器内的 `/app/web/.env` 文件,用于配置环境变量。
* `/opt/audio/config:/app/config`:将宿主机的 `/opt/audio/config` 目录挂载到容器内的 `/app/config` 目录,用于配置文件的持久化存储。

### 启动服务

在项目根目录下执行以下命令启动所有服务:
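The start command itself falls outside this hunk; for reference, a typical Docker Compose invocation for a setup like this (a sketch, assuming the project's stock `docker-compose.yml`) would be:

```bash
# Start the web and server containers in the background
docker-compose up -d

# Follow the server logs, as referenced further down in this document
docker-compose logs server
```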
@@ -122,5 +131,8 @@ docker-compose logs server

1. 请确保宿主机上的端口 3100 和 3200 未被其他应用程序占用。
2. 请确保宿主机上的 `/opt/audio` 目录存在且具有适当的读写权限,或者根据实际情况修改挂载路径。
3. 在生产环境中,请使用安全的密钥替换示例中的 `PODCAST_API_SECRET_KEY`。
4. 使用 Docker Compose 时,服务间通过服务名称进行通信,Web 应用通过 `http://server:8000` 访问 Server 应用。
3. 请确保宿主机上的 `/opt/sqlite.db` 文件存在且具有适当的读写权限。
4. 请确保宿主机上的 `/opt/audio/.env` 文件存在且包含正确的环境变量配置。
5. 请确保宿主机上的 `/opt/audio/config` 目录存在且包含正确的配置文件。
6. 在生产环境中,请使用安全的密钥替换示例中的 `PODCAST_API_SECRET_KEY`。
7. 使用 Docker Compose 时,服务间通过服务名称进行通信,Web 应用通过 `http://server:8000` 访问 Server 应用。
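For the production key mentioned in note 6, one simple way to generate a strong value and drop it into the mounted env file (a sketch; it assumes the key is read from `/opt/audio/.env` as described above):

```bash
# Generate a random 32-byte hex string and append it as PODCAST_API_SECRET_KEY
echo "PODCAST_API_SECRET_KEY=$(openssl rand -hex 32)" >> /opt/audio/.env
```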
@@ -14,9 +14,6 @@ RUN npm install --frozen-lockfile \
# Copy the rest of the application code (web directory content)
COPY web .

# Copy parent config directory
COPY config ../config

# Build the Next.js application
# The `standalone` output mode creates a self-contained application
ENV NEXT_TELEMETRY_DISABLED 1
@@ -34,16 +31,12 @@ RUN npm install @libsql/linux-x64-musl
# Set production environment
ENV NODE_ENV production

COPY web/.env ./web/.env
COPY web/sqlite.db ./web/sqlite.db
# Copy standalone application and public assets from the builder stage
COPY --from=builder /app/web/.next/standalone ./web/
COPY --from=builder /app/web/.next/static ./web/.next/static
COPY --from=builder /app/web/public ./web/public

# Copy parent config directory from builder stage to runner stage
COPY --from=builder /app/config ./config

# Expose port (Next.js default port)
EXPOSE 3000


@@ -11,6 +11,8 @@ services:
volumes:
- /opt/audio/output:/app/server/output
- /opt/audio/sqlite.db:/app/web/sqlite.db
- /opt/audio/.env:/app/web/.env
- /opt/audio/config:/app/config
restart: always
container_name: podcast-web
depends_on:

@@ -18,7 +18,7 @@ from tts_adapters import TTSAdapter, IndexTTSAdapter, EdgeTTSAdapter, FishAudioA

# Global configuration
output_dir = "output"
file_list_path = os.path.join(output_dir, "file_list.txt")
# file_list_path is now generated uniquely for each merge operation
tts_providers_config_path = '../config/tts_providers.json'

def read_file_content(filepath):
@@ -111,7 +111,7 @@ def generate_speaker_id_text(pod_users, voices_list):

return "。".join(speaker_info) + "。"

def merge_audio_files():
def merge_audio_files(file_list_path: str):
# 生成一个唯一的UUID
unique_id = str(uuid.uuid4())
unique_id = unique_id.replace("-", "")
@@ -136,7 +136,7 @@ def merge_audio_files():
"ffmpeg",
"-f", "concat",
"-safe", "0",
"-i", os.path.basename(file_list_path),
"-i", os.path.basename(file_list_path), # Use the passed file_list_path
"-acodec", "pcm_s16le",
"-ar", "44100",
"-ac", "2",
@@ -167,23 +167,41 @@ def merge_audio_files():
except subprocess.CalledProcessError as e:
raise RuntimeError(f"Error merging or converting audio files with FFmpeg: {e.stderr}")
finally:
# Clean up temporary audio files, the file list, and the intermediate WAV file
for item in os.listdir(output_dir):
if item.startswith("temp_audio"):
try:
os.remove(os.path.join(output_dir, item))
except OSError as e:
print(f"Error removing temporary audio file {item}: {e}") # This should not stop the process
# Clean up audio files listed in the file list, the file list itself, and the intermediate WAV file
try:
os.remove(file_list_path)
except OSError as e:
print(f"Error removing file list {file_list_path}: {e}") # This should not stop the process
# Read the file list and delete the audio files listed
if os.path.exists(file_list_path):
with open(file_list_path, 'r', encoding='utf-8') as f:
for line in f:
# Parse lines like: file 'temp_audio_12345.mp3'
if line.startswith("file "):
# Extract the filename, removing quotes
filename = line[5:].strip().strip("'\"")
filepath = os.path.join(output_dir, filename)
try:
if os.path.exists(filepath):
os.remove(filepath)
print(f"Deleted audio file: {filename}")
except OSError as e:
print(f"Error removing audio file {filename}: {e}")

# Delete the file list itself
try:
os.remove(file_list_path)
print(f"Deleted file list: {os.path.basename(file_list_path)}")
except OSError as e:
print(f"Error removing file list {file_list_path}: {e}")
except Exception as e:
print(f"Error reading file list for cleanup: {e}")

# Clean up the intermediate WAV file
try:
if os.path.exists(output_audio_filepath_wav):
os.remove(output_audio_filepath_wav)
print(f"Cleaned up intermediate WAV file: {output_audio_filename_wav}")
except OSError as e:
print(f"Error removing intermediate WAV file {output_audio_filepath_wav}: {e}")

print("Cleaned up temporary files.")

def get_audio_duration(filepath: str) -> Optional[float]:
@@ -219,6 +237,102 @@ def get_audio_duration(filepath: str) -> Optional[float]:
print(f"An unexpected error occurred while getting audio duration for {filepath}: {e}")
return None

def trim_audio_silence(input_filepath: str, output_filepath: str, silence_threshold_db: float = -60, min_silence_duration: float = 0.5):
"""
Removes leading and trailing silence from an audio file using ffmpeg.

Args:
input_filepath (str): Path to the input audio file.
output_filepath (str): Path where the trimmed audio file will be saved.
silence_threshold_db (float): Silence threshold in dB. Audio below this level is considered silence.
min_silence_duration (float): Minimum duration of silence to detect, in seconds.
"""
try:
# Check if ffmpeg is available
subprocess.run(["ffmpeg", "-version"], check=True, capture_output=True)
except FileNotFoundError:
raise RuntimeError("FFmpeg is not installed or not in your PATH. Please install FFmpeg to trim audio silence. You can download FFmpeg from: https://ffmpeg.org/download.html")

print(f"Trimming silence from {input_filepath}...")
try:
command = [
"ffmpeg",
"-i", input_filepath,
"-af",
f"silencedetect=n={silence_threshold_db}dB:d={min_silence_duration}",
"-f", "null",
"-"
]
process = subprocess.run(command, check=False, capture_output=True, text=True)

# Parse output for silence points
lines = process.stderr.splitlines()
start_trim = 0.0
end_trim = get_audio_duration(input_filepath) # Default to full duration

silence_starts = []
silence_ends = []

for line in lines:
if "silencedetect" in line:
if "silence_start" in line:
match = re.search(r"silence_start: (\d+\.\d+)", line)
if match:
start = float(match.group(1))
silence_starts.append(start)
elif "silence_end" in line:
match = re.search(r"silence_end: (\d+\.\d+)", line)
if match:
end = float(match.group(1))
silence_ends.append(end)

current_audio_duration = get_audio_duration(input_filepath)
if current_audio_duration is None:
print(f"Warning: Could not get duration for {input_filepath}. Skipping silence trim.")
subprocess.run(["ffmpeg", "-i", input_filepath, "-c", "copy", output_filepath], check=True)
return

start_trim_val = 0.0 # Initialize start_trim_val
end_trim_val = current_audio_duration # Initialize end_trim_val with the full duration

if silence_starts and silence_ends:
# Determine leading silence
if silence_starts[0] == 0.0: # Silence at the very beginning
start_trim_val = silence_ends[0]

# Determine trailing silence
# Only consider trimming from the end if there's silence close to the end
if silence_ends[-1] >= (end_trim_val - min_silence_duration):
end_trim_val = silence_starts[-1]

# If after trimming, the duration becomes too short or negative, skip trimming
if (end_trim_val - start_trim_val) <= 0.01: # Add a small epsilon to avoid issues with very short audios
print(f"Skipping trim for {input_filepath}: trimmed duration too short or negative. Copying original.")
# If trimming would result in empty or near-empty file, just copy the original
subprocess.run(["ffmpeg", "-i", input_filepath, "-c", "copy", output_filepath], check=True)
else:
# Perform the actual trim using detected silence points
trim_command = [
"ffmpeg",
"-ss", str(start_trim_val), # Move -ss before -i for accurate seeking
"-i", input_filepath,
"-to", str(end_trim_val),
"-avoid_negative_ts", "auto", # Add to handle potential time stamp issues
"-c:a", "libmp3lame", # Re-encode to MP3 for consistency and smaller size
"-q:a", "2", # High quality
output_filepath
]
subprocess.run(trim_command, check=True, capture_output=True, text=True)
trimmed_duration = get_audio_duration(output_filepath)
print(f"Trimmed audio saved to {output_filepath}. Original duration: {current_audio_duration:.2f}s, Trimmed duration: {trimmed_duration:.2f}s")

except subprocess.CalledProcessError as e:
print(f"FFmpeg stderr during silence detection or trimming:\n{e.stderr}")
raise RuntimeError(f"Error trimming audio silence with FFmpeg for {input_filepath}: {e}")
except Exception as e:
raise RuntimeError(f"An unexpected error occurred during audio trimming for {input_filepath}: {e}")
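The detection pass above amounts to running ffmpeg's silencedetect filter and scraping its stderr; with the default parameters it is roughly equivalent to the following (the input name is illustrative):

```bash
# Probe for silence at the default -60 dB threshold and 0.5 s minimum duration;
# silencedetect prints silence_start/silence_end markers on stderr
ffmpeg -i input.mp3 -af "silencedetect=n=-60dB:d=0.5" -f null -
```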
def _parse_arguments():
"""Parses command-line arguments."""
parser = argparse.ArgumentParser(description="Generate podcast script and audio using OpenAI and local TTS.")
@@ -301,8 +415,14 @@ def _prepare_podcast_prompts(config_data, original_podscript_prompt, custom_cont

original_podscript_prompt = original_podscript_prompt.replace("{{numSpeakers}}", str(len(pod_users)))
original_podscript_prompt = original_podscript_prompt.replace("{{turnPattern}}", turn_pattern)
original_podscript_prompt = original_podscript_prompt.replace("{{usetime}}", usetime if usetime is not None else "5-6 minutes")
original_podscript_prompt = original_podscript_prompt.replace("{{outlang}}", output_language if output_language is not None else "Make sure the input language is set as the output language")

usetime = usetime if usetime is not None else "5-6 minutes"
print(f"\nGenerating Script Replace Usetime: {usetime}")
original_podscript_prompt = original_podscript_prompt.replace("{{usetime}}", usetime)

output_language = output_language if output_language is not None else "Make sure the input language is set as the output language"
print(f"\nGenerating Script Replace Output Language: {output_language}")
original_podscript_prompt = original_podscript_prompt.replace("{{outlang}}", output_language)

speaker_id_info = generate_speaker_id_text(pod_users, voices)
podscript_prompt = speaker_id_info + "\n\n" + custom_content + "\n\n" + original_podscript_prompt
@@ -318,17 +438,26 @@ def _generate_overview_content(api_key, base_url, model, overview_prompt, input_
openai_client_overview = OpenAICli(api_key=api_key, base_url=base_url, model=model, system_message=formatted_overview_prompt)
overview_response_generator = openai_client_overview.chat_completion(messages=[{"role": "user", "content": input_prompt}])
overview_content = "".join([chunk.choices[0].delta.content for chunk in overview_response_generator if chunk.choices and chunk.choices[0].delta.content])

print("Generated Overview:")
print(overview_content[:100])

# Extract title (first line) and tags (second line)
lines = overview_content.strip().split('\n')
title = lines[0].strip() if len(lines) > 0 else ""
tags = lines[1].strip() if len(lines) > 1 else ""
tags = ""
# 重复判断3次是否有非空值,没有值就取下一行
for i in range(1, min(len(lines), 4)): # 检查第2到第4行 (索引1到3)
current_tags = lines[i].strip()
if current_tags:
tags = current_tags
# 保留取到tags的索引行,从下一行开始截取到最后一行,保存数据到overview_content
overview_content = "\n".join(lines[i+1:]).strip()
break
else: # 如果循环结束没有找到非空tags,则从第二行开始截取
overview_content = "\n".join(lines[1:]).strip()

print(f"Extracted Title: {title}")
print(f"Extracted Tags: {tags}")
print("Generated Overview:")
print(overview_content[:100])

return overview_content, title, tags
except Exception as e:
@@ -449,11 +578,20 @@ def _generate_all_audio_files(podcast_script, config_data, tts_adapter: TTSAdapt
for future in as_completed(future_to_index):
index = future_to_index[future]
try:
result = future.result()
if result:
audio_files_dict[index] = result
original_audio_file = future.result()
if original_audio_file:
# Define a path for the trimmed audio file
trimmed_audio_file = os.path.join(output_dir, f"trimmed_{os.path.basename(original_audio_file)}")
trim_audio_silence(original_audio_file, trimmed_audio_file)
# Use the trimmed file for the final merge
audio_files_dict[index] = trimmed_audio_file
# Clean up the original untrimmed file
try:
os.remove(original_audio_file)
except OSError as e:
print(f"Error removing untrimmed audio file {original_audio_file}: {e}")
except Exception as e:
exception_caught = RuntimeError(f"Error generating audio for item {index}: {e}")
exception_caught = RuntimeError(f"Error generating or trimming audio for item {index}: {e}")
# An error occurred, we should stop.
break

@@ -470,19 +608,29 @@ def _generate_all_audio_files(podcast_script, config_data, tts_adapter: TTSAdapt
print(f"\nFinished generating individual audio files. Total files: {len(audio_files)}")
return audio_files

def _create_ffmpeg_file_list(audio_files):
def _create_ffmpeg_file_list(audio_files, expected_count: int):
"""Creates the file list for FFmpeg concatenation."""
if not audio_files:
raise ValueError("No audio files were generated to merge.")

print(f"Creating file list for ffmpeg at: {file_list_path}")
with open(file_list_path, 'w', encoding='utf-8') as f:
if len(audio_files) != expected_count:
raise RuntimeError(f"Mismatch in audio file count. Expected {expected_count}, but got {len(audio_files)}. Some audio files might be missing or an error occurred during generation.")

# Generate a unique file list path using UUID
unique_id = str(uuid.uuid4()).replace("-", "")
unique_file_list_path = os.path.join(output_dir, f"file_list_{unique_id}.txt")

print(f"Creating file list for ffmpeg at: {unique_file_list_path}")
with open(unique_file_list_path, 'w', encoding='utf-8') as f:
for audio_file in audio_files:
f.write(f"file '{os.path.basename(audio_file)}'\n")

print("Content of file_list.txt:")
with open(file_list_path, 'r', encoding='utf-8') as f:
print(f"Content of {os.path.basename(unique_file_list_path)}:")
with open(unique_file_list_path, 'r', encoding='utf-8') as f:
print(f.read())

# Return the unique file list path for use in merge_audio_files
return unique_file_list_path

from typing import cast # Add import for cast

@@ -569,8 +717,8 @@ def generate_podcast_audio():
tts_adapter = _initialize_tts_adapter(config_data) # 初始化 TTS 适配器

audio_files = _generate_all_audio_files(podcast_script, config_data, tts_adapter, args.threads)
_create_ffmpeg_file_list(audio_files)
output_audio_filepath = merge_audio_files()
file_list_path_created = _create_ffmpeg_file_list(audio_files, len(podcast_script.get("podcast_transcripts", [])))
output_audio_filepath = merge_audio_files(file_list_path_created)
return {
"output_audio_filepath": output_audio_filepath,
"overview_content": overview_content,
@@ -616,9 +764,8 @@ def generate_podcast_audio_api(args, config_path: str, input_txt_content: str, t
tts_adapter = _initialize_tts_adapter(config_data, tts_providers_config_content) # 初始化 TTS 适配器

audio_files = _generate_all_audio_files(podcast_script, config_data, tts_adapter, args.threads)
_create_ffmpeg_file_list(audio_files)

output_audio_filepath = merge_audio_files()
file_list_path_created = _create_ffmpeg_file_list(audio_files, len(podcast_script.get("podcast_transcripts", [])))
output_audio_filepath = merge_audio_files(file_list_path_created)

audio_duration_seconds = get_audio_duration(os.path.join(output_dir, output_audio_filepath))
formatted_duration = "00:00"
@@ -56,19 +56,27 @@ You are a master podcast scriptwriter, adept at transforming diverse input conte
* **Weave Information Naturally:** Integrate facts and data from the source within the group dialogue, not as standalone, undigested blocks.

6. **Length & Pacing:**

* **Target Duration & Word Count:** Create a transcript that would result in approximately {{usetime}} of audio. Use the following word count guidelines:
* "Under 5 minutes": Aim for 800-1000 words.
* "5-10 minutes": Aim for 1000-2000 words.
* "10-15 minutes": Aim for 2000-3000 words.


* **Target Duration & Word Count:** Create a transcript that would result in approximately {{usetime}} of audio, default language is {{outlang}}. Use the following word count guidelines:
* "Under 5 minutes": For English, the goal is 800-1000 words; for Chinese and Japanese, the goal is 800-1500 Chinese or Japanese characters
* "8-15 minutes": For English, the goal is 1500-3500 words; for Chinese and Japanese, the goal is 3000-6000 Chinese or Japanese characters

* **Content Coverage Mandate:** The primary goal is to ensure that **every distinct topic, key fact, or main idea** present in the `<source_content>` is mentioned or referenced in the final transcript. No major informational point should be completely omitted.

* **Prioritization Strategy:** While all topics must be covered, you must allocate speaking time and discussion depth according to their importance.
* **Key Topics:** Dedicate more dialogue, examples, and analysis from multiple hosts to the most central and significant points from the source material. These should form the core of the conversation.
* **Secondary Topics:** Less critical information or minor details should be handled more concisely. They can be introduced as quick facts by the "Expert" host, used as transitional statements by the moderator, or briefly acknowledged without extensive discussion. This ensures they are included without disrupting the flow or consuming disproportionate time.

7. **Copy & Replacement:**
7. **Dialogue Deepening & Expansion Techniques:**

* **In order to meet the mandatory word count or characters count target defined in Guideline 6, you must actively apply the following techniques to expand and deepen the conversation. Strictly avoid ending the topic prematurely:**
* **Follow-up & Clarification:** After each point is made, other hosts **must** ask follow-up questions. For example: "Can you give a real-life example?" or "What does this mean for the average person?"
* **Examples & Analogies:** For core concepts, the 'Expert' persona **must** use rich examples or vivid analogies to explain them.
* **Divergence & Association:** The host can guide the conversation toward moderate divergences. For example: "Speaking of that, it reminds me of..." or "What kind of future developments might we see in this area?"
* **Debate & Contrasting Views:** Use the host personas to create discussions from different perspectives, compelling other hosts to provide more detailed defenses and explanations.
* **Restatement & Summary:** The host (`speaker_0`) should provide restatements and summaries during pauses in the discussion and at the end of topics.

8. **Copy & Replacement:**
If a hyphen connects English letters and numbers or letters on both sides, replace it with a space.
Replace four-digit Arabic numerals with their Chinese character equivalents, one-to-one.
@@ -3,10 +3,10 @@
/** @type {import('next-sitemap').IConfig} */
module.exports = {
// 必须项,你的网站域名
siteUrl: process.env.NEXT_PUBLIC_BASE_URL || 'http://localhost:3000',
siteUrl: process.env.NEXT_PUBLIC_BASE_URL || 'http://localhost:3000/',

// (可选) 自动生成 robots.txt 文件,默认为 false
generateRobotsTxt: true,
generateRobotsTxt: true,

// (可选) 自定义 robots.txt 的内容
robotsTxtOptions: {
@@ -27,23 +27,89 @@ module.exports = {
},

// (可选) 排除特定的路由
exclude: ['/api/*'],
exclude: ['/api/*', '/_next/*', '/static/*'],

// 这个函数会在构建时执行
// additionalPaths: async (config) => {
// // 示例:从外部 API 获取所有博客文章的 slug
// const response = await fetch('https://api.example.com/posts');
// const posts = await response.json(); // 假设返回 [{ slug: 'post-1', updatedAt: '2023-01-01' }, ...]
// 支持多语言
i18n: {
locales: ['en', 'zh-CN', 'ja'],
defaultLocale: 'en',
},

// // 将文章数据转换为 next-sitemap 需要的格式
// const paths = posts.map(post => ({
// loc: `/blog/${post.slug}`, // URL 路径
// changefreq: 'weekly',
// priority: 0.7,
// lastmod: new Date(post.updatedAt).toISOString(), // 最后修改时间
// }));
// 包含静态页面
transform: async (config, path) => {
// 为动态路由设置默认值
if (path.includes('[fileName]')) {
return null; // 这些将在 additionalPaths 中处理
}

return {
loc: path,
changefreq: 'daily',
priority: path === '/' ? 1.0 : 0.8,
lastmod: new Date().toISOString(),
};
},

// // 返回一个 Promise,解析为一个路径数组
// return paths;
// },
// 添加动态路由和多语言支持
additionalPaths: async (config) => {
const paths = [];

// 支持的语言
const languages = ['en', 'zh-CN', 'ja'];

// 添加静态页面路径(包含多语言版本)
const staticPaths = [
'/',
'/pricing',
'/contact',
'/privacy',
'/terms'
];

staticPaths.forEach(path => {
// 添加默认语言路径
paths.push({
loc: path,
changefreq: 'daily',
priority: path === '/' ? 1.0 : 0.8,
lastmod: new Date().toISOString(),
});

// 为每种语言添加本地化路径
languages.forEach(lang => {
const localizedPath = `/${lang}${path === '/' ? '' : path}`;
paths.push({
loc: localizedPath,
changefreq: 'daily',
priority: path === '/' ? 1.0 : 0.8,
lastmod: new Date().toISOString(),
});
});
});

// 如果有播客文件,可以在这里添加动态路径
// 示例:从数据库或文件系统获取播客文件名
// const podcastFiles = await getPodcastFiles(); // 你需要实现这个函数
// podcastFiles.forEach(fileName => {
// // 添加默认语言路径
// paths.push({
// loc: `/podcast/${fileName}`,
// changefreq: 'weekly',
// priority: 0.6,
// lastmod: new Date().toISOString(),
// });
//
// // 为每种语言添加本地化路径
// languages.forEach(lang => {
// paths.push({
// loc: `/${lang}/podcast/${fileName}`,
// changefreq: 'weekly',
// priority: 0.6,
// lastmod: new Date().toISOString(),
// });
// });
// });

return paths;
},
};
@@ -81,6 +81,11 @@
"checkIn": "Check In",
"create": "Create",
"biu": "Biu!",
"confirm": "Confirm",
"cancel": "Cancel",
"close": "Close",
"confirmGeneration": "Confirm Generation",
"confirmGenerationMessage": "This operation will consume {{points}} points, continue?",
"checkInSuccess": "Check-in successful",
"checkInFailed": "Check-in failed",
"networkError": "Network error or server no response",
@@ -95,9 +100,8 @@
"chinese": "Chinese",
"english": "English",
"japanese": "Japanese",
"under5Minutes": "Under 5 minutes",
"between5And10Minutes": "5-10 minutes",
"between10And15Minutes": "10-15 minutes"
"under5Minutes": "5 minutes or less",
"between8And15Minutes": "8-15 minutes"
},
"podcastTabs": {
"script": "Script",
@@ -256,5 +260,11 @@
"maxVoicesAlert": "You can select up to 5 speakers.",
"delete": "Delete",
"presenter": "Presenter"
},
"newUser": {
"noPointsAccount": "User {{userId}} has no points account, initializing...",
"initialBonusDescription": "New user registration, initial points bonus",
"initError": "Failed to initialize user {{userId}} points account or record transaction: {{error}}",
"pointsAccountExists": "User {{userId}} already has a points account, no initialization required."
}
}
@@ -81,6 +81,11 @@
"checkIn": "チェックイン",
"create": "作成",
"biu": "びゅう!",
"confirm": "確認",
"cancel": "キャンセル",
"close": "閉じる",
"confirmGeneration": "生成の確認",
"confirmGenerationMessage": "この操作では{{points}}ポイントが消費されます。続行しますか?",
"checkInSuccess": "チェックイン成功",
"checkInFailed": "チェックイン失敗",
"networkError": "ネットワークエラーまたはサーバー応答なし",
@@ -95,9 +100,8 @@
"chinese": "中国語",
"english": "英語",
"japanese": "日本語",
"under5Minutes": "5分未満",
"between5And10Minutes": "5〜10分",
"between10And15Minutes": "10〜15分"
"under5Minutes": "約5分",
"between8And15Minutes": "8〜15分"
},
"podcastTabs": {
"script": "スクリプト",
@@ -256,5 +260,11 @@
"maxVoicesAlert": "最大5人のスピーカーを選択できます。",
"delete": "削除",
"presenter": "プレゼンター"
},
"newUser": {
"noPointsAccount": "ユーザー {{userId}} にポイントアカウントがありません。初期化しています...",
"initialBonusDescription": "新規ユーザー登録、初回ポイントボーナス",
"initError": "ユーザー {{userId}} のポイントアカウントの初期化またはトランザクションの記録に失敗しました: {{error}}",
"pointsAccountExists": "ユーザー {{userId}} はすでにポイントアカウントを持っています。初期化は不要です。"
}
}
@@ -81,6 +81,11 @@
"checkIn": "签到",
"create": "创作",
"biu": "Biu!",
"confirm": "确认",
"cancel": "取消",
"close": "关闭",
"confirmGeneration": "确认生成",
"confirmGenerationMessage": "本次操作将消耗 {{points}} 积分,是否继续?",
"checkInSuccess": "签到成功",
"checkInFailed": "签到失败",
"networkError": "网络错误或服务器无响应",
@@ -95,9 +100,8 @@
"chinese": "中文",
"english": "英文",
"japanese": "日文",
"under5Minutes": "5分钟以内",
"between5And10Minutes": "5-10分钟",
"between10And15Minutes": "10-15分钟"
"under5Minutes": "5分钟左右",
"between8And15Minutes": "8-15分钟"
},
"podcastTabs": {
"script": "脚本",
@@ -256,5 +260,11 @@
"maxVoicesAlert": "最多只能选择5个说话人。",
"delete": "删除",
"presenter": "主讲人"
},
"newUser": {
"noPointsAccount": "用户 {{userId}} 不存在积分账户,正在初始化...",
"initialBonusDescription": "新用户注册,初始积分奖励",
"initError": "初始化用户 {{userId}} 积分账户或记录流水失败: {{error}}",
"pointsAccountExists": "用户 {{userId}} 已存在积分账户,无需初始化。"
}
}
@@ -76,7 +76,7 @@ export async function POST(request: NextRequest) {
);
}

const allowedDurations = ['Under 5 minutes', '5-10 minutes', '10-15 minutes'];
const allowedDurations = ['Under 5 minutes', '8-15 minutes'];
if (!body.usetime || !allowedDurations.includes(body.usetime)) {
return NextResponse.json(
{ success: false, error: t('invalid_podcast_duration') },
@@ -1,8 +1,13 @@
import { NextResponse, NextRequest } from 'next/server';
import { getSessionData } from "@/lib/server-actions";
import { createPointsAccount, recordPointsTransaction, checkUserPointsAccount } from "@/lib/points"; // 导入新封装的函数
import { getTranslation } from '@/i18n';
import { fallbackLng } from '@/i18n/settings';

export async function GET(request: NextRequest) {
const lng = request.headers.get('x-next-pathname') || fallbackLng;
const { t } = await getTranslation(lng, 'components');

const sessionData = await getSessionData();
let baseUrl = process.env.NEXT_PUBLIC_BASE_URL || "/";
const pathname = request.nextUrl.searchParams.get('pathname');
@@ -24,18 +29,18 @@ export async function GET(request: NextRequest) {

// 如果不存在积分账户,则初始化
if (!userHasPointsAccount) {
console.log(`用户 ${userId} 不存在积分账户,正在初始化...`);
console.log(t('newUser.noPointsAccount', { userId }));
try {
const pointsPerPodcastDay = parseInt(process.env.POINTS_PER_PODCAST_INIT || '100', 10);
await createPointsAccount(userId, pointsPerPodcastDay); // 调用封装的创建积分账户函数
await recordPointsTransaction(userId, pointsPerPodcastDay, "initial_bonus", "新用户注册,初始积分奖励"); // 调用封装的记录流水函数
await recordPointsTransaction(userId, pointsPerPodcastDay, "initial_bonus", t('newUser.initialBonusDescription')); // 调用封装的记录流水函数
} catch (error) {
console.error(`初始化用户 ${userId} 积分账户或记录流水失败:`, error);
console.error(t('newUser.initError', { userId, error }));
// 根据错误类型,可能需要更详细的错误处理或重定向
// 例如,如果 userId 无效,可以重定向到错误页面
}
} else {
console.log(`用户 ${userId} 已存在积分账户,无需初始化。`);
console.log(t('newUser.pointsAccountExists', { userId }));
}

// 创建一个 URL 对象,指向要重定向到的根目录
web/src/components/ConfirmModal.tsx (new file, 110 lines)
@@ -0,0 +1,110 @@
// web/src/components/ConfirmModal.tsx
"use client"; // 标记为客户端组件,因为需要交互性

import React, { FC, MouseEventHandler, useCallback, useRef } from "react";
import { createPortal } from "react-dom";
import { XMarkIcon } from "@heroicons/react/24/outline"; // 导入关闭图标
import { useTranslation } from '../i18n/client'; // 导入 useTranslation

interface ConfirmModalProps {
isOpen: boolean;
onClose: () => void;
onConfirm: () => void;
title: string;
message: string;
points?: number; // 新增 points 属性
confirmText?: string;
cancelText?: string;
lang: string; // 新增 lang 属性
}

const ConfirmModal: FC<ConfirmModalProps> = ({
isOpen,
onClose,
onConfirm,
title,
message,
points,
confirmText,
cancelText,
lang
}) => {
const { t } = useTranslation(lang, 'components'); // 初始化 useTranslation 并指定命名空间
const modalRef = useRef<HTMLDivElement>(null);

// 点击背景关闭模态框
const handleOverlayClick: MouseEventHandler<HTMLDivElement> = useCallback(
(e) => {
if (modalRef.current && !modalRef.current.contains(e.target as Node)) {
onClose();
}
},
[onClose]
);

const handleConfirm = () => {
onConfirm();
onClose();
};

if (!isOpen) return null;

// 使用 React Portal 将模态框渲染到 body 下,避免Z-index问题和父组件样式影响
return createPortal(
<div
className="fixed inset-0 z-50 flex items-center justify-center bg-black bg-opacity-50 backdrop-blur-sm p-4 overflow-auto"
onClick={handleOverlayClick}
aria-modal="true"
role="dialog"
>
<div
ref={modalRef}
className="relative bg-white dark:bg-gray-800 rounded-lg shadow-xl w-full max-w-sm p-4 sm:p-6 transform transition-all duration-300 ease-out scale-95 opacity-0 animate-scale-in"
// 使用 Tailwind CSS 动画来优化进入效果,确保布局健壮性
style={{ animationFillMode: 'forwards' }} // 动画结束后保持最终状态
>
<button
onClick={onClose}
className="absolute top-4 right-4 text-gray-400 hover:text-gray-600 dark:text-gray-500 dark:hover:text-gray-300 transition-colors"
aria-label={t('podcastCreator.close')}
>
<XMarkIcon className="h-6 w-6" />
</button>

<h2 className="text-2xl font-bold text-gray-900 dark:text-white mb-4 text-center">
{title}
</h2>

<p
className="text-gray-700 dark:text-gray-300 mb-6 text-center"
dangerouslySetInnerHTML={{
__html: message.replace('{{points}}',
points !== undefined ?
`<span class="font-bold text-brand-purple dark:text-brand-pink">${points}</span>` :
'{{points}}'
)
}}
/>

<div className="flex flex-col sm:flex-row gap-3 justify-center">
<button
onClick={onClose}
className="px-4 py-2 border border-gray-300 dark:border-gray-600 rounded-md shadow-sm font-medium text-gray-700 dark:text-gray-200 bg-white dark:bg-gray-700 hover:bg-gray-50 dark:hover:bg-gray-600 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-blue-500 transition-colors"
>
{cancelText || t('podcastCreator.cancel')}
</button>

<button
onClick={handleConfirm}
className="px-4 py-2 border border-transparent rounded-md shadow-sm font-medium text-white bg-gradient-to-r from-brand-purple to-brand-pink hover:from-brand-purple-hover hover:to-brand-pink focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-brand-purple transition-all"
>
{confirmText || t('podcastCreator.confirm')}
</button>
</div>
</div>
</div>,
document.body // 渲染到 body 元素下
);
};

export default ConfirmModal;
@@ -147,7 +147,7 @@ export default async function PodcastContent({ fileName, lang }: PodcastContentP
{/* 3. 内容导航区和内容展示区 - 使用客户端组件 */}
<PodcastTabs
parsedScript={parsedScript}
overviewContent={audioInfo.overview_content ? audioInfo.overview_content.split('\n').slice(2).join('\n') : ''}
overviewContent={audioInfo.overview_content}
lang={lang}
/>
</main>
@@ -17,6 +17,7 @@ import { cn } from '@/lib/utils';
import ConfigSelector from './ConfigSelector';
import VoicesModal from './VoicesModal'; // 引入 VoicesModal
import LoginModal from './LoginModal'; // 引入 LoginModal
import ConfirmModal from './ConfirmModal'; // 引入 ConfirmModal
import { useToast, ToastContainer } from './Toast'; // 引入 Toast Hook 和 Container
import { setItem, getItem } from '@/lib/storage'; // 引入 localStorage 工具
import { useSession } from '@/lib/auth-client'; // 引入 useSession
@@ -60,8 +61,7 @@ const PodcastCreator: React.FC<PodcastCreatorProps> = ({

const durationOptions = [
{ value: 'Under 5 minutes', label: t('podcastCreator.under5Minutes') },
{ value: '5-10 minutes', label: t('podcastCreator.between5And10Minutes') },
{ value: '10-15 minutes', label: t('podcastCreator.between10And15Minutes') },
{ value: '8-15 minutes', label: t('podcastCreator.between8And15Minutes') },
];

const [topic, setTopic] = useState('');
@@ -97,6 +97,7 @@ const PodcastCreator: React.FC<PodcastCreatorProps> = ({
const [duration, setDuration] = useState(durationOptions[0].value);
const [showVoicesModal, setShowVoicesModal] = useState(false); // 新增状态
const [showLoginModal, setShowLoginModal] = useState(false); // 控制登录模态框的显示
const [showConfirmModal, setShowConfirmModal] = useState(false); // 控制确认模态框的显示
const [voices, setVoices] = useState<Voice[]>([]); // 从 ConfigSelector 获取 voices
const [selectedPodcastVoices, setSelectedPodcastVoices] = useState<{[key: string]: Voice[]}>(() => {
// 从 localStorage 读取缓存的说话人配置
@@ -129,6 +130,11 @@ const PodcastCreator: React.FC<PodcastCreatorProps> = ({
return;
}

// 显示确认对话框
setShowConfirmModal(true);
};

const handleConfirmGenerate = async () => {
let inputTxtContent = topic.trim();
if (customInstructions.trim()) {
inputTxtContent = "```custom-begin"+`\n${customInstructions.trim()}\n`+"```custom-end"+`\n${inputTxtContent}`;
@@ -526,6 +532,19 @@ const PodcastCreator: React.FC<PodcastCreatorProps> = ({
toasts={toasts}
onRemove={removeToast}
/>

{/* Confirm Modal */}
<ConfirmModal
isOpen={showConfirmModal}
onClose={() => setShowConfirmModal(false)}
onConfirm={handleConfirmGenerate}
title={t('podcastCreator.confirmGeneration')}
message={t('podcastCreator.confirmGenerationMessage')}
points={duration === '8-15 minutes' ?
parseInt(process.env.POINTS_PER_PODCAST || '20', 10) * 2 :
parseInt(process.env.POINTS_PER_PODCAST || '20', 10)}
lang={lang}
/>
</div>
);
};
@@ -41,5 +41,5 @@ export function middleware(request: NextRequest) {

export const config = {
// Matcher ignoring `/_next/` and `/api/`
matcher: ['/((?!api|_next/static|_next/image|favicon.ico|favicon.webp).*)'],
matcher: ['/((?!api|_next/static|_next/image|favicon.ico|favicon.webp|robots.txt|sitemap.xml).*)'],
};