Commit 057b341

Remove unused code related to agents configuration
1 parent 9df6f4d commit 057b341

1 file changed: 45 additions, 45 deletions


backend/genai/src/main.py

Lines changed: 45 additions & 45 deletions
@@ -23,51 +23,51 @@
 weave.init(project_name=os.getenv("WEAVE_PROJECT_NAME", ""))
 L.initialize(project_api_key=os.getenv("LMNR_PROJECT_API_KEY"))
 
-
-# see: https://docs.livekit.io/agents/integrations/google/#gemini-llm
-llm = livekit_google.LLM(
-    model="gemini-2.0-flash-exp",
-    candidate_count=1,
-    temperature=0.08,
-    vertexai=True,
-    tool_choice="auto",  # NOTE: want to switch this dynamically (required, none)
-)
-
-tts = livekit_google.TTS(
-    language="ja-JP",
-    gender="female",
-    voice_name="ja-JP-Neural2-B",  # use Neural2 voice type: ja-JP-Neural2-C, ja-JP-Neural2-D see: https://cloud.google.com/text-to-speech/docs/voices
-    encoding="linear16",
-    effects_profile_id="large-automotive-class-device",
-    sample_rate=24000,
-    pitch=0,
-    speaking_rate=1.0,
-)
-
-stt = livekit_google.STT(
-    languages="ja-JP",
-    detect_language=True,
-    interim_results=True,
-    punctuate=True,
-    spoken_punctuation=True,
-    model="chirp_2",
-    sample_rate=16000,
-    keywords=[
-        ("mi-ho", 24.0),  # tentative setting
-    ],
-)
-
-model = livekit_google.beta.realtime.RealtimeModel(
-    model="gemini-2.0-flash-exp",
-    voice="Charon",
-    modalities=["TEXT", "AUDIO"],
-    enable_user_audio_transcription=True,
-    enable_agent_audio_transcription=True,
-    vertexai=True,
-    candidate_count=1,
-    temperature=0.08,
-    instructions="You are a helpful assistant",
-)
+# agents
+# # see: https://docs.livekit.io/agents/integrations/google/#gemini-llm
+# llm = livekit_google.LLM(
+#     model="gemini-2.0-flash-exp",
+#     candidate_count=1,
+#     temperature=0.08,
+#     vertexai=True,
+#     tool_choice="auto",  # NOTE: want to switch this dynamically (required, none)
+# )
+
+# tts = livekit_google.TTS(
+#     language="ja-JP",
+#     gender="female",
+#     voice_name="ja-JP-Neural2-B",  # use Neural2 voice type: ja-JP-Neural2-C, ja-JP-Neural2-D see: https://cloud.google.com/text-to-speech/docs/voices
+#     encoding="linear16",
+#     effects_profile_id="large-automotive-class-device",
+#     sample_rate=24000,
+#     pitch=0,
+#     speaking_rate=1.0,
+# )
+
+# stt = livekit_google.STT(
+#     languages="ja-JP",
+#     detect_language=True,
+#     interim_results=True,
+#     punctuate=True,
+#     spoken_punctuation=True,
+#     model="chirp_2",
+#     sample_rate=16000,
+#     keywords=[
+#         ("mi-ho", 24.0),  # tentative setting
+#     ],
+# )
+
+# model = livekit_google.beta.realtime.RealtimeModel(
+#     model="gemini-2.0-flash-exp",
+#     voice="Charon",
+#     modalities=["TEXT", "AUDIO"],
+#     enable_user_audio_transcription=True,
+#     enable_agent_audio_transcription=True,
+#     vertexai=True,
+#     candidate_count=1,
+#     temperature=0.08,
+#     instructions="You are a helpful assistant",
+# )
 
 # Create the FastAPI app
 app = FastAPI()
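For context on what this commit disables: if the commented-out LLM, STT, and TTS configuration were ever re-enabled, these components would typically be wired into a LiveKit voice agent roughly as sketched below. This is a minimal, hypothetical sketch, not code from this repository or commit; it assumes the livekit-agents 0.x VoicePipelineAgent API, the livekit-plugins-google plugin imported as livekit_google (as in main.py), and the Silero VAD plugin. The entrypoint function, system prompt, and greeting text are illustrative only.

# Hypothetical wiring sketch -- not part of this commit or repository.
# Assumes livekit-agents 0.x (VoicePipelineAgent) and livekit-plugins-silero.
from livekit.agents import AutoSubscribe, JobContext, WorkerOptions, cli, llm
from livekit.agents.pipeline import VoicePipelineAgent
from livekit.plugins import google as livekit_google, silero


async def entrypoint(ctx: JobContext):
    # Join the room and subscribe to audio tracks only.
    await ctx.connect(auto_subscribe=AutoSubscribe.AUDIO_ONLY)
    participant = await ctx.wait_for_participant()

    agent = VoicePipelineAgent(
        vad=silero.VAD.load(),  # voice activity detection
        stt=livekit_google.STT(languages="ja-JP", model="chirp_2"),
        llm=livekit_google.LLM(model="gemini-2.0-flash-exp", vertexai=True),
        tts=livekit_google.TTS(language="ja-JP", voice_name="ja-JP-Neural2-B"),
        chat_ctx=llm.ChatContext().append(
            role="system", text="You are a helpful assistant"
        ),
    )
    agent.start(ctx.room, participant)
    await agent.say("Hello! How can I help you?", allow_interruptions=True)


if __name__ == "__main__":
    cli.run_app(WorkerOptions(entrypoint_fnc=entrypoint))

In this commit, however, the agent components are only commented out; main.py keeps serving the FastAPI app created right after the removed block.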
