OmniAICreator committed
Commit 1284e28 · verified · 1 Parent(s): fe601f1

Upload inference_example.ipynb

Files changed (1)
  1. inference_example.ipynb +174 -0
inference_example.ipynb ADDED
@@ -0,0 +1,174 @@
+ {
+  "cells": [
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "b92d046f",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import os\n",
+     "os.environ['VLLM_USE_V1'] = '0'\n",
+     "os.environ['VLLM_WORKER_MULTIPROC_METHOD'] = 'spawn'\n",
+     "os.environ[\"VLLM_LOGGING_LEVEL\"] = \"ERROR\"\n",
+     "os.environ['CUDA_VISIBLE_DEVICES'] = \"0\"\n",
+     "import torch\n",
+     "import warnings\n",
+     "import numpy as np\n",
+     "\n",
+     "warnings.filterwarnings('ignore')\n",
+     "warnings.filterwarnings('ignore', category=DeprecationWarning)\n",
+     "warnings.filterwarnings('ignore', category=FutureWarning)\n",
+     "warnings.filterwarnings('ignore', category=UserWarning)\n",
+     "\n",
+     "from qwen_omni_utils import process_mm_info\n",
+     "from transformers import Qwen3OmniMoeProcessor\n",
+     "\n",
+     "def _load_model_processor():\n",
+     "    if USE_TRANSFORMERS:\n",
+     "        from transformers import Qwen3OmniMoeForConditionalGeneration\n",
+     "        if TRANSFORMERS_USE_FLASH_ATTN2:\n",
+     "            model = Qwen3OmniMoeForConditionalGeneration.from_pretrained(MODEL_PATH,\n",
+     "                                                                         dtype='auto',\n",
+     "                                                                         attn_implementation='flash_attention_2',\n",
+     "                                                                         device_map=\"auto\")\n",
+     "        else:\n",
+     "            model = Qwen3OmniMoeForConditionalGeneration.from_pretrained(MODEL_PATH, device_map=\"auto\", dtype='auto')\n",
+     "    else:\n",
+     "        from vllm import LLM\n",
+     "        model = LLM(\n",
+     "            model=MODEL_PATH, trust_remote_code=True, gpu_memory_utilization=0.95,\n",
+     "            tensor_parallel_size=torch.cuda.device_count(),\n",
+     "            limit_mm_per_prompt={'image': 1, 'video': 3, 'audio': 3},\n",
+     "            max_num_seqs=1,\n",
+     "            max_model_len=8192,\n",
+     "            seed=1234,\n",
+     "        )\n",
+     "\n",
+     "    processor = Qwen3OmniMoeProcessor.from_pretrained(MODEL_PATH)\n",
+     "    return model, processor\n",
+     "\n",
+     "def run_model(model, processor, messages, return_audio, use_audio_in_video):\n",
+     "    if USE_TRANSFORMERS:\n",
+     "        text = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)\n",
+     "        audios, images, videos = process_mm_info(messages, use_audio_in_video=use_audio_in_video)\n",
+     "        inputs = processor(text=text, audio=audios, images=images, videos=videos, return_tensors=\"pt\", padding=True, use_audio_in_video=use_audio_in_video)\n",
+     "        inputs = inputs.to(model.device).to(model.dtype)\n",
+     "        text_ids, audio = model.generate(**inputs,\n",
+     "                                         thinker_return_dict_in_generate=True,\n",
+     "                                         thinker_max_new_tokens=8192,\n",
+     "                                         thinker_do_sample=True,\n",
+     "                                         thinker_top_p=0.95,\n",
+     "                                         thinker_top_k=20,\n",
+     "                                         thinker_temperature=0.6,\n",
+     "                                         speaker=\"Chelsie\",\n",
+     "                                         use_audio_in_video=use_audio_in_video,\n",
+     "                                         return_audio=return_audio)\n",
+     "        response = processor.batch_decode(text_ids.sequences[:, inputs[\"input_ids\"].shape[1] :], skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n",
+     "        if audio is not None:\n",
+     "            audio = np.array(audio.reshape(-1).detach().cpu().numpy() * 32767).astype(np.int16)\n",
+     "        return response, audio\n",
+     "    else:\n",
+     "        from vllm import SamplingParams\n",
+     "        sampling_params = SamplingParams(temperature=0.6, top_p=0.95, top_k=20, max_tokens=4096)\n",
+     "        text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\n",
+     "        audios, images, videos = process_mm_info(messages, use_audio_in_video=use_audio_in_video)\n",
+     "        inputs = {'prompt': text, 'multi_modal_data': {}, \"mm_processor_kwargs\": {\"use_audio_in_video\": use_audio_in_video}}\n",
+     "        if images is not None: inputs['multi_modal_data']['image'] = images\n",
+     "        if videos is not None: inputs['multi_modal_data']['video'] = videos\n",
+     "        if audios is not None: inputs['multi_modal_data']['audio'] = audios\n",
+     "        outputs = model.generate(inputs, sampling_params=sampling_params)\n",
+     "        response = outputs[0].outputs[0].text\n",
+     "        return response, None\n"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "d37dcedc",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import librosa\n",
+     "import audioread\n",
+     "\n",
+     "from IPython.display import Audio\n",
+     "\n",
+     "MODEL_PATH = \"NandemoGHS/Anime-Speech-Japanese-Refiner-FP8-DYNAMIC\"\n",
+     "\n",
+     "USE_TRANSFORMERS = False\n",
+     "TRANSFORMERS_USE_FLASH_ATTN2 = True\n",
+     "\n",
+     "model, processor = _load_model_processor()\n",
+     "\n",
+     "USE_AUDIO_IN_VIDEO = True"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "id": "5bf60bf5",
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "audio_path = \"https://huggingface.co/NandemoGHS/Anime-Speech-Japanese-Refiner/resolve/main/examples/example1.wav\"\n",
+     "\n",
+     "original_transcription = \"あっ、あぁんっ、好き、大好きですわ…。もっと…はぁ、んんっ、はぁんっ、もっとぉ!\"\n",
+     "\n",
+     "prompt = f\"\"\"これから与えられる音声クリップとその文字起こしについて、声の特徴と読み上げスタイル、感情などをアノテーションしたうえで、日本語の短いキャプションで要約してください。\n",
+     "出力には以下の項目を含めてください。\n",
+     "\n",
+     "profile: 話者プロファイル(例: お姉さん的な女性声/落ち着いた男性声/少女声 等)\n",
+     "mood: 感情・ムード(例: 明るい/落ち着いた/緊張/怒り/恐怖/悲しみ/快楽 等)\n",
+     "speed: 話速(例: とても遅い/やや速い/一定/(1.2×) 等)\n",
+     "prosody: 抑揚・リズム(例: 平坦/メリハリ/語尾上げ下げ/ため息混じり 等)\n",
+     "pitch_timbre: ピッチ/声質(例: 高め/低め/息多め/張りのある/囁き 等)\n",
+     "style: 発話スタイル(例: ナレーション風/会話調/朗読調/プレゼン調/囁き/喘ぎ/嗚咽/叫び 等)\n",
+     "emotion: 感情タグ(次のリストから1つ選択: [\"angry\", \"sad\", \"disdainful\", \"excited\", \"surprised\", \"satisfied\", \"unhappy\", \"anxious\", \"hysterical\", \"delighted\", \"scared\", \"worried\", \"indifferent\", \"upset\", \"impatient\", \"nervous\", \"guilty\", \"scornful\", \"frustrated\", \"depressed\", \"panicked\", \"furious\", \"empathetic\", \"embarrassed\", \"reluctant\", \"disgusted\", \"keen\", \"moved\", \"proud\", \"relaxed\", \"grateful\", \"confident\", \"interested\", \"curious\", \"confused\", \"joyful\", \"disapproving\", \"negative\", \"denying\", \"astonished\", \"serious\", \"sarcastic\", \"conciliative\", \"comforting\", \"sincere\", \"sneering\", \"hesitating\", \"yielding\", \"painful\", \"awkward\", \"amused\", \"loving\", \"dating\", \"longing\", \"aroused\", \"seductive\", \"ecstatic\", \"shy\"])\n",
+     "notes: 特記事項(間の取り方、笑い・ため・ブレス、ノイズ感、キス音、効果音、チュパ音 等)\n",
+     "caption: 上記を1〜2文・全角30〜80文字で自然文に要約\n",
+     "refined_text: 元の文字起こしテキストに、必要に応じて特殊タグを音声中のイベントの描写として文章のどこかに挿入したもの(必要なければ元テキストをそのまま出力)。\n",
+     "\n",
+     "元の文字起こしテキスト: {original_transcription}\n",
+     "元の音声クリップ:\"\"\"\n",
+     "\n",
+     "messages = [\n",
+     "    {\n",
+     "        \"role\": \"user\",\n",
+     "        \"content\": [\n",
+     "            {\"type\": \"text\", \"text\": prompt},\n",
+     "            {\"type\": \"audio\", \"audio\": audio_path},\n",
+     "        ]\n",
+     "    }\n",
+     "]\n",
+     "\n",
+     "display(Audio(librosa.load(audioread.ffdec.FFmpegAudioFile(audio_path), sr=16000)[0], rate=16000))\n",
+     "\n",
+     "response, _ = run_model(model=model, messages=messages, processor=processor, return_audio=False, use_audio_in_video=USE_AUDIO_IN_VIDEO)\n",
+     "\n",
+     "print(response)"
+    ]
+   }
+  ],
+  "metadata": {
+   "kernelspec": {
+    "display_name": "venv (3.10.12)",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.10.12"
+   }
+  },
+  "nbformat": 4,
+  "nbformat_minor": 5
+ }
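
The prompt in the last cell asks the model to answer with one "key: value" field per line (profile, mood, speed, prosody, pitch_timbre, style, emotion, notes, caption, refined_text). A follow-up cell could split such a response into a dict. This is a minimal sketch that assumes the model actually follows the requested line format; parse_annotation is an illustrative helper, not something shipped with this repository:

    # Field names taken from the annotation prompt above.
    EXPECTED_FIELDS = {"profile", "mood", "speed", "prosody", "pitch_timbre",
                       "style", "emotion", "notes", "caption", "refined_text"}

    def parse_annotation(response: str) -> dict:
        # Keep only "key: value" lines whose key is one of the requested fields.
        fields = {}
        for line in response.splitlines():
            key, sep, value = line.partition(":")
            if sep and key.strip() in EXPECTED_FIELDS:
                fields[key.strip()] = value.strip()
        return fields

    annotation = parse_annotation(response)
    print(annotation.get("caption"))
    print(annotation.get("refined_text"))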
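Note that run_model only produces audio on the Transformers path: with USE_TRANSFORMERS = True and return_audio=True it returns the talker waveform already scaled to int16 PCM. If you want to keep that output, a sketch using the soundfile package could look like the following; the 24 kHz sample rate is an assumption about the talker's output rate, not something this notebook states, so verify it against the model card:

    import soundfile as sf

    response, audio = run_model(model=model, messages=messages, processor=processor,
                                return_audio=True, use_audio_in_video=USE_AUDIO_IN_VIDEO)
    if audio is not None:
        # run_model already converted the waveform to int16 PCM.
        sf.write("output.wav", audio, samplerate=24000)  # assumed rate; check the model card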