Upload AmirKerr_ThisPerson_0.py with huggingface_hub
AmirKerr_ThisPerson_0.py  CHANGED  (+10 -8)
@@ -22,16 +22,17 @@
 try:
     import torch
     from diffusers import DiffusionPipeline
-    from diffusers.utils import load_image
+    from diffusers.utils import load_image, export_to_video
 
     # switch to "mps" for apple devices
     pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2509", dtype=torch.bfloat16, device_map="cuda")
     pipe.load_lora_weights("AmirKerr/ThisPerson")
 
-    prompt = "
-    input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/
+    prompt = "A man with short gray hair plays a red electric guitar."
+    input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png")
 
-    image = pipe(image=input_image, prompt=prompt).
+    image = pipe(image=input_image, prompt=prompt).frames[0]
+    export_to_video(output, "output.mp4")
     with open('AmirKerr_ThisPerson_0.txt', 'w', encoding='utf-8') as f:
         f.write('Everything was good in AmirKerr_ThisPerson_0.txt')
 except Exception as e:
@@ -48,16 +49,17 @@ except Exception as e:
         f.write('''```CODE:
 import torch
 from diffusers import DiffusionPipeline
-from diffusers.utils import load_image
+from diffusers.utils import load_image, export_to_video
 
 # switch to "mps" for apple devices
 pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image-Edit-2509", dtype=torch.bfloat16, device_map="cuda")
 pipe.load_lora_weights("AmirKerr/ThisPerson")
 
-prompt = "
-input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/
+prompt = "A man with short gray hair plays a red electric guitar."
+input_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png")
 
-image = pipe(image=input_image, prompt=prompt).
+image = pipe(image=input_image, prompt=prompt).frames[0]
+export_to_video(output, "output.mp4")
 ```
 
 ERROR:
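Note: in the added lines the pipeline result is bound to `image`, so `export_to_video(output, "output.mp4")` references an undefined `output` and will raise a NameError inside the try block; Qwen-Image-Edit-2509 is also an image-editing pipeline, so its output presumably exposes `.images` rather than `.frames`. A minimal sketch of what the snippet appears to intend, under those assumptions and saving a PNG instead of exporting a video, could look like this:

```python
# A minimal sketch, not the committed code: assumes the Qwen image-edit
# pipeline returns edited images via `.images` and saves a PNG instead of
# calling export_to_video on an undefined variable.
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import load_image

# switch to "mps" for apple devices
pipe = DiffusionPipeline.from_pretrained(
    "Qwen/Qwen-Image-Edit-2509", dtype=torch.bfloat16, device_map="cuda"
)
pipe.load_lora_weights("AmirKerr/ThisPerson")

prompt = "A man with short gray hair plays a red electric guitar."
input_image = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/guitar-man.png"
)

# Image-editing pipelines return PIL images; take the first one and save it.
image = pipe(image=input_image, prompt=prompt).images[0]
image.save("output.png")
```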