diff --git a/app.py b/app.py
index 2a3928e1897e28ac9c85f8527aad31c683ac9a5f..0ad403b0260bcc99a5663df5d21eb0e7400e6429 100644
--- a/app.py
+++ b/app.py
@@ -15,7 +15,8 @@ from typing import *
import torch
import numpy as np
from PIL import Image
-import tempfile
+import base64
+import io
from trellis2.modules.sparse import SparseTensor
from trellis2.pipelines import Trellis2ImageTo3DPipeline
from trellis2.renderers import EnvMap
@@ -25,7 +26,247 @@ import o_voxel
MAX_SEED = np.iinfo(np.int32).max
TMP_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tmp')
-os.makedirs(TMP_DIR, exist_ok=True)
+MODES = [
+ {"name": "Normal", "icon": "assets/app/normal.png", "render_key": "normal"},
+ {"name": "Clay render", "icon": "assets/app/clay.png", "render_key": "clay"},
+ {"name": "Base color", "icon": "assets/app/basecolor.png", "render_key": "base_color"},
+ {"name": "HDRI forest", "icon": "assets/app/hdri_forest.png", "render_key": "shaded_forest"},
+ {"name": "HDRI sunset", "icon": "assets/app/hdri_sunset.png", "render_key": "shaded_sunset"},
+ {"name": "HDRI courtyard", "icon": "assets/app/hdri_courtyard.png", "render_key": "shaded_courtyard"},
+]
+STEPS = 8
+DEFAULT_MODE = 3
+DEFAULT_STEP = 3
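+# The previewer pre-renders STEPS snapshots for every entry in MODES; each mode's
+# 'render_key' picks the matching buffer from the renderer output, and the script
+# injected via `head` switches between the baked images on the client side.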
+
+
+css = """
+/* Override Gradio default styles */
+.stepper-wrapper {
+ padding: 0;
+}
+
+.stepper-container {
+ padding: 0;
+ align-items: center;
+}
+
+.step-button {
+ flex-direction: row;
+}
+
+.step-connector {
+ transform: none;
+}
+
+.step-number {
+ width: 16px;
+ height: 16px;
+}
+
+.step-label {
+ position: relative;
+ bottom: 0;
+}
+
+.wrap.center.full {
+ inset: 0;
+ height: 100%;
+}
+
+.wrap.center.full.translucent {
+ background: var(--block-background-fill);
+}
+
+.meta-text-center {
+ display: block !important;
+ position: absolute !important;
+ top: unset !important;
+ bottom: 0 !important;
+ right: 0 !important;
+ transform: unset !important;
+}
+
+
+/* Previewer */
+.previewer-container {
+ font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif;
+ width: 100%;
+ height: 722px;
+ margin: 0 auto;
+ padding: 20px;
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ justify-content: center;
+}
+
+/* Row 1: Display Modes */
+.previewer-container .mode-row {
+ width: 100%;
+ display: flex;
+ gap: 8px;
+ justify-content: center;
+ margin-bottom: 20px;
+ flex-wrap: wrap;
+}
+.previewer-container .mode-btn {
+ width: 24px;
+ height: 24px;
+ border-radius: 50%;
+ cursor: pointer;
+ opacity: 0.5;
+ transition: all 0.2s;
+ border: 2px solid #ddd;
+ object-fit: cover;
+}
+.previewer-container .mode-btn:hover { opacity: 0.9; transform: scale(1.1); }
+.previewer-container .mode-btn.active {
+ opacity: 1;
+ border-color: var(--color-accent);
+ transform: scale(1.1);
+}
+
+/* Row 2: Display Image */
+.previewer-container .display-row {
+ margin-bottom: 20px;
+ min-height: 400px;
+ width: 100%;
+ flex-grow: 1;
+ display: flex;
+ justify-content: center;
+ align-items: center;
+}
+.previewer-container .previewer-main-image {
+ max-width: 100%;
+ max-height: 100%;
+ flex-grow: 1;
+ object-fit: contain;
+ display: none;
+}
+.previewer-container .previewer-main-image.visible {
+ display: block;
+}
+
+/* Row 3: Custom HTML Slider */
+.previewer-container .slider-row {
+ width: 100%;
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ gap: 10px;
+ padding: 0 10px;
+}
+
+.previewer-container input[type=range] {
+ -webkit-appearance: none;
+ width: 100%;
+ max-width: 400px;
+ background: transparent;
+}
+.previewer-container input[type=range]::-webkit-slider-runnable-track {
+ width: 100%;
+ height: 8px;
+ cursor: pointer;
+ background: #ddd;
+ border-radius: 5px;
+}
+.previewer-container input[type=range]::-webkit-slider-thumb {
+ height: 20px;
+ width: 20px;
+ border-radius: 50%;
+ background: var(--color-accent);
+ cursor: pointer;
+ -webkit-appearance: none;
+ margin-top: -6px;
+ box-shadow: 0 2px 5px rgba(0,0,0,0.2);
+ transition: transform 0.1s;
+}
+.previewer-container input[type=range]::-webkit-slider-thumb:hover {
+ transform: scale(1.2);
+}
+"""
+
+
+head = """
+
+"""
+
+
+empty_html = f"""
+
+"""
+
+
+def image_to_base64(image):
+ buffered = io.BytesIO()
+ image = image.convert("RGB")
+ image.save(buffered, format="jpeg", quality=85)
+ img_str = base64.b64encode(buffered.getvalue()).decode()
+ return f"data:image/jpeg;base64,{img_str}"
def start_session(req: gr.Request):
@@ -38,19 +279,20 @@ def end_session(req: gr.Request):
shutil.rmtree(user_dir)
-def remove_background(input: Image.Image) -> Image.Image:
- with tempfile.NamedTemporaryFile(suffix='.png') as f:
- input = input.convert('RGB')
- input.save(f.name)
- output = rmbg_client.predict(handle_file(f.name), api_name="/image")[0][0]
- output = Image.open(output)
- return output
+def remove_background(input: Image.Image, user_dir: str) -> Image.Image:
+ input = input.convert('RGB')
+ os.makedirs(user_dir, exist_ok=True)
+ input.save(os.path.join(user_dir, 'input.png'))
+ output = rmbg_client.predict(handle_file(os.path.join(user_dir, 'input.png')), api_name="/image")[0][0]
+ output = Image.open(output)
+ return output
-def preprocess_image(input: Image.Image) -> Image.Image:
+def preprocess_image(input: Image.Image, req: gr.Request) -> Image.Image:
"""
Preprocess the input image.
"""
+ user_dir = os.path.join(TMP_DIR, str(req.session_hash))
# if has alpha channel, use it directly; otherwise, remove background
has_alpha = False
if input.mode == 'RGBA':
@@ -64,7 +306,7 @@ def preprocess_image(input: Image.Image) -> Image.Image:
if has_alpha:
output = input
else:
- output = remove_background(input)
+ output = remove_background(input, user_dir)
output_np = np.array(output)
alpha = output_np[:, :, 3]
bbox = np.argwhere(alpha > 0.8 * 255)
@@ -126,24 +368,7 @@ def image_to_3d(
req: gr.Request,
progress=gr.Progress(track_tqdm=True),
) -> str:
- """
- Convert an image to a 3D model.
-
- Args:
- image (Image.Image): The input image.
- seed (int): The random seed.
- ss_guidance_strength (float): The guidance strength for sparse structure generation.
- ss_sampling_steps (int): The number of sampling steps for sparse structure generation.
- shape_slat_guidance_strength (float): The guidance strength for shape slat generation.
- shape_slat_sampling_steps (int): The number of sampling steps for shape slat generation.
- tex_slat_guidance_strength (float): The guidance strength for texture slat generation.
- tex_slat_sampling_steps (int): The number of sampling steps for texture slat generation.
-
- Returns:
- str: The path to the preview video of the 3D model.
- str: The path to the 3D model.
- """
- user_dir = os.path.join(TMP_DIR, str(req.session_hash))
+ # --- Sampling ---
outputs, latents = pipeline.run(
image,
seed=seed,
@@ -175,13 +400,66 @@ def image_to_3d(
)
mesh = outputs[0]
mesh.simplify(16777216) # nvdiffrast limit
- images = render_utils.make_pbr_vis_frames(
- render_utils.render_snapshot(mesh, resolution=1024, r=2, fov=36, envmap=envmap),
- resolution=1024
- )
+ images = render_utils.render_snapshot(mesh, resolution=1024, r=2, fov=36, nviews=STEPS, envmap=envmap)
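+ # `images` maps each render key ('normal', 'clay', 'base_color', 'shaded_*') to a list of STEPS frames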
state = pack_state(latents)
torch.cuda.empty_cache()
- return state, [Image.fromarray(image) for image in images]
+
+ # --- HTML Construction ---
+ # The stack of len(MODES) * STEPS images (6 modes x 8 steps = 48)
+ images_html = ""
+ for m_idx, mode in enumerate(MODES):
+ for s_idx in range(STEPS):
+ # ID Naming Convention: view-m{mode}-s{step}
+ unique_id = f"view-m{m_idx}-s{s_idx}"
+
+ # Logic: only the default mode/step (DEFAULT_MODE, DEFAULT_STEP) is visible initially
+ is_visible = (m_idx == DEFAULT_MODE and s_idx == DEFAULT_STEP)
+ vis_class = "visible" if is_visible else ""
+
+ # Image Source
+ img_base64 = image_to_base64(Image.fromarray(images[mode['render_key']][s_idx]))
+
+ # Render the Tag
+ images_html += f"""
+
+ """
+
+ # Button Row HTML
+ btns_html = ""
+ for idx, mode in enumerate(MODES):
+ active_class = "active" if idx == DEFAULT_MODE else ""
+ # Note: onclick calls the JS handler defined in the `head` script
+ btns_html += f"""
+
+ """
+
+ # Assemble the full component
+ full_html = f"""
+
+
+
+ {images_html}
+
+
+
+
+ {btns_html}
+
+
+
+
+
+
+
+ """
+
+ return state, full_html
@spaces.GPU(duration=60)
@@ -206,6 +484,7 @@ def extract_glb(
user_dir = os.path.join(TMP_DIR, str(req.session_hash))
shape_slat, tex_slat, res = unpack_state(state)
mesh = pipeline.decode_latent(shape_slat, tex_slat, res)[0]
+ mesh.simplify(16777216)  # nvdiffrast limit
glb = o_voxel.postprocess.to_glb(
vertices=mesh.vertices,
faces=mesh.faces,
@@ -229,36 +508,6 @@ def extract_glb(
return glb_path, glb_path
-css = """
-.stepper-wrapper {
- padding: 0;
-}
-
-.stepper-container {
- padding: 0;
- align-items: center;
-}
-
-.step-button {
- flex-direction: row;
-}
-
-.step-connector {
- transform: none;
-}
-
-.step-number {
- width: 16px;
- height: 16px;
-}
-
-.step-label {
- position: relative;
- bottom: 0;
-}
-"""
-
-
with gr.Blocks(delete_cache=(600, 600)) as demo:
gr.Markdown("""
## Image to 3D Asset with [TRELLIS.2](https://microsoft.github.io/trellis.2)
@@ -270,11 +519,13 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
with gr.Column(scale=1, min_width=360):
image_prompt = gr.Image(label="Image Prompt", format="png", image_mode="RGBA", type="pil", height=400)
- resolution = gr.Radio(["512", "1024", "1536"], label="Resolution", value="512")
+ resolution = gr.Radio(["512", "1024", "1536"], label="Resolution", value="1024")
seed = gr.Slider(0, MAX_SEED, label="Seed", value=0, step=1)
randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
- decimation_target = gr.Slider(10000, 500000, label="Decimation Target", value=100000, step=10000)
+ decimation_target = gr.Slider(100000, 1000000, label="Decimation Target", value=500000, step=10000)
texture_size = gr.Slider(1024, 4096, label="Texture Size", value=2048, step=1024)
+
+ generate_btn = gr.Button("Generate")
with gr.Accordion(label="Advanced Settings", open=False):
gr.Markdown("Stage 1: Sparse Structure Generation")
@@ -296,22 +547,20 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
tex_slat_sampling_steps = gr.Slider(1, 50, label="Sampling Steps", value=12, step=1)
tex_slat_rescale_t = gr.Slider(1.0, 6.0, label="Rescale T", value=3.0, step=0.1)
- generate_btn = gr.Button("Generate")
-
with gr.Column(scale=10):
with gr.Walkthrough(selected=0) as walkthrough:
with gr.Step("Preview", id=0):
- preview_output = gr.Gallery(label="3D Asset Preview", height=800, show_label=True, preview=True)
+ preview_output = gr.HTML(empty_html, label="3D Asset Preview", show_label=True, container=True, js_on_load="modify_html_container()")
extract_btn = gr.Button("Extract GLB")
with gr.Step("Extract", id=1):
- glb_output = gr.Model3D(label="Extracted GLB", height=800, show_label=True, display_mode="solid", clear_color=(0.25, 0.25, 0.25, 1.0))
+ glb_output = gr.Model3D(label="Extracted GLB", height=724, show_label=True, display_mode="solid", clear_color=(0.25, 0.25, 0.25, 1.0))
download_btn = gr.DownloadButton(label="Download GLB")
with gr.Column(scale=1, min_width=172):
examples = gr.Examples(
examples=[
- f'assets/example_images/{image}'
- for image in os.listdir("assets/example_images")
+ f'assets/example_image/{image}'
+ for image in os.listdir("assets/example_image")
],
inputs=[image_prompt],
fn=preprocess_image,
@@ -361,15 +610,33 @@ with gr.Blocks(delete_cache=(600, 600)) as demo:
# Launch the Gradio app
if __name__ == "__main__":
+ os.makedirs(TMP_DIR, exist_ok=True)
+
+ # Construct UI components: pre-encode the mode icons as base64 data URIs
+ for mode in MODES:
+ mode['icon_base64'] = image_to_base64(Image.open(mode['icon']))
+
rmbg_client = Client("briaai/BRIA-RMBG-2.0")
pipeline = Trellis2ImageTo3DPipeline.from_pretrained('microsoft/TRELLIS.2-4B')
pipeline.rembg_model = None
pipeline.low_vram = False
pipeline.cuda()
- envmap = EnvMap(torch.tensor(
- cv2.cvtColor(cv2.imread('assets/hdri/forest.exr', cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB),
- dtype=torch.float32, device='cuda'
- ))
+ envmap = {
+ 'forest': EnvMap(torch.tensor(
+ cv2.cvtColor(cv2.imread('assets/hdri/forest.exr', cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB),
+ dtype=torch.float32, device='cuda'
+ )),
+ 'sunset': EnvMap(torch.tensor(
+ cv2.cvtColor(cv2.imread('assets/hdri/sunset.exr', cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB),
+ dtype=torch.float32, device='cuda'
+ )),
+ 'courtyard': EnvMap(torch.tensor(
+ cv2.cvtColor(cv2.imread('assets/hdri/courtyard.exr', cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB),
+ dtype=torch.float32, device='cuda'
+ )),
+ }
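+ # Keys here must match the 'shaded_<key>' render outputs referenced by MODES' render_key values.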
- demo.launch(css=css)
\ No newline at end of file
+ demo.launch(css=css, head=head)
diff --git a/assets/app/basecolor.png b/assets/app/basecolor.png
new file mode 100644
index 0000000000000000000000000000000000000000..e7dbeaf6344b4b292757964eca2c6960e7e10a68
Binary files /dev/null and b/assets/app/basecolor.png differ
diff --git a/assets/app/clay.png b/assets/app/clay.png
new file mode 100644
index 0000000000000000000000000000000000000000..e02866a15d1101d1d1cd3f6098c7025c0763cfdb
Binary files /dev/null and b/assets/app/clay.png differ
diff --git a/assets/app/hdri_city.png b/assets/app/hdri_city.png
new file mode 100644
index 0000000000000000000000000000000000000000..43e1c2a54c3bb8aa376613c698268fca0aea9b29
Binary files /dev/null and b/assets/app/hdri_city.png differ
diff --git a/assets/app/hdri_courtyard.png b/assets/app/hdri_courtyard.png
new file mode 100644
index 0000000000000000000000000000000000000000..4261ad62862163d32c22ac38495defdbdf3bebd0
Binary files /dev/null and b/assets/app/hdri_courtyard.png differ
diff --git a/assets/app/hdri_forest.png b/assets/app/hdri_forest.png
new file mode 100644
index 0000000000000000000000000000000000000000..7617fe19adf536c12db3f4bf7bc1f6a42f54f8b7
Binary files /dev/null and b/assets/app/hdri_forest.png differ
diff --git a/assets/app/hdri_interior.png b/assets/app/hdri_interior.png
new file mode 100644
index 0000000000000000000000000000000000000000..e00c1d656c94c71402e93067cac64725425754d0
Binary files /dev/null and b/assets/app/hdri_interior.png differ
diff --git a/assets/app/hdri_night.png b/assets/app/hdri_night.png
new file mode 100644
index 0000000000000000000000000000000000000000..f0423d221069904b0de32c82956675a210e8c375
Binary files /dev/null and b/assets/app/hdri_night.png differ
diff --git a/assets/app/hdri_studio.png b/assets/app/hdri_studio.png
new file mode 100644
index 0000000000000000000000000000000000000000..0f5a4e8d5c5717b6ca4217e6e081e695e2bec264
Binary files /dev/null and b/assets/app/hdri_studio.png differ
diff --git a/assets/app/hdri_sunrise.png b/assets/app/hdri_sunrise.png
new file mode 100644
index 0000000000000000000000000000000000000000..9cee3bb066a0f01ddf84b088da31fba1af6648d0
Binary files /dev/null and b/assets/app/hdri_sunrise.png differ
diff --git a/assets/app/hdri_sunset.png b/assets/app/hdri_sunset.png
new file mode 100644
index 0000000000000000000000000000000000000000..bd67070912b846b7cb353d13696f4aff4be40831
Binary files /dev/null and b/assets/app/hdri_sunset.png differ
diff --git a/assets/app/normal.png b/assets/app/normal.png
new file mode 100644
index 0000000000000000000000000000000000000000..352e92b5750414ee1c2562d1c719468ce5da2883
Binary files /dev/null and b/assets/app/normal.png differ
diff --git a/assets/example_images/0a34fae7ba57cb8870df5325b9c30ea474def1b0913c19c596655b85a79fdee4.webp b/assets/example_images/0a34fae7ba57cb8870df5325b9c30ea474def1b0913c19c596655b85a79fdee4.webp
deleted file mode 100644
index 8cb4652f561eca8ab3aa32f6e00457bc9b9f194a..0000000000000000000000000000000000000000
--- a/assets/example_images/0a34fae7ba57cb8870df5325b9c30ea474def1b0913c19c596655b85a79fdee4.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:83d3765ff57511f11054d62e6beaf52648d277006b6dcb3c1d5f9e03ef502c49
-size 108096
diff --git a/assets/example_images/2bb0932314bae71eec94d0d01a20d3f761ade9664e013b9a9a43c00a2f44163a.webp b/assets/example_images/2bb0932314bae71eec94d0d01a20d3f761ade9664e013b9a9a43c00a2f44163a.webp
deleted file mode 100644
index 962281565b2c60a013e64776313145fd8186e4ef..0000000000000000000000000000000000000000
--- a/assets/example_images/2bb0932314bae71eec94d0d01a20d3f761ade9664e013b9a9a43c00a2f44163a.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:c4e26e3e2f3de96cf396c64f1cef39ac82c13f231e2567fe0fe902dc103bf949
-size 169506
diff --git a/assets/example_images/3723615e3766742ae35b09517152a58c36d62b707bc60d7f76f8a6c922add2c0.webp b/assets/example_images/3723615e3766742ae35b09517152a58c36d62b707bc60d7f76f8a6c922add2c0.webp
deleted file mode 100644
index 0af9ee139499ab072b6068e5fb1d974f99103a30..0000000000000000000000000000000000000000
--- a/assets/example_images/3723615e3766742ae35b09517152a58c36d62b707bc60d7f76f8a6c922add2c0.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:88c65db47f93beed7d6b184c32f9fa27c5f64ced2e3d4f535ad628179763cff8
-size 115508
diff --git a/assets/example_images/454e7d8a30486c0635369936e7bec5677b78ae5f436d0e46af0d533738be859f.webp b/assets/example_images/454e7d8a30486c0635369936e7bec5677b78ae5f436d0e46af0d533738be859f.webp
deleted file mode 100644
index 0a3a3a893b1fa0914e63d79e82d7894afe552483..0000000000000000000000000000000000000000
--- a/assets/example_images/454e7d8a30486c0635369936e7bec5677b78ae5f436d0e46af0d533738be859f.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:47be8271d2d1d27fc334cc470d63534807ad44c6f50cee18a204125a1c6b66a4
-size 140210
diff --git a/assets/example_images/50b70c5f88a5961d2c786158655d2fce5c3b214b2717956500a66a4e5b5fbe37.webp b/assets/example_images/50b70c5f88a5961d2c786158655d2fce5c3b214b2717956500a66a4e5b5fbe37.webp
deleted file mode 100644
index 01a571a7a2ad661e29c1a4c3b5ba68353451cd70..0000000000000000000000000000000000000000
--- a/assets/example_images/50b70c5f88a5961d2c786158655d2fce5c3b214b2717956500a66a4e5b5fbe37.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:933c10aeebb2920b08cb34a08ab1878817b64eb9e30efdcc3d76731069fc0849
-size 131488
diff --git a/assets/example_images/51b1b31d40476b123db70a51ae0b5f8b8d0db695b616bc2ec4e6324eb178fc14.webp b/assets/example_images/51b1b31d40476b123db70a51ae0b5f8b8d0db695b616bc2ec4e6324eb178fc14.webp
deleted file mode 100644
index 60375c981e776e3667287f8df99b04f12098f1df..0000000000000000000000000000000000000000
--- a/assets/example_images/51b1b31d40476b123db70a51ae0b5f8b8d0db695b616bc2ec4e6324eb178fc14.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2d5224235eeadadfdb93ab37664055ef55ffd930b085d268cd62c5faf9d101de
-size 136872
diff --git a/assets/example_images/52284bf45134c59a94be150a5b18b9cc3619ada4b30ded8d8d0288383b8c016f.webp b/assets/example_images/52284bf45134c59a94be150a5b18b9cc3619ada4b30ded8d8d0288383b8c016f.webp
deleted file mode 100644
index ddd363de151adc06a1815d148a69fce18a238942..0000000000000000000000000000000000000000
--- a/assets/example_images/52284bf45134c59a94be150a5b18b9cc3619ada4b30ded8d8d0288383b8c016f.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:86b909e6847118f2cec6e9c0945e6a78154ce5918f4ba14c0b0afa0be2e8647f
-size 150020
diff --git a/assets/example_images/5c80e5e03a3b60b6f03eaf555ba1dafc0e4230c472d7e8c8e2c5ca0a0dfcef10.webp b/assets/example_images/5c80e5e03a3b60b6f03eaf555ba1dafc0e4230c472d7e8c8e2c5ca0a0dfcef10.webp
deleted file mode 100644
index 7a10d813fb288d647e49f2b8bb2f5cdf578f873d..0000000000000000000000000000000000000000
--- a/assets/example_images/5c80e5e03a3b60b6f03eaf555ba1dafc0e4230c472d7e8c8e2c5ca0a0dfcef10.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:fbd98cf5da79c56f8efc6cf86804391e71bdad2e935d21a7262472653a0674dc
-size 126914
diff --git a/assets/example_images/61fea9d08e0bd9a067c9f696621dc89165afb5aab318d0701bc025d7863dabf0.webp b/assets/example_images/61fea9d08e0bd9a067c9f696621dc89165afb5aab318d0701bc025d7863dabf0.webp
deleted file mode 100644
index 021980a74ef652ae55bef0272527aab211275753..0000000000000000000000000000000000000000
--- a/assets/example_images/61fea9d08e0bd9a067c9f696621dc89165afb5aab318d0701bc025d7863dabf0.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:dfad86b88eb81da36a5acf77891822c042e991436bb004c7d75d8b19e89c45bd
-size 110562
diff --git a/assets/example_images/6b6d89d46d7f53e6409dbe695a9ef8f97c5257e641da35015a78579e903acdad.webp b/assets/example_images/6b6d89d46d7f53e6409dbe695a9ef8f97c5257e641da35015a78579e903acdad.webp
deleted file mode 100644
index 8f787a298a8ba70c64beff1ad88cc9b96d4d58b9..0000000000000000000000000000000000000000
Binary files a/assets/example_images/6b6d89d46d7f53e6409dbe695a9ef8f97c5257e641da35015a78579e903acdad.webp and /dev/null differ
diff --git a/assets/example_images/7b540da337f576ffce2adc36c7459b9bbbfd845ab2160a6abbe986f1f906f6cd.webp b/assets/example_images/7b540da337f576ffce2adc36c7459b9bbbfd845ab2160a6abbe986f1f906f6cd.webp
deleted file mode 100644
index b7a521fcdb71be9f5b095bd0c2a3a8b3ed9a32fc..0000000000000000000000000000000000000000
Binary files a/assets/example_images/7b540da337f576ffce2adc36c7459b9bbbfd845ab2160a6abbe986f1f906f6cd.webp and /dev/null differ
diff --git a/assets/example_images/7d7659d5943e85a73a4ffe33c6dd48f5d79601e9bf11b103516f419ce9fbf713.webp b/assets/example_images/7d7659d5943e85a73a4ffe33c6dd48f5d79601e9bf11b103516f419ce9fbf713.webp
deleted file mode 100644
index 60dd646a277d93fb76441ec47f88e22c4cf6967a..0000000000000000000000000000000000000000
--- a/assets/example_images/7d7659d5943e85a73a4ffe33c6dd48f5d79601e9bf11b103516f419ce9fbf713.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6707893380d3c791a9a2d33bc0caa5f10719f7168d25f8d447fa26f4420919db
-size 108942
diff --git a/assets/example_images/8aa698c59aab48d4ce69a558d9159107890e3d64e522af404d9635ad0be21f88.webp b/assets/example_images/8aa698c59aab48d4ce69a558d9159107890e3d64e522af404d9635ad0be21f88.webp
deleted file mode 100644
index f12cbf93875f086f3084efc29072f8fb579f3143..0000000000000000000000000000000000000000
--- a/assets/example_images/8aa698c59aab48d4ce69a558d9159107890e3d64e522af404d9635ad0be21f88.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a319ace2549835da92a6ffa5db73eebd7fce29079e5865cb32dfbdac21d9b900
-size 100414
diff --git a/assets/example_images/9c306c7bd0e857285f536fb500c0828e5fad4e23c3ceeab92c888c568fa19101.webp b/assets/example_images/9c306c7bd0e857285f536fb500c0828e5fad4e23c3ceeab92c888c568fa19101.webp
deleted file mode 100644
index b668a21a0f5e8a3c0730377da02357cd6e30b869..0000000000000000000000000000000000000000
--- a/assets/example_images/9c306c7bd0e857285f536fb500c0828e5fad4e23c3ceeab92c888c568fa19101.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0d42562b49df17554583f2acbe18f8a26c9075cdab278caefab27aa7587df4ac
-size 115424
diff --git a/assets/example_images/a13d176cd7a7d457b42d1b32223bcff1a45dafbbb42c6a272b97d65ac2f2eb52.webp b/assets/example_images/a13d176cd7a7d457b42d1b32223bcff1a45dafbbb42c6a272b97d65ac2f2eb52.webp
deleted file mode 100644
index 705a2b5ffe6a32291abfeb21f9a6494f98fa2a60..0000000000000000000000000000000000000000
Binary files a/assets/example_images/a13d176cd7a7d457b42d1b32223bcff1a45dafbbb42c6a272b97d65ac2f2eb52.webp and /dev/null differ
diff --git a/assets/example_images/be7deb26f4fdd2080d4288668af4c39e526564282c579559ff8a4126ca4ed6c1.webp b/assets/example_images/be7deb26f4fdd2080d4288668af4c39e526564282c579559ff8a4126ca4ed6c1.webp
deleted file mode 100644
index dbc38af43a3dade7feb152d14608072686389465..0000000000000000000000000000000000000000
--- a/assets/example_images/be7deb26f4fdd2080d4288668af4c39e526564282c579559ff8a4126ca4ed6c1.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0928c46a44b8adcaa04cf6c3720b7ee4f6f5013717031cd3373a8ec1b92aef9d
-size 150786
diff --git a/assets/example_images/c3d714bc125f06ce1187799d5ca10736b4064a24c141e627089aad2bdedf7aa5.webp b/assets/example_images/c3d714bc125f06ce1187799d5ca10736b4064a24c141e627089aad2bdedf7aa5.webp
deleted file mode 100644
index 4203f61711b705c5e5e0e6f49e52f9b555c878a9..0000000000000000000000000000000000000000
--- a/assets/example_images/c3d714bc125f06ce1187799d5ca10736b4064a24c141e627089aad2bdedf7aa5.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e211aff13d1e7dcdf592d68ea34e03794c4464c5b159224d7c508d9ecae5eb59
-size 151940
diff --git a/assets/example_images/c9340e744541f310bf89838f652602961d3e5950b31cd349bcbfc7e59e15cd2e.webp b/assets/example_images/c9340e744541f310bf89838f652602961d3e5950b31cd349bcbfc7e59e15cd2e.webp
deleted file mode 100644
index fc11f391f770d92966ef73037c8769f807337875..0000000000000000000000000000000000000000
--- a/assets/example_images/c9340e744541f310bf89838f652602961d3e5950b31cd349bcbfc7e59e15cd2e.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4533ce41604e7aff386c71f37f0b2727242a4615ef0e37c3cd62273678ad1809
-size 142682
diff --git a/assets/example_images/cd3c309f17eee5ad6afe4e001765893ade20b653f611365c93d158286b4cee96.webp b/assets/example_images/cd3c309f17eee5ad6afe4e001765893ade20b653f611365c93d158286b4cee96.webp
deleted file mode 100644
index aca11640af818062e7fb1077b70bf01da1abaa9f..0000000000000000000000000000000000000000
--- a/assets/example_images/cd3c309f17eee5ad6afe4e001765893ade20b653f611365c93d158286b4cee96.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6b7c45936606c2f24af55c9e32426c25b819abb8cabf255ff9592f10bf0e3e60
-size 246420
diff --git a/assets/example_images/cdf996a6cc218918eeb90209891ce306a230e6d9cca2a3d9bbb37c6d7b6bd318.webp b/assets/example_images/cdf996a6cc218918eeb90209891ce306a230e6d9cca2a3d9bbb37c6d7b6bd318.webp
deleted file mode 100644
index 6d79bbc33c14ba034cbc6f5dc7ab443e0f92d445..0000000000000000000000000000000000000000
--- a/assets/example_images/cdf996a6cc218918eeb90209891ce306a230e6d9cca2a3d9bbb37c6d7b6bd318.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:eaf705e6205f5101af261fac1a3477121cf377f92ff93feaf506eada70de98d7
-size 217676
diff --git a/assets/example_images/e10465728ebea1e055524f97ac5d47cebf82a672f07a05409aa07d826c9d9f37.webp b/assets/example_images/e10465728ebea1e055524f97ac5d47cebf82a672f07a05409aa07d826c9d9f37.webp
deleted file mode 100644
index a773e5c7e9a3e0078acdf30dbace00de6e22cd18..0000000000000000000000000000000000000000
Binary files a/assets/example_images/e10465728ebea1e055524f97ac5d47cebf82a672f07a05409aa07d826c9d9f37.webp and /dev/null differ
diff --git a/assets/example_images/ee8ecf658fde9c58830c021b2e30d0d5e7e492ef52febe7192a6c74fbf1b0472.webp b/assets/example_images/ee8ecf658fde9c58830c021b2e30d0d5e7e492ef52febe7192a6c74fbf1b0472.webp
deleted file mode 100644
index 1fb0eb8255a61d9c9849f10592663190fb6bade7..0000000000000000000000000000000000000000
--- a/assets/example_images/ee8ecf658fde9c58830c021b2e30d0d5e7e492ef52febe7192a6c74fbf1b0472.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:d3636477a76b626de8749d245f916261b17736f8fd7aa4fe097cd4bbac874d61
-size 228244
diff --git a/assets/example_images/f5332118a0cda9cd13fe13d4be2b00437e702d1f9af51ebb6b75219a572a6ce9.webp b/assets/example_images/f5332118a0cda9cd13fe13d4be2b00437e702d1f9af51ebb6b75219a572a6ce9.webp
deleted file mode 100644
index 3e42c5a81561e46e08ee9693a3bcbc78f487c08d..0000000000000000000000000000000000000000
--- a/assets/example_images/f5332118a0cda9cd13fe13d4be2b00437e702d1f9af51ebb6b75219a572a6ce9.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0f49599c52ec02ea0b19e5e333dbd48d072c56f4a6300a35756ce50832d59ca6
-size 158132
diff --git a/assets/example_images/f8a7eafe26a4f3ebd26a9e7d0289e4a40b5a93e9234e94ec3e1071c352acc65a.webp b/assets/example_images/f8a7eafe26a4f3ebd26a9e7d0289e4a40b5a93e9234e94ec3e1071c352acc65a.webp
deleted file mode 100644
index 9376c1e27b171e4ef5c2d06d0c84d539c314d161..0000000000000000000000000000000000000000
--- a/assets/example_images/f8a7eafe26a4f3ebd26a9e7d0289e4a40b5a93e9234e94ec3e1071c352acc65a.webp
+++ /dev/null
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3cb85a0afac1465b9033327f3ba8d00f4d8e6a36fef7d39bb4fd4eb22a610385
-size 141104
diff --git a/assets/example_images/f94e2b76494ce2cf1874611273e5fb3d76b395793bb5647492fa85c2ce0a248b.webp b/assets/example_images/f94e2b76494ce2cf1874611273e5fb3d76b395793bb5647492fa85c2ce0a248b.webp
deleted file mode 100644
index 9c24d0eb0a3d0c2c7ddc69e3ae39f4e9d85b9243..0000000000000000000000000000000000000000
Binary files a/assets/example_images/f94e2b76494ce2cf1874611273e5fb3d76b395793bb5647492fa85c2ce0a248b.webp and /dev/null differ
diff --git a/requirements.txt b/requirements.txt
index 13a044091aaaa55a1f75274e84c2320dfb4f6caa..46849ad5eaccfcfd491df9e8ac7c1d18a069327a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -15,7 +15,6 @@ zstandard==0.25.0
kornia==0.8.2
timm==1.0.22
git+https://github.com/EasternJournalist/utils3d.git@9a4eb15e4021b67b12c460c7057d642626897ec8
-https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.3/flash_attn-2.7.3+cu12torch2.6cxx11abiFALSE-cp310-cp310-linux_x86_64.whl
https://github.com/JeffreyXiang/Storages/releases/download/Space_Wheels_251210/flash_attn_3-3.0.0b1-cp39-abi3-linux_x86_64.whl
https://github.com/JeffreyXiang/Storages/releases/download/Space_Wheels_251210/cumesh-0.0.1-cp310-cp310-linux_x86_64.whl
https://github.com/JeffreyXiang/Storages/releases/download/Space_Wheels_251210/flex_gemm-0.0.1-cp310-cp310-linux_x86_64.whl
diff --git a/trellis2/models/__init__.py b/trellis2/models/__init__.py
old mode 100755
new mode 100644
diff --git a/trellis2/models/sc_vaes/fdg_vae.py b/trellis2/models/sc_vaes/fdg_vae.py
old mode 100755
new mode 100644
diff --git a/trellis2/models/sc_vaes/sparse_unet_vae.py b/trellis2/models/sc_vaes/sparse_unet_vae.py
old mode 100755
new mode 100644
diff --git a/trellis2/models/sparse_elastic_mixin.py b/trellis2/models/sparse_elastic_mixin.py
old mode 100755
new mode 100644
diff --git a/trellis2/models/sparse_structure_flow.py b/trellis2/models/sparse_structure_flow.py
old mode 100755
new mode 100644
diff --git a/trellis2/models/sparse_structure_vae.py b/trellis2/models/sparse_structure_vae.py
old mode 100755
new mode 100644
diff --git a/trellis2/models/structured_latent_flow.py b/trellis2/models/structured_latent_flow.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/attention/__init__.py b/trellis2/modules/attention/__init__.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/attention/config.py b/trellis2/modules/attention/config.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/attention/full_attn.py b/trellis2/modules/attention/full_attn.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/attention/modules.py b/trellis2/modules/attention/modules.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/attention/rope.py b/trellis2/modules/attention/rope.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/image_feature_extractor.py b/trellis2/modules/image_feature_extractor.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/norm.py b/trellis2/modules/norm.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/__init__.py b/trellis2/modules/sparse/__init__.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/attention/__init__.py b/trellis2/modules/sparse/attention/__init__.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/attention/full_attn.py b/trellis2/modules/sparse/attention/full_attn.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/attention/modules.py b/trellis2/modules/sparse/attention/modules.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/attention/rope.py b/trellis2/modules/sparse/attention/rope.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/attention/windowed_attn.py b/trellis2/modules/sparse/attention/windowed_attn.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/basic.py b/trellis2/modules/sparse/basic.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/config.py b/trellis2/modules/sparse/config.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/conv/__init__.py b/trellis2/modules/sparse/conv/__init__.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/conv/config.py b/trellis2/modules/sparse/conv/config.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/conv/conv.py b/trellis2/modules/sparse/conv/conv.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/conv/conv_flex_gemm.py b/trellis2/modules/sparse/conv/conv_flex_gemm.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/conv/conv_spconv.py b/trellis2/modules/sparse/conv/conv_spconv.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/conv/conv_torchsparse.py b/trellis2/modules/sparse/conv/conv_torchsparse.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/linear.py b/trellis2/modules/sparse/linear.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/nonlinearity.py b/trellis2/modules/sparse/nonlinearity.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/norm.py b/trellis2/modules/sparse/norm.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/spatial/__init__.py b/trellis2/modules/sparse/spatial/__init__.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/spatial/basic.py b/trellis2/modules/sparse/spatial/basic.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/spatial/spatial2channel.py b/trellis2/modules/sparse/spatial/spatial2channel.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/transformer/__init__.py b/trellis2/modules/sparse/transformer/__init__.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/transformer/blocks.py b/trellis2/modules/sparse/transformer/blocks.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/sparse/transformer/modulated.py b/trellis2/modules/sparse/transformer/modulated.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/spatial.py b/trellis2/modules/spatial.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/transformer/__init__.py b/trellis2/modules/transformer/__init__.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/transformer/blocks.py b/trellis2/modules/transformer/blocks.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/transformer/modulated.py b/trellis2/modules/transformer/modulated.py
old mode 100755
new mode 100644
diff --git a/trellis2/modules/utils.py b/trellis2/modules/utils.py
old mode 100755
new mode 100644
diff --git a/trellis2/pipelines/__init__.py b/trellis2/pipelines/__init__.py
old mode 100755
new mode 100644
diff --git a/trellis2/pipelines/base.py b/trellis2/pipelines/base.py
old mode 100755
new mode 100644
diff --git a/trellis2/pipelines/rembg/BiRefNet.py b/trellis2/pipelines/rembg/BiRefNet.py
old mode 100755
new mode 100644
diff --git a/trellis2/pipelines/rembg/__init__.py b/trellis2/pipelines/rembg/__init__.py
old mode 100755
new mode 100644
diff --git a/trellis2/pipelines/samplers/__init__.py b/trellis2/pipelines/samplers/__init__.py
old mode 100755
new mode 100644
diff --git a/trellis2/pipelines/samplers/base.py b/trellis2/pipelines/samplers/base.py
old mode 100755
new mode 100644
diff --git a/trellis2/pipelines/samplers/classifier_free_guidance_mixin.py b/trellis2/pipelines/samplers/classifier_free_guidance_mixin.py
old mode 100755
new mode 100644
diff --git a/trellis2/pipelines/samplers/flow_euler.py b/trellis2/pipelines/samplers/flow_euler.py
old mode 100755
new mode 100644
diff --git a/trellis2/pipelines/samplers/guidance_interval_mixin.py b/trellis2/pipelines/samplers/guidance_interval_mixin.py
old mode 100755
new mode 100644
diff --git a/trellis2/pipelines/trellis2_image_to_3d.py b/trellis2/pipelines/trellis2_image_to_3d.py
old mode 100755
new mode 100644
index 8f33344e1def62179d7ad96000bb6a39bb439d96..8d7afd59cff2daaf2060c10a050dd1506bf6bebe
--- a/trellis2/pipelines/trellis2_image_to_3d.py
+++ b/trellis2/pipelines/trellis2_image_to_3d.py
@@ -1,5 +1,4 @@
from typing import *
-import spaces
import torch
import torch.nn as nn
import numpy as np
diff --git a/trellis2/renderers/__init__.py b/trellis2/renderers/__init__.py
old mode 100755
new mode 100644
diff --git a/trellis2/renderers/mesh_renderer.py b/trellis2/renderers/mesh_renderer.py
old mode 100755
new mode 100644
diff --git a/trellis2/renderers/pbr_mesh_renderer.py b/trellis2/renderers/pbr_mesh_renderer.py
old mode 100755
new mode 100644
index cfea1bd59f5742f73be9dacfb749fb4ac197511b..876378fd6bba32b9dcb75cf84a3630f1d3d84026
--- a/trellis2/renderers/pbr_mesh_renderer.py
+++ b/trellis2/renderers/pbr_mesh_renderer.py
@@ -90,6 +90,77 @@ def intrinsics_to_projection(
return ret
+def screen_space_ambient_occlusion(
+ depth: torch.Tensor,
+ normal: torch.Tensor,
+ perspective: torch.Tensor,
+ radius: float = 0.1,
+ bias: float = 1e-6,
+ samples: int = 64,
+ intensity: float = 1.0,
+) -> torch.Tensor:
+ """
+ Screen space ambient occlusion (SSAO)
+
+ Args:
+ depth (torch.Tensor): [H, W, 1] depth image
+ normal (torch.Tensor): [H, W, 3] normal image
+ perspective (torch.Tensor): [4, 4] camera projection matrix
+ radius (float): radius of the SSAO kernel
+ bias (float): bias to avoid self-occlusion
+ samples (int): number of samples to use for the SSAO kernel
+ intensity (float): intensity of the SSAO effect
+ Returns:
+ (torch.Tensor): [H, W, 1] SSAO image
+ """
+ device = depth.device
+ H, W, _ = depth.shape
+
+ fx = perspective[0, 0]
+ fy = perspective[1, 1]
+ cx = perspective[0, 2]
+ cy = perspective[1, 2]
+
+ y_grid, x_grid = torch.meshgrid(
+ (torch.arange(H, device=device) + 0.5) / H * 2 - 1,
+ (torch.arange(W, device=device) + 0.5) / W * 2 - 1,
+ indexing='ij'
+ )
+ x_view = (x_grid.float() - cx) * depth[..., 0] / fx
+ y_view = (y_grid.float() - cy) * depth[..., 0] / fy
+ view_pos = torch.stack([x_view, y_view, depth[..., 0]], dim=-1) # [H, W, 3]
+
+ depth_feat = depth.permute(2, 0, 1).unsqueeze(0)
+ occlusion = torch.zeros((H, W), device=device)
+
+ # start sampling
+ for _ in range(samples):
+ # sample a random unit direction and flip it into the hemisphere around the surface normal
+ rnd_vec = torch.randn(H, W, 3, device=device)
+ rnd_vec = F.normalize(rnd_vec, p=2, dim=-1)
+ dot_val = torch.sum(rnd_vec * normal, dim=-1, keepdim=True)
+ sample_dir = torch.sign(dot_val) * rnd_vec
+ scale = torch.rand(H, W, 1, device=device)
+ scale = scale * scale  # squaring biases samples toward the shaded point
+ sample_pos = view_pos + sample_dir * radius * scale
+ sample_z = sample_pos[..., 2]
+
+ # project to screen space
+ z_safe = torch.clamp(sample_pos[..., 2], min=1e-5)
+ proj_u = (sample_pos[..., 0] * fx / z_safe) + cx
+ proj_v = (sample_pos[..., 1] * fy / z_safe) + cy
+ grid = torch.stack([proj_u, proj_v], dim=-1).unsqueeze(0)
+ geo_z = F.grid_sample(depth_feat, grid, mode='nearest', padding_mode='border').squeeze()
+ range_check = torch.abs(geo_z - sample_z) < radius
+ is_occluded = (geo_z <= sample_z - bias) & range_check
+ occlusion += is_occluded.float()
+
+ f_occ = occlusion / samples * intensity
+ f_occ = torch.clamp(f_occ, 0.0, 1.0)
+
+ return f_occ.unsqueeze(-1)
+
+
def aces_tonemapping(x: torch.Tensor) -> torch.Tensor:
"""
Applies ACES tone mapping curve to an HDR image tensor.
@@ -143,7 +214,8 @@ class PbrMeshRenderer:
mesh : Mesh,
extrinsics: torch.Tensor,
intrinsics: torch.Tensor,
- envmap : EnvMap,
+ envmap : Union[EnvMap, Dict[str, EnvMap]],
+ use_envmap_bg : bool = False,
transformation : Optional[torch.Tensor] = None
) -> edict:
"""
@@ -153,7 +225,8 @@ class PbrMeshRenderer:
mesh : meshmodel
extrinsics (torch.Tensor): (4, 4) camera extrinsics
intrinsics (torch.Tensor): (3, 3) camera intrinsics
- envmap : EnvMap
+ envmap (Union[EnvMap, Dict[str, EnvMap]]): environment map or a dictionary of environment maps
+ use_envmap_bg (bool): whether to use envmap as background
transformation (torch.Tensor): (4, 4) transformation matrix
Returns:
@@ -167,6 +240,10 @@ class PbrMeshRenderer:
if 'dr' not in globals():
import nvdiffrast.torch as dr
+ if not isinstance(envmap, dict):
+ envmap = {'' : envmap}
+ num_envmaps = len(envmap)
+
resolution = self.rendering_options["resolution"]
near = self.rendering_options["near"]
far = self.rendering_options["far"]
@@ -192,6 +269,7 @@ class PbrMeshRenderer:
if transformation is not None:
vertices_homo = torch.bmm(vertices_homo, transformation.unsqueeze(0).transpose(-1, -2))
vertices = vertices_homo[..., :3].contiguous()
+ vertices_camera = torch.bmm(vertices_homo, extrinsics.transpose(-1, -2))
vertices_clip = torch.bmm(vertices_homo, full_proj.transpose(-1, -2))
faces = mesh.faces
@@ -204,7 +282,10 @@ class PbrMeshRenderer:
face_normal = F.normalize(face_normal, dim=1)
out_dict = edict()
- shaded = torch.zeros((resolution * ssaa, resolution * ssaa, 3), dtype=torch.float32, device=self.device)
+ shaded = torch.zeros((num_envmaps, resolution * ssaa, resolution * ssaa, 3), dtype=torch.float32, device=self.device)
+ depth = torch.full((resolution * ssaa, resolution * ssaa, 1), 1e10, dtype=torch.float32, device=self.device)
+ normal = torch.zeros((resolution * ssaa, resolution * ssaa, 3), dtype=torch.float32, device=self.device)
+ max_w = torch.zeros((resolution * ssaa, resolution * ssaa, 1), dtype=torch.float32, device=self.device)
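+ # depth/normal keep the values from the dominant (highest-weight) peel layer per pixel,
+ # tracked via max_w; they feed the SSAO pass after compositing.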
alpha = torch.zeros((resolution * ssaa, resolution * ssaa, 1), dtype=torch.float32, device=self.device)
with dr.DepthPeeler(self.glctx, vertices_clip, faces, (resolution * ssaa, resolution * ssaa)) as peeler:
for _ in range(self.rendering_options["peel_layers"]):
@@ -212,6 +293,9 @@ class PbrMeshRenderer:
# Pos
pos = dr.interpolate(vertices, rast, faces)[0][0]
+
+ # Depth
+ gb_depth = dr.interpolate(vertices_camera[..., 2:3].contiguous(), rast, faces)[0][0]
# Normal
gb_normal = dr.interpolate(face_normal.unsqueeze(0), rast, torch.arange(face_normal.shape[0], dtype=torch.int, device=self.device).unsqueeze(1).repeat(1, 3).contiguous())[0][0]
@@ -220,10 +304,9 @@ class PbrMeshRenderer:
-gb_normal,
gb_normal
)
+ gb_cam_normal = (extrinsics[..., :3, :3].reshape(1, 1, 3, 3) @ gb_normal.unsqueeze(-1)).squeeze(-1)
if _ == 0:
- cam_normal = extrinsics[..., :3, :3].reshape(1, 1, 3, 3) @ gb_normal.unsqueeze(-1)
- cam_normal = -cam_normal.squeeze(-1) * 0.5 + 0.5
- out_dict.normal = cam_normal
+ out_dict.normal = -gb_cam_normal * 0.5 + 0.5
mask = (rast[0, ..., -1:] > 0).float()
out_dict.mask = mask
@@ -344,25 +427,41 @@ class PbrMeshRenderer:
gb_roughness,
gb_metallic,
], dim=-1)
- gb_shaded = envmap.shade(
- pos.unsqueeze(0),
- gb_normal.unsqueeze(0),
- gb_basecolor.unsqueeze(0),
- gb_orm.unsqueeze(0),
- rays_o,
- specular=True,
- )[0]
+ gb_shaded = torch.stack([
+ e.shade(
+ pos.unsqueeze(0),
+ gb_normal.unsqueeze(0),
+ gb_basecolor.unsqueeze(0),
+ gb_orm.unsqueeze(0),
+ rays_o,
+ specular=True,
+ )[0]
+ for e in envmap.values()
+ ], dim=0)
- # Alpha blend
+ # Compositing
w = (1 - alpha) * gb_alpha
+ depth = torch.where(w > max_w, gb_depth, depth)
+ normal = torch.where(w > max_w, gb_cam_normal, normal)
+ max_w = torch.maximum(max_w, w)
shaded += w * gb_shaded
alpha += w
+
+ # Ambient occlusion
+ f_occ = screen_space_ambient_occlusion(
+ depth, normal, perspective, intensity=1.5
+ )
+ shaded *= (1 - f_occ)
+ out_dict.clay = (1 - f_occ)
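+ # 'clay' is the bare AO term (1 - occlusion), used by the untextured clay preview mode.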
# Background
- bg = envmap.sample(rays_d)
- shaded += (1 - alpha) * bg
+ if use_envmap_bg:
+ bg = torch.stack([e.sample(rays_d) for e in envmap.values()], dim=0)
+ shaded += (1 - alpha) * bg
- out_dict.shaded = shaded
+ for i, k in enumerate(envmap.keys()):
+ shaded_key = f"shaded_{k}" if k != '' else "shaded"
+ out_dict[shaded_key] = shaded[i]
# SSAA
for k in out_dict.keys():
@@ -373,7 +472,9 @@ class PbrMeshRenderer:
out_dict[k] = out_dict[k].squeeze()
# Post processing
- out_dict.shaded = aces_tonemapping(out_dict.shaded)
- out_dict.shaded = gamma_correction(out_dict.shaded)
+ for k in envmap.keys():
+ shaded_key = f"shaded_{k}" if k != '' else "shaded"
+ out_dict[shaded_key] = aces_tonemapping(out_dict[shaded_key])
+ out_dict[shaded_key] = gamma_correction(out_dict[shaded_key])
return out_dict
diff --git a/trellis2/renderers/voxel_renderer.py b/trellis2/renderers/voxel_renderer.py
old mode 100755
new mode 100644
diff --git a/trellis2/representations/__init__.py b/trellis2/representations/__init__.py
old mode 100755
new mode 100644
diff --git a/trellis2/representations/mesh/__init__.py b/trellis2/representations/mesh/__init__.py
old mode 100755
new mode 100644
diff --git a/trellis2/representations/mesh/base.py b/trellis2/representations/mesh/base.py
old mode 100755
new mode 100644
diff --git a/trellis2/representations/voxel/__init__.py b/trellis2/representations/voxel/__init__.py
old mode 100755
new mode 100644
diff --git a/trellis2/representations/voxel/voxel_model.py b/trellis2/representations/voxel/voxel_model.py
old mode 100755
new mode 100644
diff --git a/trellis2/utils/__init__.py b/trellis2/utils/__init__.py
old mode 100755
new mode 100644
diff --git a/trellis2/utils/data_utils.py b/trellis2/utils/data_utils.py
old mode 100755
new mode 100644
diff --git a/trellis2/utils/dist_utils.py b/trellis2/utils/dist_utils.py
old mode 100755
new mode 100644
diff --git a/trellis2/utils/elastic_utils.py b/trellis2/utils/elastic_utils.py
old mode 100755
new mode 100644
diff --git a/trellis2/utils/general_utils.py b/trellis2/utils/general_utils.py
old mode 100755
new mode 100644
diff --git a/trellis2/utils/grad_clip_utils.py b/trellis2/utils/grad_clip_utils.py
old mode 100755
new mode 100644
diff --git a/trellis2/utils/loss_utils.py b/trellis2/utils/loss_utils.py
old mode 100755
new mode 100644
diff --git a/trellis2/utils/mesh_utils.py b/trellis2/utils/mesh_utils.py
old mode 100755
new mode 100644
diff --git a/trellis2/utils/random_utils.py b/trellis2/utils/random_utils.py
old mode 100755
new mode 100644
diff --git a/trellis2/utils/render_utils.py b/trellis2/utils/render_utils.py
old mode 100755
new mode 100644
index 28b0dd26d4faafd06c9da67699270be4573bd5cc..d6fb3e9c3b0d826f77ff56d65ccf4adbc247cf0d
--- a/trellis2/utils/render_utils.py
+++ b/trellis2/utils/render_utils.py
@@ -97,11 +97,11 @@ def render_multiview(sample, resolution=512, nviews=30):
return res['color'], extrinsics, intrinsics
-def render_snapshot(samples, resolution=512, bg_color=(0, 0, 0), offset=(-16 / 180 * np.pi, 20 / 180 * np.pi), r=10, fov=8, **kwargs):
- yaw = [0, np.pi/2, np.pi, 3*np.pi/2]
+def render_snapshot(samples, resolution=512, bg_color=(0, 0, 0), offset=(-16 / 180 * np.pi, 20 / 180 * np.pi), r=10, fov=8, nviews=4, **kwargs):
+ yaw = np.linspace(0, 2 * np.pi, nviews, endpoint=False)
yaw_offset = offset[0]
yaw = [y + yaw_offset for y in yaw]
- pitch = [offset[1] for _ in range(4)]
+ pitch = [offset[1] for _ in range(nviews)]
extrinsics, intrinsics = yaw_pitch_r_fov_to_extrinsics_intrinsics(yaw, pitch, r, fov)
return render_frames(samples, extrinsics, intrinsics, {'resolution': resolution, 'bg_color': bg_color}, **kwargs)
diff --git a/trellis2/utils/vis_utils.py b/trellis2/utils/vis_utils.py
old mode 100755
new mode 100644