Datasets: Upload folder using huggingface_hub

Files changed:
- README.md +23 -11
- manifest.json +2 -4
- test.parquet +2 -2
- vistoolbench_1204.parquet +3 -0
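The commit title above is the default message written by `upload_folder` from `huggingface_hub`. Below is a minimal sketch of the kind of call that could have produced this commit; the local folder name is only inferred from the manifest's `out_parquet` path, and the repo id is a placeholder, not something shown in this view.

```python
from huggingface_hub import HfApi

api = HfApi()

# Push the prepared folder (README.md, manifest.json, *.parquet) to the dataset repo.
# "Upload folder using huggingface_hub" is this call's default commit message.
api.upload_folder(
    folder_path="hf_upload_final_corrected",  # assumed local staging folder, per the manifest's out_parquet path
    repo_id="<org>/<dataset-name>",           # placeholder: the repo id is not part of this commit view
    repo_type="dataset",
)
```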
README.md CHANGED

@@ -1,3 +1,13 @@
+---
+pretty_name: VisualToolBench
+tags:
+- vision
+- multimodal
+- tool-use
+task_categories:
+- visual-question-answering
+---
+
 # VisToolBench Dataset
 
 A benchmark dataset for evaluating vision-language models on tool-use tasks.
@@ -17,15 +27,13 @@ A benchmark dataset for evaluating vision-language models on tool-use tasks.
 | `num_turns` | int | Number of conversation turns (1 for single-turn) |
 | `prompt_category` | string | Task category (e.g., "medical", "scientific", "general") |
 | `eval_focus` | string | What aspect is being evaluated (e.g., "visual_reasoning", "tool_use") |
-| `image` | Image | Preview image (so HF viewer always shows an image) |
 | `turn_prompts` | List[string] | Per-turn prompts (single-turn → list of length 1) |
-| `turn_images` | List[Image] | Per-turn images (single-turn → list of length 1) |
 | `turn_golden_answers` | List[string] | Per-turn golden answers |
 | `turn_tool_trajectories` | List[string] | Per-turn tool trajectories (JSON strings) |
 | `rubrics_by_turn` | List[string] | Per-turn rubric dicts as JSON strings (includes weights + metadata) |
-| `
-| `
-| `
+| `images` | List[Image] | Flat list of all images (HF viewer shows these) |
+| `images_by_turn` | List[List[Image]] | Images grouped by turn (to know which image belongs to which turn) |
+| `num_images` | int | Total images in `images` |
 
 ## Rubrics Format
 
@@ -49,7 +57,8 @@ ds = load_dataset("path/to/dataset")
 # Access a sample
 sample = ds['test'][0]
 print(sample['turn_prompts'])  # list[str]
-print(sample['
+print(sample['images'][0])  # PIL Image (first image overall)
+print(sample['images_by_turn'][0])  # list of PIL Images for turn 1
 
 # Parse rubrics for turn 1
 import json
@@ -62,10 +71,13 @@ for rubric_id, rubric in turn1_rubrics.items():
 
 - `test`: Full dataset (1204 samples)
 
-## License
-
-[Specify license here]
-
 ## Citation
 
-
+```bibtex
+@article{guo2025beyond,
+  title={Beyond seeing: Evaluating multimodal llms on tool-enabled image perception, transformation, and reasoning},
+  author={Guo, Xingang and Tyagi, Utkarsh and Gosai, Advait and Vergara, Paula and Park, Jayeon and Montoya, Ernesto Gabriel Hern{\'a}ndez and Zhang, Chen Bo Calvin and Hu, Bin and He, Yunzhong and Liu, Bing and others},
+  journal={arXiv preprint arXiv:2510.12712},
+  year={2025}
+}
+```
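To make the schema change concrete, here is a short usage sketch consistent with the updated README: it loads the `test` split, inspects the new `images` / `images_by_turn` / `num_images` columns, and parses the per-turn rubric JSON the same way the README's own example does. The dataset path is the README's placeholder, and the loop bodies are purely illustrative.

```python
import json
from datasets import load_dataset

ds = load_dataset("path/to/dataset")  # placeholder path, as in the README example
sample = ds["test"][0]

# New image columns from this commit: a flat list plus a per-turn grouping.
assert sample["num_images"] == len(sample["images"])
for turn_idx, turn_images in enumerate(sample["images_by_turn"], start=1):
    print(f"turn {turn_idx}: {len(turn_images)} image(s)")

# Rubrics are stored as JSON strings, one entry per turn.
turn1_rubrics = json.loads(sample["rubrics_by_turn"][0])
for rubric_id, rubric in turn1_rubrics.items():
    print(rubric_id, rubric)
```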
manifest.json CHANGED

@@ -12,15 +12,13 @@
 "num_turns",
 "prompt_category",
 "eval_focus",
-"image",
 "turn_prompts",
-"turn_images",
 "turn_golden_answers",
 "turn_tool_trajectories",
 "rubrics_by_turn",
 "images",
-"
+"images_by_turn",
 "num_images"
 ],
-"out_parquet": "
+"out_parquet": "hf_upload_final_corrected/test.parquet"
 }
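The manifest now lists exactly the columns that should appear in the parquet referenced by `out_parquet`. A small check along these lines can confirm the two stay in sync; note that the top-level JSON key holding the column list is not visible in this diff, so `"columns"` below is an assumption.

```python
import json
import pyarrow.parquet as pq

with open("manifest.json") as f:
    manifest = json.load(f)

expected = manifest["columns"]  # assumed key name; only the list entries appear in the diff
schema = pq.read_schema(manifest["out_parquet"])  # "hf_upload_final_corrected/test.parquet"

print("missing from parquet:", [c for c in expected if c not in schema.names])
print("not in manifest:", [c for c in schema.names if c not in expected])
```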
test.parquet CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:d7910a5046a7c6b037d518515208bac3cb7ec4ad272340c6ed974986b3be4da6
+size 7812249877
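The lines above are the updated Git LFS pointer for test.parquet: `oid sha256:` holds the SHA-256 of the file content and `size` its byte count (roughly 7.8 GB). A quick way to verify a downloaded copy against this pointer:

```python
import hashlib
import os

expected_oid = "d7910a5046a7c6b037d518515208bac3cb7ec4ad272340c6ed974986b3be4da6"
expected_size = 7812249877  # bytes, from the LFS pointer

path = "test.parquet"  # local path of the downloaded file
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

print("size matches:", os.path.getsize(path) == expected_size)
print("sha256 matches:", h.hexdigest() == expected_oid)
```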
vistoolbench_1204.parquet ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7717b54aeaf237de5c26a334e7049e94aba00988bf0e3d4bf989e22fe91cb87
+size 5981789093
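The newly added vistoolbench_1204.parquet (about 6 GB per its LFS pointer) can also be fetched and loaded on its own. A sketch, with the repo id as a placeholder since it is not shown in this view:

```python
from huggingface_hub import hf_hub_download
from datasets import load_dataset

local_path = hf_hub_download(
    repo_id="<org>/<dataset-name>",        # placeholder repo id
    filename="vistoolbench_1204.parquet",
    repo_type="dataset",
)

# Load the single parquet file directly; the generic "parquet" builder exposes it as a "train" split.
ds = load_dataset("parquet", data_files=local_path, split="train")
print(ds)
```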