Duplicate from Xenova/modnet
Co-authored-by: Joshua <[email protected]>
- .gitattributes +35 -0
- README.md +66 -0
- config.json +6 -0
- onnx/model.onnx +3 -0
- onnx/model_bnb4.onnx +3 -0
- onnx/model_fp16.onnx +3 -0
- onnx/model_q4.onnx +3 -0
- onnx/model_q4f16.onnx +3 -0
- onnx/model_quantized.onnx +3 -0
- onnx/model_uint8.onnx +3 -0
- preprocessor_config.json +23 -0
- quantize_config.json +30 -0
.gitattributes
ADDED
@@ -0,0 +1,35 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text

README.md
ADDED
@@ -0,0 +1,66 @@
---
library_name: transformers.js
tags:
- vision
- background-removal
- portrait-matting
license: apache-2.0
pipeline_tag: image-segmentation
---

# MODNet: Trimap-Free Portrait Matting in Real Time



For more information, check out the official [repository](https://github.com/ZHKKKe/MODNet) and example [colab](https://colab.research.google.com/drive/1P3cWtg8fnmu9karZHYDAtmm1vj1rgA-f?usp=sharing).

## Usage (Transformers.js)

If you haven't already, you can install the [Transformers.js](https://huggingface.co/docs/transformers.js) JavaScript library from [NPM](https://www.npmjs.com/package/@huggingface/transformers) using:
```bash
npm i @huggingface/transformers
```

You can then use the model for portrait matting as follows:

```js
import { pipeline } from '@huggingface/transformers';

const segmenter = await pipeline('background-removal', 'Xenova/modnet', { dtype: 'fp32' });
const url = 'https://images.pexels.com/photos/5965592/pexels-photo-5965592.jpeg?auto=compress&cs=tinysrgb&w=1024';
const output = await segmenter(url);
output[0].save('mask.png');
// You can also use `output[0].toCanvas()` or `await output[0].toBlob()` if you would like to access the output without saving it.
```
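The `onnx` folder also contains several reduced-precision variants of the weights. To trade a little accuracy for a smaller download, select one through the same `dtype` option. A minimal sketch, assuming the standard Transformers.js file-naming convention (e.g. `'fp16'` loads `onnx/model_fp16.onnx`, `'q8'` loads `onnx/model_quantized.onnx`):

```js
import { pipeline } from '@huggingface/transformers';

// Sketch: load a smaller weight variant instead of the default fp32.
// Variants shipped in this repo: fp16, q8 (quantized), uint8, q4, q4f16, bnb4.
const segmenter = await pipeline('background-removal', 'Xenova/modnet', { dtype: 'q8' });
```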

Or with the `AutoModel` and `AutoProcessor` APIs:

```js
import { AutoModel, AutoProcessor, RawImage } from '@huggingface/transformers';

// Load the model and processor
const model = await AutoModel.from_pretrained('Xenova/modnet', { dtype: 'fp32' });
const processor = await AutoProcessor.from_pretrained('Xenova/modnet');

// Load the image from a URL
const url = 'https://images.pexels.com/photos/5965592/pexels-photo-5965592.jpeg?auto=compress&cs=tinysrgb&w=1024';
const image = await RawImage.fromURL(url);

// Pre-process the image (resize, rescale, and normalize)
const { pixel_values } = await processor(image);

// Predict the alpha matte
const { output } = await model({ input: pixel_values });

// Convert the matte to a uint8 image, resize it to the original dimensions, and save it
const mask = await RawImage.fromTensor(output[0].mul(255).to('uint8')).resize(image.width, image.height);
mask.save('mask.png');
```

| Input image | Output mask |
|--------|--------|
|  |  |

---

Note: Having a separate repo for ONNX weights is intended to be a temporary solution until WebML gains more traction. If you would like to make your models web-ready, we recommend converting to ONNX using [🤗 Optimum](https://huggingface.co/docs/optimum/index) and structuring your repo like this one (with ONNX weights located in a subfolder named `onnx`).
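For architectures that Optimum's exporters support, that conversion is a short CLI invocation. A generic sketch (`your-org/your-model` and the output directory are placeholders; custom architectures like this one may need a manual `torch.onnx.export` instead):

```bash
pip install "optimum[exporters]"
optimum-cli export onnx --model your-org/your-model onnx_output/
```
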
config.json
ADDED
@@ -0,0 +1,6 @@
{
  "model_type": "modnet",
  "transformers.js_config": {
    "dtype": "fp32"
  }
}
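The `transformers.js_config` block supplies load-time defaults, so callers get the full-precision `onnx/model.onnx` without passing `dtype` themselves; an option passed explicitly should take precedence over this default. A minimal sketch, mirroring the usage examples above:

```js
import { AutoModel } from '@huggingface/transformers';

// config.json pins the default dtype to 'fp32'; passing dtype at load time
// overrides that default (here: the half-precision fp16 variant).
const model = await AutoModel.from_pretrained('Xenova/modnet', { dtype: 'fp16' });
```
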
onnx/model.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:07c308cf0fc7e6e8b2065a12ed7fc07e1de8febb7dc7839d7b7f15dd66584df9
size 25888640

onnx/model_bnb4.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:acfa94a3b90259d00aed9355be69def816837476307adf59cb5631006946a461
size 23080899

onnx/model_fp16.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:25f165da9bfd30830a575f1f0490f1acd995975cb349bc02f3d79332e1fe5cf6
size 12984781

onnx/model_q4.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:74b7b569f8697e220b82986ca85c2226bec93c67e748276e2cec4e8e742a312a
size 23132083

onnx/model_q4f16.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f582407530e8c5f2f4adc969d894cce82a2ae3a6b6b0571fe25ebb4c5a8ffa5c
size 11801931

onnx/model_quantized.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:92e49898c3e05a6d7a944fc67a8cb87c4aad754ffb6ebd949528c7d1105fee3a
size 6632188

onnx/model_uint8.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7bad6522b3cde60246e69e234b7786337ef9c88abc790ee5c1aaa6e535b0c61d
size 6627048
preprocessor_config.json
ADDED
@@ -0,0 +1,23 @@
{
  "do_normalize": true,
  "do_pad": false,
  "do_rescale": true,
  "do_resize": true,
  "image_mean": [
    0.5,
    0.5,
    0.5
  ],
  "feature_extractor_type": "ImageFeatureExtractor",
  "image_std": [
    0.5,
    0.5,
    0.5
  ],
  "resample": 2,
  "rescale_factor": 0.00392156862745098,
  "size": {
    "shortest_edge": 512
  },
  "size_divisibility": 32
}
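In plain terms: the processor resizes the input so its shortest edge is 512 pixels (with dimensions kept at multiples of 32, per `size_divisibility`), rescales pixel values by 1/255, then normalizes with mean = std = 0.5, mapping each channel into [-1, 1]. A minimal sketch of the per-pixel arithmetic (`normalizePixel` is a hypothetical helper, not part of the library):

```js
// Hypothetical helper mirroring rescale_factor, image_mean, and image_std above:
// rescale by 1/255, then normalize with mean = std = 0.5, giving values in [-1, 1].
function normalizePixel(value /* 0-255 */) {
  const rescaled = value * 0.00392156862745098; // rescale_factor = 1 / 255
  return (rescaled - 0.5) / 0.5;                // (x - mean) / std
}

console.log(normalizePixel(0));   // -1
console.log(normalizePixel(255)); // 1
```
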
quantize_config.json
ADDED
@@ -0,0 +1,30 @@
{
  "per_channel": false,
  "reduce_range": false,
  "per_model_config": {
    "model": {
      "op_types": [
        "Sigmoid",
        "Constant",
        "Resize",
        "Gather",
        "MatMul",
        "Clip",
        "BatchNormalization",
        "Concat",
        "Conv",
        "GlobalAveragePool",
        "Expand",
        "Add",
        "Slice",
        "Shape",
        "Unsqueeze",
        "Reshape",
        "InstanceNormalization",
        "Relu",
        "Mul"
      ],
      "weight_type": "QUInt8"
    }
  }
}
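This file records the settings used to produce the 8-bit variants: `"weight_type": "QUInt8"` means quantized weights are stored as unsigned 8-bit integers together with an affine scale and zero-point, and are mapped back to real values at inference time. A toy illustration of that dequantization step (illustrative only, not the runtime's actual code):

```js
// Toy affine dequantization for QUInt8 weights: real ≈ scale * (q - zeroPoint).
function dequantize(q /* uint8 */, scale, zeroPoint) {
  return scale * (q - zeroPoint);
}

console.log(dequantize(128, 0.02, 128)); // 0
console.log(dequantize(255, 0.02, 128)); // 2.54
```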