import gradio as gr
import torch
from transformers import AutoTokenizer
from modeling import BERTMultiLabel

LABELS = ["anger", "fear", "joy", "sadness", "surprise"]
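# NOTE: the label order must match the output order of the fine-tuned
# classification head, since logits are mapped to names by position below.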

# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("./")
model = BERTMultiLabel("microsoft/deberta-v3-base", num_labels=len(LABELS))

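# Load the fine-tuned checkpoint on CPU (map_location keeps this working on
# CPU-only hardware) and switch to eval mode so dropout is disabled.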
state = torch.load("pytorch_model.bin", map_location="cpu")
model.load_state_dict(state)
model.eval()


# ---------------- PREDICTION FUNCTION ---------------- #
def predict(text):
    if not text.strip():
        return {"error": "Please enter text."}

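    # Tokenize to at most 128 tokens, matching the sequence length listed in
    # the model overview; padding a single example to max_length is harmless.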
    enc = tokenizer(
        text,
        truncation=True,
        padding="max_length",
        max_length=128,
        return_tensors="pt"
    )

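    # Forward pass without gradient tracking. Sigmoid (not softmax) is used
    # because this is multi-label classification: each emotion gets an
    # independent probability and several can be high at once.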
    with torch.no_grad():
        logits = model(enc["input_ids"], enc["attention_mask"])
        probs = torch.sigmoid(logits)[0].tolist()

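    # Report per-label scores and pick the highest-scoring emotion as the
    # headline mood. A true multi-label readout could instead threshold, e.g.
    # (hypothetical): active = [l for l, p in zip(LABELS, probs) if p >= 0.5]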
    scores = {label: round(p, 4) for label, p in zip(LABELS, probs)}
    mood = max(zip(LABELS, probs), key=lambda item: item[1])[0]

    emoji_map = {
        "anger": "😡",
        "fear": "😨",
        "joy": "😊",
        "sadness": "😢",
        "surprise": "😮",
    }

    return {
        "Predicted Mood": f"{emoji_map[mood]} {mood.capitalize()}",
        "Scores": scores,
    }


# ---------------- UI LAYOUT ---------------- #
with gr.Blocks(title="Emotion Detection with DeBERTa-v3") as demo:

    gr.Markdown("""
        <div style="text-align:center;">
            <h1 style="font-size:3rem;">🎭 Emotion Detection with DeBERTa-v3</h1>
            <p style="font-size:1.1rem; color:#555;">
                Multi-label emotion classification powered by DeBERTa-v3 <br>
                Trained on IIT Madras Deep Learning & GenAI Dataset (2025)
            </p>
        </div>
        <br>
    """)

    with gr.Row():

        with gr.Column(scale=1):

            gr.HTML("""
                <div style="
                    background:white; padding:20px; border-radius:14px;
                    box-shadow:0 2px 12px rgba(0,0,0,0.08); margin-bottom:20px;
                ">
                    <h2>📌 Model Overview</h2>
                    <ul style="line-height:1.6;">
                        <li><b>Architecture:</b> DeBERTa-v3 Base</li>
                        <li><b>Task:</b> Multi-label Emotion Detection</li>
                        <li><b>Labels:</b> Anger, Fear, Joy, Sadness, Surprise</li>
                        <li><b>Training:</b> AdamW + BCEWithLogitsLoss</li>
                        <li><b>Sequence Length:</b> 128 tokens</li>
                        <li><b>Framework:</b> PyTorch + Transformers</li>
                    </ul>
                </div>
            """)

            gr.HTML("""
                <div style="
                    background:white; padding:20px; border-radius:14px;
                    box-shadow:0 2px 12px rgba(0,0,0,0.08); margin-bottom:20px;
                ">
                    <h2>📚 Dataset Details</h2>
                    <p>Dataset: IIT Madras DL-GenAI Multi-Label Emotion Dataset</p>
                    <ul>
                        <li>😠 Anger</li>
                        <li>😨 Fear</li>
                        <li>😊 Joy</li>
                        <li>😢 Sadness</li>
                        <li>😲 Surprise</li>
                    </ul>
                    <p><b>Metric:</b> Macro F1 Score</p>
                </div>
            """)

            gr.HTML("""
                <div style="
                    background:white; padding:20px; border-radius:14px;
                    box-shadow:0 2px 12px rgba(0,0,0,0.08);
                ">
                    <h2>🏆 Competition Summary</h2>
                    <ul style="line-height:1.6;">
                        <li><b>Platform:</b> Kaggle Private Competition</li>
                        <li><b>Course:</b> IIT Madras - Deep Learning & GenAI</li>
                        <li><b>Final Rank:</b> 27 / 200 Participants</li>
                        <li><b>Public LB:</b> 87.8% Macro F1</li>
                        <li><b>Private LB:</b> 87.0% Macro F1</li>
                        <li><b>Models Attempted:</b> CNN | GRU | BiLSTM | DistilBERT | DeBERTa</li>
                    </ul>
                </div>
            """)

        with gr.Column(scale=2):

            input_box = gr.Textbox(
                label="Enter your text",
                placeholder="Example: I feel amazing today! 🎉",
                lines=4,
            )

            btn = gr.Button("🎯 Analyze Emotion", elem_id="analyze-button")

            output = gr.JSON(label="Model Output")

            btn.click(predict, inputs=input_box, outputs=output)

    gr.Markdown("""
        <br>
        <p style="text-align:center; color:#777;">
            Built by <b>Ayusman Samasi</b> • IIT Madras Deep Learning & GenAI
        </p>
    """)

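# Start the Gradio server; when run locally this defaults to http://127.0.0.1:7860.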
demo.launch()