# modeling.py

import torch.nn as nn
from transformers import (
BertPreTrainedModel,
BertModel,
AlbertPreTrainedModel,
AlbertModel,
DistilBertPreTrainedModel,
DistilBertModel,
)


class BertForSentimentClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # BERT encoder.
        self.bert = BertModel(config)
        # Classification layer: maps the [CLS] representation to a single logit.
        self.cls_layer = nn.Linear(config.hidden_size, 1)
        # Initialize weights (standard pattern for transformers models).
        self.init_weights()

    def forward(self, input_ids, attention_mask):
        """
        Inputs:
            -input_ids : Tensor of shape [B, T] containing the token ids of the sequences
            -attention_mask : Tensor of shape [B, T] containing the attention masks used to avoid the contribution of PAD tokens
            (where B is the batch size and T is the sequence length)
        Returns:
            -logits : Tensor of shape [B, 1] containing the raw classification scores
        """
        # Feed the input to BERT to obtain contextualized token representations.
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        # Take the representation of the [CLS] token (first position of each sequence).
        cls_reps = outputs.last_hidden_state[:, 0]
        # Pass the [CLS] representations through the classification layer to obtain logits.
        logits = self.cls_layer(cls_reps)
        return logits


class AlbertForSentimentClassification(AlbertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # ALBERT encoder.
        self.albert = AlbertModel(config)
        # Classification layer: maps the [CLS] representation to a single logit.
        self.cls_layer = nn.Linear(config.hidden_size, 1)
        # Initialize weights (standard pattern for transformers models).
        self.init_weights()

    def forward(self, input_ids, attention_mask):
        """
        Inputs:
            -input_ids : Tensor of shape [B, T] containing the token ids of the sequences
            -attention_mask : Tensor of shape [B, T] containing the attention masks used to avoid the contribution of PAD tokens
            (where B is the batch size and T is the sequence length)
        Returns:
            -logits : Tensor of shape [B, 1] containing the raw classification scores
        """
        # Feed the input to ALBERT to obtain contextualized token representations.
        outputs = self.albert(input_ids=input_ids, attention_mask=attention_mask)
        # Take the [CLS] representation and classify it.
        cls_reps = outputs.last_hidden_state[:, 0]
        logits = self.cls_layer(cls_reps)
        return logits


class DistilBertForSentimentClassification(DistilBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # DistilBERT encoder.
        self.distilbert = DistilBertModel(config)
        # Classification layer: maps the [CLS] representation to a single logit.
        self.cls_layer = nn.Linear(config.hidden_size, 1)
        # Initialize weights (standard pattern for transformers models).
        self.init_weights()

    def forward(self, input_ids, attention_mask):
        """
        Inputs:
            -input_ids : Tensor of shape [B, T] containing the token ids of the sequences
            -attention_mask : Tensor of shape [B, T] containing the attention masks used to avoid the contribution of PAD tokens
            (where B is the batch size and T is the sequence length)
        Returns:
            -logits : Tensor of shape [B, 1] containing the raw classification scores
        """
        # Feed the input to DistilBERT to obtain contextualized token representations.
        outputs = self.distilbert(input_ids=input_ids, attention_mask=attention_mask)
        # Take the [CLS] representation and classify it.
        cls_reps = outputs.last_hidden_state[:, 0]
        logits = self.cls_layer(cls_reps)
        return logits
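

# A minimal usage sketch, not part of the original file: it assumes the
# Hugging Face 'bert-base-uncased' checkpoint and BertTokenizer; the ALBERT
# and DistilBERT classes are used the same way with their matching checkpoints
# and tokenizers. The classification head is newly initialized here and would
# still need fine-tuning on a sentiment dataset before its outputs are useful.
if __name__ == '__main__':
    import torch
    from transformers import BertTokenizer

    model = BertForSentimentClassification.from_pretrained('bert-base-uncased')
    model.eval()
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

    encoded = tokenizer('This movie was great!', return_tensors='pt')
    with torch.no_grad():
        logits = model(input_ids=encoded['input_ids'],
                       attention_mask=encoded['attention_mask'])
    # Each example yields a single logit; during training it would pair with
    # nn.BCEWithLogitsLoss, and at inference a sigmoid turns it into a
    # positive-sentiment probability.
    print(torch.sigmoid(logits.squeeze(-1)))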