6.py
#!/usr/bin/python3
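# Viterbi tagger for the NER data set using rare-word categories: 6_1.py maps
# infrequent training words to categorical pseudo-words, count_freqs.py produces
# the n-gram counts, and this script decodes ner_dev.dat with the Viterbi
# algorithm, writing each word, its predicted tag and the log probability of the
# decoded tag sequence up to that position to 6.txt.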
import numpy as np
import os
from aux import computeEmissionsCategorical, getWordCount
from RareWordClassifier import RareWordClassifier
# emission probability e(x | y) = count(y -> x) / count(y); rare words are
# first mapped to their category by the classifier
def getEmission(emissions, x, y):
    global classifier
    word = classifier.classify(x)
    return emissions[word][y]
# ML transition estimate q(w | u, v); -1 signals an unseen trigram
def getq(q, u, v, w):
    if (u, v, w) in q:
        return q[(u, v, w)]
    else:
        return -1
# possible tags for position k; '*' for positions before the start of the sentence
def getT(x, k, emissions):
    if k < 0:
        return ['*']
    else:
        global classifier
        word = classifier.classify(x[k])
        return emissions[word].keys()
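# Viterbi recursion for the trigram HMM:
#   pi(k, u, v) = max_w pi(k - 1, w, u) * q(v | w, u) * e(x_k | v)
# bp records the maximising tag w for each (k, u, v) so the best sequence can be
# recovered by following the back-pointers from the end of the sentence.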
def tagUsingViterbi(x, emissions, q):
    # initialization
    pi = dict()
    bp = dict()
    pi[(-1, '*', '*')] = 1
    # forward pass: fill in pi and the back-pointers
    for k in range(len(x)):
        for u in getT(x, k - 1, emissions):
            for v in getT(x, k, emissions):
                pi[(k, u, v)] = 0
                for w in getT(x, k - 2, emissions):
                    q_ml = getq(q, w, u, v)
                    if q_ml != -1:
                        this_probability = pi[(k - 1, w, u)] * q_ml * getEmission(emissions, x[k], v)
                        if pi[(k, u, v)] < this_probability:
                            pi[(k, u, v)] = this_probability
                            bp[(k, u, v)] = w
    max_prob = -1
    n = len(x)
    # the tagged sequence has the same length as the input sequence
    y = x[:]
    # pick the last two tags using the STOP transition
    for u in getT(x, n - 2, emissions):
        for v in getT(x, n - 1, emissions):
            q_ml = getq(q, u, v, 'STOP')
            if q_ml != -1:
                if max_prob < pi[(n - 1, u, v)] * q_ml:
                    max_prob = pi[(n - 1, u, v)] * q_ml
                    y[n - 2] = u
                    y[n - 1] = v
    # recover the remaining tags by following the back-pointers
    for k in range(n - 3, -1, -1):
        y[k] = bp[(k + 2, y[k + 1], y[k + 2])]
    return (y, pi)
# tag at position k; '*' for positions before the start of the sentence
def gety(y, k):
    if k < 0:
        return '*'
    else:
        return y[k]
# log pi(k, y[k-1], y[k]): the log probability of the decoded tag sequence up to
# position k
def getLogLikelihood(x, y, pi):
    log_likelihood = []
    for k in range(len(x)):
        log_prob = np.log(pi[(k, gety(y, k - 1), gety(y, k))])
        log_likelihood.append(log_prob)
    return log_likelihood
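# output format: one "word tag log_prob" line per token, with a blank line
# separating sentences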
def writeThisSentence(f_output, x, y, log_likelihood):
    for (word, tag, log_prob) in zip(x, y, log_likelihood):
        this_line = ' '.join((word, tag, '{}'.format(log_prob)))
        this_line = this_line + '\n'
        f_output.write(this_line)
    f_output.write('\n')
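# estimate q(w | u, v) = count(u, v, w) / count(u, v) from the n-gram counts file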
def computeq():
    count_of_trigrams = dict()
    count_of_bigrams = dict()
    with open('ner_rare.counts') as f_input:
        for line in f_input:
            tokens = line.strip().split()
            if tokens[1] == '2-GRAM':
                u, v = tokens[2], tokens[3]
                count_of_bigrams[(u, v)] = int(tokens[0])
            elif tokens[1] == '3-GRAM':
                u, v, w = tokens[2], tokens[3], tokens[4]
                count_of_trigrams[(u, v, w)] = int(tokens[0])
    q = dict()
    for (u, v, w) in count_of_trigrams:
        q[(u, v, w)] = float(count_of_trigrams[(u, v, w)]) / float(count_of_bigrams[(u, v)])
    return q
#create the ner_train_rare_categorical.dat file using 6_1.py
os.system('python 6_1.py')
#generate the ner_rare_categorical.counts file
os.system('python2 count_freqs.py ner_train_rare_categorical.dat > ner_rare_categorical.counts')
#estimate the model parameters
q = computeq()
count_of_words = getWordCount()
classifier = RareWordClassifier(count_of_words)
emissions = computeEmissionsCategorical(classifier)
with open('ner_dev.dat') as f_input, open('6.txt', 'w') as f_output:
    x = []
    for line in f_input:
        tokens = line.strip().split()
        if len(tokens) > 0:
            x.append(tokens[0])
        else:
            # a blank line ends a sentence: tag it and write the result
            y, pi = tagUsingViterbi(x, emissions, q)
            log_likelihood = getLogLikelihood(x, y, pi)
            writeThisSentence(f_output, x, y, log_likelihood)
            x = []