#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 1 14:05:17 2018

@author: Emmanouil Theofanis Chourdakis
"""

from pypeg2 import *
import re


def var_generator(T):
    """Yield an endless stream of variable names: T1, T2, ..."""
    I = 0
    while True:
        I += 1
        yield "{}{}".format(T, I)


def l_label_generator(T):
    """Yield an endless stream of line labels: <TLINE1>, <TLINE2>, ..."""
    I = 0
    while True:
        I += 1
        yield "<{}LINE{}>".format(T, I)


# Regular expressions for the fields of a stand-off annotation line.
annot_var = re.compile(r"[A-Z][0-9]+")
annot_pos = re.compile(r"[0-9]+ [0-9]+(;[0-9]+ [0-9]+)*")
annot_label = re.compile(r"[A-Za-z0-9_]+")
label_var_tuple = re.compile(r"[A-Za-z0-9_]+:[A-Z][0-9]+")


class AnnotationType(Keyword):
    grammar = Enum(K("Place"),
                   K("Character"),
                   K("Character_Line"),
                   K("Motion"),
                   K("Motion_Signal"),
                   K("Says"),
                   K("Spatial_Signal"))


class AttributeType(Keyword):
    grammar = Enum(K("Age"), K("Gender"))


class AnnotationTuple:
    grammar = attr('variable', annot_var), \
              attr('type', AnnotationType), \
              attr('idx', annot_pos), \
              attr('annotation', restline)


class AttributeTuple:
    grammar = attr('variable', annot_var), \
              attr('type', AttributeType), \
              attr('target', annot_var), \
              attr('annotation', restline)


class VarArg:
    grammar = attr('label', annot_label), ':', attr('target', annot_var)


class VarArgs(List):
    grammar = some(VarArg)


class RelationTuple:
    grammar = attr('variable', annot_var), \
              attr('args', VarArgs)


class AnnotLine(List):
    grammar = [AnnotationTuple, AttributeTuple, RelationTuple]


class AnnotationFile(List):
    grammar = some(AnnotLine)


def get_tokens_by_label(label, sent_tokens, sent_labels):
    """Group the BIO-tagged tokens of one sentence into spans for `label`."""
    tokens = []
    blabel = "B-{}".format(label)
    ilabel = "I-{}".format(label)

    tok_ = []
    for n, l in enumerate(sent_labels):
        if l == blabel:
            if len(tok_) > 0:
                tokens.append(tok_)
            tok_ = [sent_tokens[n]]
        elif l == ilabel:
            tok_.append(sent_tokens[n])
        else:
            if len(tok_) > 0:
                tokens.append(tok_)
            tok_ = []
    # Flush a span that runs up to the end of the sentence.
    if len(tok_) > 0:
        tokens.append(tok_)
    return tokens


def get_token_head(span):
    """Return the syntactic head token of a span of spaCy tokens."""
    span_idx = [tok.i for tok in span]
    head = span[0]
    while head.head.i in span_idx:
        if head == head.head:  # reached the sentence root
            return head
        head = head.head
    return head
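
# A small self-contained illustration (not part of the original
# pipeline) of how get_tokens_by_label() groups BIO tags into spans;
# the plain strings below stand in for spaCy tokens.
def _demo_get_tokens_by_label():
    sent_tokens = ['the', 'old', 'wizard', 'entered', 'the', 'tower']
    sent_labels = ['O', 'B-Character', 'I-Character', 'O', 'B-Place', 'I-Place']
    # Returns [['old', 'wizard']] for 'Character' and, because spans
    # that end the sentence are flushed too, [['the', 'tower']] for 'Place'.
    return (get_tokens_by_label('Character', sent_tokens, sent_labels),
            get_tokens_by_label('Place', sent_tokens, sent_labels))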
def get_min_dep_path(a, b, doc, LCA):
    """Return the dependency path between doc[a] and doc[b] via their LCA."""
    lca_idx = LCA[a, b]
    if lca_idx == -1:  # no common ancestor, e.g. tokens in different sentences
        return ""

    lca = doc[lca_idx]

    # From token a, go up to the LCA. (In spaCy the root is its own
    # head, so the walk always terminates at or before the root.)
    m_a = []
    tok = doc[a]
    while tok != lca:
        m_a.append(('up', tok.dep_))
        tok = tok.head

    # From token b, go up to the LCA, then reverse to get the way down.
    m_b = []
    tok = doc[b]
    while tok != lca:
        m_b.append(('down', tok.dep_))
        tok = tok.head
    m_b.reverse()

    path = m_a + m_b
    return "::".join("{}|{}".format(direction, dep) for direction, dep in path)


def get_dep_with_head(tok):
    """Return (dep, root_lemma); dep is set only if tok attaches directly to the root."""
    dep_ = []
    while tok.head != tok:
        dep_.append(tok.dep_)
        tok = tok.head

    if len(dep_) == 1:
        return dep_[0], tok.lemma_
    else:
        return None, tok.lemma_
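
# A usage sketch for get_min_dep_path() (assumes a loaded spaCy model
# passed in as `nlp`; not part of the original pipeline). The path is
# rendered as 'direction|dep' steps joined by '::'.
def _demo_min_dep_path(nlp):
    doc = nlp("The wizard entered the tower.")
    lca = doc.get_lca_matrix()
    # With a typical parse, the path from 'wizard' (index 1) to 'tower'
    # (index 4) through their common head 'entered' is
    # 'up|nsubj::down|dobj'.
    return get_min_dep_path(1, 4, doc, lca)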
class Document:
    """Wraps a parsed spaCy Doc with token labels and relations."""

    def __init__(self, doc):
        self.doc = doc
        self.LCA = doc.get_lca_matrix()
        self.text = doc.text
        self.sentences = [str(s) for s in doc.sents]

        self.tokens = []
        self.token_sentences = []
        self.relations = []

        for sent in doc.sents:
            tlist = []
            for n, tok in enumerate(sent):
                token = Token(tok, doc, tok.i, sent, n)
                tlist.append(token)
            self.token_sentences.append(tlist)
            self.tokens += tlist

    def add_token(self, token, doc, doc_idx, sent, sent_idx, label='NONE'):
        token = Token(token, doc, doc_idx, sent, sent_idx, label)
        self.tokens.append(token)

    def add_relation(self, trigger, arg1, arg2, label):
        self.relations.append(Relation(arg1, arg2, trigger, self.LCA, label))

    def find_tokens(self, start, end):
        """Return all tokens fully inside the character span [start, end]."""
        return [tok for tok in self.tokens
                if tok.start >= start and tok.end <= end]

    def assign_label_to_tokens(self, start, end, label):
        tokens = self.find_tokens(start, end)
        for n, token in enumerate(tokens):
            IOB = 'B' if n == 0 else 'I'
            token.set_label('{}-{}'.format(IOB, label))

    def assign_label_to_tokens_by_matching_lemma(self, lemma, label):
        for t in self.tokens:
            if t.token.lemma_ == lemma:
                t.label = 'B-{}'.format(label)

    def assign_attribute_to_tokens(self, start, end, label, attribute):
        for token in self.find_tokens(start, end):
            token.set_attribute(label, attribute)

    def get_token_features_labels(self):
        features = []
        labels = []

        for sentence in self.token_sentences:
            sentence_features = []
            sentence_labels = []
            for token in sentence:
                sentence_features.append(token.get_feature_vector())
                sentence_labels.append(token.label)
            features.append(sentence_features)
            labels.append(sentence_labels)

        return features, labels

    def get_token_features_attributes(self, label):
        features = []
        labels = []

        for sentence in self.token_sentences:
            sentence_features = []
            sentence_labels = []
            for token in sentence:
                sentence_features.append(token.get_feature_vector())
                sentence_labels.append(token.attributes.get(label, 'O'))
            features.append(sentence_features)
            labels.append(sentence_labels)

        return features, labels

    def get_gold_relation_feature_labels(self):
        features = []
        labels = []
        for r in self.relations:
            features.append(r.get_feature_vector())
            labels.append(r.label)
        return features, labels

    def get_candidate_relation_feature_labels(self):
        features = []
        labels = []
        for r in self.get_candidate_relations():
            features.append(r.get_feature_vector())
            labels.append(r.label)
        return features, labels

    def get_tokens_with_label(self, label):
        """Group BIO-labelled tokens into spans, one list of spans per sentence."""
        blabel = "B-{}".format(label)
        ilabel = "I-{}".format(label)

        tokens = []
        for sent_tokens in self.token_sentences:
            sent_labels = [t.label for t in sent_tokens]
            tokens_ = []
            tok_ = []
            for n, l in enumerate(sent_labels):
                if l == blabel:
                    if len(tok_) > 0:
                        tokens_.append(tok_)
                    tok_ = [sent_tokens[n]]
                elif l == ilabel:
                    tok_.append(sent_tokens[n])
                else:
                    if len(tok_) > 0:
                        tokens_.append(tok_)
                    tok_ = []
            # Flush a span that runs up to the end of the sentence.
            if len(tok_) > 0:
                tokens_.append(tok_)
            tokens.append(tokens_)

        return tokens

    def get_candidate_relations(self):
        """Pair every trigger with every compatible argument pair in the same sentence."""
        candidate_relations = []

        characters = self.get_tokens_with_label('Character')
        places = self.get_tokens_with_label('Place')
        spatial_signals = self.get_tokens_with_label('Spatial_Signal')
        say_words = self.get_tokens_with_label('Says')
        character_lines = self.get_tokens_with_label('Character_Line')

        for I in range(len(spatial_signals)):
            for sp in spatial_signals[I]:
                for ch in characters[I]:
                    for pl in places[I]:
                        candidate_relations.append(Relation(ch, pl, sp, self.LCA))

        for I in range(len(say_words)):
            for sw in say_words[I]:
                for ch in characters[I]:
                    for cl in character_lines[I]:
                        candidate_relations.append(Relation(ch, cl, sw, self.LCA))

        # Copy gold labels onto matching candidates.
        for cr in candidate_relations:
            for r in self.relations:
                if cr == r:
                    cr.label = r.label

        return candidate_relations

    def predict_relations(self, model):
        for r in self.get_candidate_relations():
            f = r.get_feature_vector()
            label = model.predict([f])[0]
            if label != 'NONE':
                r.label = label
                self.relations.append(r)

    def __str__(self):
        return self.text
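
# A minimal end-to-end usage sketch (not part of the original
# pipeline): wrap a parsed spaCy Doc, project a character-offset
# annotation onto the tokens, and hand the per-sentence feature/label
# lists to a sequence tagger. `nlp` and `crf` are assumed to be a
# loaded spaCy model and a tagger with a scikit-learn style fit(),
# e.g. sklearn_crfsuite.CRF().
def _demo_token_tagging(nlp, crf):
    d = Document(nlp("Alice walked into the dark forest."))
    d.assign_label_to_tokens(0, 5, 'Character')    # "Alice"
    d.assign_label_to_tokens(22, 33, 'Place')      # "dark forest"
    features, labels = d.get_token_features_labels()
    crf.fit(features, labels)
    return crf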
class Relation:
    """A relation with trigger, arg1, arg2 (token spans) and a label."""

    def __init__(self, arg1, arg2, trigger, lca, label='NONE'):
        self.arg1 = arg1
        self.arg2 = arg2
        self.trigger = trigger
        self.doc = trigger[0].doc
        self.LCA = lca
        self.label = label

    def __repr__(self):
        return "<{}| trigger: {}, arg1: {}, arg2: {}>".format(
            self.label, self.trigger, self.arg1, self.arg2)

    def __eq__(self, other):
        # Spans are compared on the overlapping prefix of their tokens.
        return all(self.arg1[n].text == other.arg1[n].text
                   for n in range(min(len(self.arg1), len(other.arg1)))) \
            and all(self.arg2[n].text == other.arg2[n].text
                    for n in range(min(len(self.arg2), len(other.arg2)))) \
            and all(self.trigger[n].text == other.trigger[n].text
                    for n in range(min(len(self.trigger), len(other.trigger))))

    def get_feature_vector(self):
        rf = {}

        arg1 = get_token_head([t.token for t in self.arg1])
        arg2 = get_token_head([t.token for t in self.arg2])
        trigger = get_token_head([t.token for t in self.trigger])

        arg1_type = self.arg1[0].label.replace('B-', '')
        arg2_type = self.arg2[0].label.replace('B-', '')

        rf['10'] = arg1_type + '::' + arg2_type

        # Position of each argument relative to the trigger; a tie
        # counts as 'left'.
        arg1_direction = 'right' if trigger.i < arg1.i else 'left'
        arg2_direction = 'right' if trigger.i < arg2.i else 'left'

        rf['12.1'] = arg1_direction
        rf['12.2'] = arg2_direction
        rf['13'] = arg1_direction + '::' + arg2_direction

        rf['1'] = trigger.text.lower()
        rf['2'] = trigger.lemma_
        rf['3'] = trigger.pos_
        rf['4'] = rf['2'] + '::' + rf['3']
        rf['11'] = rf['10'] + '::' + rf['2']
        rf['14'] = rf['13'] + '::' + rf['2']

        # Per-argument features.
        for i, (token, arg_type) in enumerate([(arg1, arg1_type),
                                               (arg2, arg2_type)]):
            rf['5.{}'.format(i)] = token.text.lower()
            rf['6.{}'.format(i)] = token.lemma_
            rf['7.{}'.format(i)] = token.pos_
            rf['8.{}'.format(i)] = token.lemma_ + '::' + token.pos_
            rf['9.{}'.format(i)] = arg_type
            rf['17.{}'.format(i)] = get_min_dep_path(token.i, trigger.i,
                                                     self.doc, self.LCA)
            rf['20.{}'.format(i)] = len(rf['17.{}'.format(i)].split('::'))
            rf['22.{}'.format(i)] = abs(token.i - trigger.i)

        rf['18'] = rf['17.0'] + '::' + rf['17.1']
        rf['19'] = get_min_dep_path(arg1.i, arg2.i, self.doc, self.LCA)
        rf['23'] = rf['22.0'] + rf['22.1']

        return rf
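
# An illustrative relation classifier (an assumption, not the model
# used originally): Relation.get_feature_vector() returns a flat dict,
# so any vectorizing estimator with a predict() method satisfies the
# interface expected by Document.predict_relations(). `train_docs` is
# assumed to be a list of Document objects with gold relations.
def _demo_relation_model(train_docs):
    from sklearn.feature_extraction import DictVectorizer
    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import make_pipeline

    X, y = [], []
    for d in train_docs:
        feats, labs = d.get_candidate_relation_feature_labels()
        X += feats
        y += labs

    # DictVectorizer one-hot encodes the string features and passes the
    # numeric ones through.
    model = make_pipeline(DictVectorizer(), LogisticRegression(max_iter=1000))
    model.fit(X, y)
    return model  # usable as doc.predict_relations(model)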
class Token:
    """A labelled token: wraps a spaCy token with doc and sentence indices."""

    def __init__(self, token, doc, doc_idx, sent, sent_idx, label='O'):
        self.token = token
        self.text = token.text
        self.doc = doc
        self.doc_idx = doc_idx
        self.sent = sent
        self.sent_idx = sent_idx
        self.attributes = {}

        self.label = label
        self.start = self.token.idx
        self.end = self.token.idx + len(self.token)

    def __repr__(self):
        return "[{} -> {}]".format(repr(self.token), self.label)

    def set_label(self, label):
        self.label = label

    def set_attribute(self, label, value):
        self.attributes[label] = value

    def get_feature_vector(self):

        def find_ngrams(input_list, n):
            return zip(*[input_list[i:] for i in range(n)])

        # Stores the feature dictionary.
        feat_dict = {}

        # 1. Create token spans.

        # 5-token span centred on this token
        large_span = self.sent[max(0, self.sent_idx - 2):min(len(self.sent), self.sent_idx + 3)]

        # 3-token span centred on this token
        short_span = self.sent[max(0, self.sent_idx - 1):min(len(self.sent), self.sent_idx + 2)]

        for i, t in enumerate(large_span):
            feat_dict['F.1_{}'.format(i)] = t.text
            feat_dict['F.2_{}'.format(i)] = t.lemma_
            feat_dict['F.3_{}'.format(i)] = t.pos_
            feat_dict['F.4_{}'.format(i)] = t.ent_type_

        for i, t in enumerate(short_span):
            feat_dict['F.5_{}'.format(i)] = "::".join([t.lemma_, t.pos_])
            feat_dict['F.6_{}'.format(i)] = "::".join([t.ent_type_, t.pos_])

        ngrams = find_ngrams([t.pos_ for t in large_span], 2)  # POS bigrams
        for i, ng in enumerate(ngrams):
            feat_dict['F.10_{}'.format(i)] = " ".join(ng)

        ngrams = find_ngrams([t.text for t in short_span], 2)  # raw-string bigrams
        for i, ng in enumerate(ngrams):
            feat_dict['F.11_{}'.format(i)] = " ".join(ng)

        # Dependency relation to the head, if the token attaches
        # directly to the sentence root.
        dirdep, headlemma = get_dep_with_head(self.token)
        if dirdep is not None:
            feat_dict['F.7'] = dirdep
            feat_dict['F.8'] = "::".join([dirdep, headlemma])

        # GloVe vector components.
        vector = self.token.vector
        for i in range(len(vector)):
            feat_dict['F.9_{}'.format(i)] = vector[i]

        return feat_dict


class Character:
    """Named Entity consisting of one or more tokens"""

    def __init__(self, name, age='none', gender='none'):
        self.name = name
        self.age = age
        self.gender = gender

    def __repr__(self):
        return "<Character {} (age: {}, gender: {})>".format(self.name,
                                                             self.age,
                                                             self.gender)

    def __eq__(self, other):
        return self.name.lower() == other.name.lower()


class Place:
    """Named Entity consisting of one or more tokens"""

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return "<Place {}>".format(self.name)

    def __eq__(self, other):
        return self.name.lower() == other.name.lower()


class Sayword:
    """Named Entity consisting of one or more tokens"""

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return "<Sayword {}>".format(self.name)

    def __eq__(self, other):
        return self.name.lower() == other.name.lower()


class CharacterLine:
    """Named Entity consisting of one or more tokens"""

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return "<CharacterLine {}>".format(self.name)

    def __eq__(self, other):
        return self.name.lower() == other.name.lower()


class SpatialSignal:
    """Named Entity consisting of one or more tokens"""

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return "<SpatialSignal {}>".format(self.name)

    def __eq__(self, other):
        return self.name.lower() == other.name.lower()
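
# A hedged sketch of parsing one stand-off annotation line with the
# pypeg2 grammar defined at the top of this module. The line format is
# assumed from the regexes above (variable, type, character offsets,
# covered text); the example line is illustrative, not a fixture from
# the corpus.
def _demo_parse_annotation():
    line = "T1 Character 0 5 Alice"
    ann = parse(line, AnnotationTuple)
    # Expected: ann.variable == 'T1', ann.type == 'Character',
    #           ann.idx == '0 5', ann.annotation == 'Alice'
    return ann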