moved last_tab to tokenize_twitter

Former-commit-id: 7fc23666a9
This commit is contained in:
Joshua Chin 2015-07-17 15:10:17 -04:00
parent 4e87458242
commit 919f2f5912
2 changed files with 14 additions and 10 deletions

View File

@ -2,9 +2,17 @@ from wordfreq_builder.tokenizers import cld2_surface_tokenizer, tokenize_file
import argparse
def last_tab(line):
    """
    Extract the final tab-separated field from *line*.

    Returns the text after the last tab (or the whole line when no tab
    is present), with surrounding whitespace removed.
    """
    _, _, tail = line.rpartition('\t')
    return tail.strip()
def tokenize_twitter(in_filename, out_prefix):
tokenize_file(in_filename, out_prefix,
tokenizer=cld2_surface_tokenizer)
tokenizer=cld2_surface_tokenizer,
line_reader=last_tab)
def main():

View File

@ -40,14 +40,7 @@ def cld2_detect_language(text):
return pycld2.detect(text)[2][0][1]
def last_tab(line):
"""
Read lines by keeping only the last tab-separated value.
"""
return line.split('\t')[-1].strip()
def tokenize_file(in_filename, out_prefix, tokenizer, line_reader=last_tab):
def tokenize_file(in_filename, out_prefix, tokenizer, line_reader=None):
"""
Process a file by running it through the given tokenizer, sorting the
results by the language of each line, and inserting newlines
@ -56,7 +49,10 @@ def tokenize_file(in_filename, out_prefix, tokenizer, line_reader=last_tab):
out_files = {}
with open(in_filename, encoding='utf-8') as in_file:
for line in in_file:
text = line_reader(line)
if line_reader is not None:
text = line_reader(line)
else:
text = line
language, tokens = tokenizer(text)
if language != 'un':
tokenized = '\n'.join(tokens)