Merge pull request #32 from LuminosoInsight/thai-fix

Leave Thai segments alone in the default regex

Former-commit-id: 84497429e1
commit 68e7846d50
Author: Andrew Lin
Date:   2016-03-10 11:57:44 -05:00
2 changed files with 19 additions and 11 deletions
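
What this means in practice, ahead of the diffs below: Thai text run through the default tokenizer now comes back as one piece instead of being broken apart. A quick sketch of the intended behavior, assuming this repository is Luminoso's wordfreq package and that tokenize() is importable from the top level, as the tests suggest:

    from wordfreq import tokenize  # assumed import path

    # Thai tokenized as Thai: the sequence stays glued together as one token.
    print(tokenize('การเล่นดนตรี', 'th'))
    # expected: ['การเล่นดนตรี']

    # Thai embedded in English text: the Thai run is still left intact.
    print(tokenize('"การเล่นดนตรี" means "playing music"', 'en'))
    # expected: ['การเล่นดนตรี', 'means', 'playing', 'music']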


@@ -100,7 +100,7 @@ def test_tokenization():
     # data
     eq_(tokenize("I don't split at apostrophes, you see.", 'en'),
         ['i', "don't", 'split', 'at', 'apostrophes', 'you', 'see'])
     eq_(tokenize("I don't split at apostrophes, you see.", 'en', include_punctuation=True),
         ['i', "don't", 'split', 'at', 'apostrophes', ',', 'you', 'see', '.'])
@@ -180,3 +180,10 @@ def test_ideographic_fallback():
         tokenize(ja_text, 'en'),
         ['ひらがな', 'カタカナ', 'romaji']
     )
+
+    # Test that we leave Thai letters stuck together. If we had better Thai support,
+    # we would actually split this into a three-word phrase.
+    eq_(tokenize('การเล่นดนตรี', 'th'), ['การเล่นดนตรี'])
+
+    eq_(tokenize('"การเล่นดนตรี" means "playing music"', 'en'),
+        ['การเล่นดนตรี', 'means', 'playing', 'music'])


@@ -3,23 +3,24 @@ import unicodedata
 TOKEN_RE = regex.compile(r"""
-    # Case 1: a special case for Chinese and Japanese
+    # Case 1: a special case for non-spaced languages
     # -----------------------------------------------
-    # When we see characters that are Han ideographs (\p{IsIdeo}) or hiragana
-    # (\p{Script=Hiragana}), we allow a sequence of those characters to be
-    # glued together as a single token. Without this case, the standard rule
-    # (case 2) would make each character a separate token. This would be the
-    # correct behavior for word-wrapping, but a messy failure mode for NLP
-    # tokenization.
+    # When we see characters that are Han ideographs (\p{IsIdeo}), hiragana
+    # (\p{Script=Hiragana}), or Thai (\p{Script=Thai}), we allow a sequence
+    # of those characters to be glued together as a single token.
     #
-    # It is, of course, better to use a tokenizer that is designed for Chinese
-    # or Japanese text. This is effectively a fallback for when the wrong
+    # Without this case, the standard rule (case 2) would make each character
+    # a separate token. This would be the correct behavior for word-wrapping,
+    # but a messy failure mode for NLP tokenization.
+    #
+    # It is, of course, better to use a tokenizer that is designed for Chinese,
+    # Japanese, or Thai text. This is effectively a fallback for when the wrong
     # tokenizer is used.
     #
     # This rule is listed first so that it takes precedence.
-    [\p{IsIdeo}\p{Script=Hiragana}]+ |
+    [\p{IsIdeo}\p{Script=Hiragana}\p{Script=Thai}]+ |
 
     # Case 2: standard Unicode segmentation
     # -------------------------------------
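
To see why Case 1 has to be listed first, here is a minimal runnable sketch. SIMPLE_TOKEN_RE is hypothetical and far simpler than the real TOKEN_RE: the \w+ branch merely stands in for the standard segmentation of Case 2.

    import regex  # the third-party 'regex' module, which supports \p{...} properties

    # Hypothetical two-case pattern: the first branch glues Han ideographs,
    # hiragana, and Thai into single tokens before any general rule applies.
    SIMPLE_TOKEN_RE = regex.compile(r"""
        [\p{IsIdeo}\p{Script=Hiragana}\p{Script=Thai}]+ |  # Case 1: non-spaced scripts
        \w+                                                # stand-in for Case 2
    """, regex.VERBOSE)

    print(SIMPLE_TOKEN_RE.findall('"การเล่นดนตรี" means "playing music"'))
    # expected: ['การเล่นดนตรี', 'means', 'playing', 'music']

Because alternation tries branches left to right, a Thai, Han, or hiragana run is consumed whole by Case 1 before the general rule ever sees it; in the real TOKEN_RE, Case 2 would otherwise split such runs character by character, as the comment above explains.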