mirror of https://github.com/rspeer/wordfreq.git
synced 2024-12-24 18:01:38 +00:00

parent d0e0287d71
commit f4c875983e
@@ -257,6 +257,9 @@ def word_frequency(word, lang, wordlist='combined', minimum=0.):
     If a word decomposes into multiple tokens, we'll return a smoothed estimate
     of the word frequency that is no greater than the frequency of any of its
     individual tokens.
+
+    It should be noted that the current tokenizer does not support
+    multi-character Chinese terms.
     """
     args = (word, lang, wordlist, minimum)
     try:
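
The docstring text in this hunk describes how word_frequency handles a word that the tokenizer splits into several tokens: the per-token frequencies are combined into a single smoothed estimate that can never exceed the frequency of any individual token. A minimal sketch of that property, assuming a reciprocal-of-summed-reciprocals combination; the function name combined_frequency and the sample frequencies below are illustrative, not taken from the repository's code:

    # Sketch only: combine per-token frequencies so the result is never
    # greater than the frequency of any single token. The reciprocal-sum
    # rule here is an assumed illustration, not a quote of wordfreq's code.
    def combined_frequency(freqs):
        if not freqs or any(f <= 0.0 for f in freqs):
            return 0.0  # a token with zero frequency makes the phrase unseen
        # 1 / (1/f1 + ... + 1/fn) <= min(f1, ..., fn) for positive inputs
        return 1.0 / sum(1.0 / f for f in freqs)

    # Hypothetical per-token frequencies for a two-token phrase:
    freqs = [2.5e-3, 1.2e-4]
    estimate = combined_frequency(freqs)
    assert estimate <= min(freqs)

For a phrase of n equal-frequency tokens this rule yields f/n, so under this assumption the estimate also shrinks as a word decomposes into more tokens.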