[merge] merging commit from v1.1

Al
2017-08-14 04:04:58 -06:00
parent bb277fb326
commit 448ca6a61a
10 changed files with 374 additions and 294 deletions


@@ -2,7 +2,6 @@
 import six
 
 from geodata.text import _normalize
-from geodata.text.tokenize import tokenize_raw
 from geodata.text.token_types import token_types
 from geodata.encoding import safe_decode
 
@@ -17,12 +16,7 @@ NORMALIZE_STRING_TRIM = _normalize.NORMALIZE_STRING_TRIM
 NORMALIZE_STRING_REPLACE_HYPHENS = _normalize.NORMALIZE_STRING_REPLACE_HYPHENS
 NORMALIZE_STRING_SIMPLE_LATIN_ASCII = _normalize.NORMALIZE_STRING_SIMPLE_LATIN_ASCII
 
-DEFAULT_STRING_OPTIONS = NORMALIZE_STRING_LATIN_ASCII | \
-    NORMALIZE_STRING_DECOMPOSE | \
-    NORMALIZE_STRING_TRIM | \
-    NORMALIZE_STRING_REPLACE_HYPHENS | \
-    NORMALIZE_STRING_STRIP_ACCENTS | \
-    NORMALIZE_STRING_LOWERCASE
+DEFAULT_STRING_OPTIONS = _normalize.NORMALIZE_DEFAULT_STRING_OPTIONS
 
 # Token options
 NORMALIZE_TOKEN_REPLACE_HYPHENS = _normalize.NORMALIZE_TOKEN_REPLACE_HYPHENS
@@ -34,16 +28,10 @@ NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE = _normalize.NORMALIZE_TOKEN_DELETE_OTHE
 NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC = _normalize.NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC
 NORMALIZE_TOKEN_REPLACE_DIGITS = _normalize.NORMALIZE_TOKEN_REPLACE_DIGITS
 
-DEFAULT_TOKEN_OPTIONS = NORMALIZE_TOKEN_REPLACE_HYPHENS | \
-    NORMALIZE_TOKEN_DELETE_FINAL_PERIOD | \
-    NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS | \
-    NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES | \
-    NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE
+DEFAULT_TOKEN_OPTIONS = _normalize.NORMALIZE_DEFAULT_TOKEN_OPTIONS
 
-TOKEN_OPTIONS_DROP_PERIODS = NORMALIZE_TOKEN_DELETE_FINAL_PERIOD | \
-    NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS
-
-DEFAULT_TOKEN_OPTIONS_NUMERIC = (DEFAULT_TOKEN_OPTIONS | NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC)
+TOKEN_OPTIONS_DROP_PERIODS = _normalize.NORMALIZE_TOKEN_OPTIONS_DROP_PERIODS
+DEFAULT_TOKEN_OPTIONS_NUMERIC = _normalize.NORMALIZE_DEFAULT_TOKEN_OPTIONS_NUMERIC
 
 
 def remove_parens(tokens):
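Note: the default option sets that were previously OR'd together in Python (DEFAULT_STRING_OPTIONS, DEFAULT_TOKEN_OPTIONS, TOKEN_OPTIONS_DROP_PERIODS, DEFAULT_TOKEN_OPTIONS_NUMERIC) are now read directly from the _normalize C extension, so Python and C share a single definition. A minimal sketch of how the flags still compose as integer bitmasks, assuming NORMALIZE_DEFAULT_TOKEN_OPTIONS_NUMERIC is simply the default token options with the split-alpha-from-numeric flag OR'd in, as the removed pure-Python definition did:

    from geodata.text import _normalize

    # Defaults exported by the C extension (replacing the removed Python-side ORs)
    default_token_options = _normalize.NORMALIZE_DEFAULT_TOKEN_OPTIONS

    # Extra flags can still be combined with bitwise OR, mirroring the old
    # DEFAULT_TOKEN_OPTIONS_NUMERIC definition
    numeric_token_options = default_token_options | _normalize.NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC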
@@ -62,33 +50,7 @@ def remove_parens(tokens):
 
 def normalize_string(s, string_options=DEFAULT_STRING_OPTIONS):
     s = safe_decode(s)
-    if string_options & _normalize.NORMALIZE_STRING_LATIN_ASCII:
-        normalized = _normalize.normalize_string_latin(s, string_options)
-    else:
-        normalized = _normalize.normalize_string_utf8(s, string_options)
-    return normalized
-
-
-def normalize_token(s, t, token_options=DEFAULT_TOKEN_OPTIONS):
-    return _normalize.normalize_token(s, t, token_options)
-
-
-def normalize_tokens_whitespace(s, raw_tokens, token_options=DEFAULT_TOKEN_OPTIONS):
-    last_end = 0
-    tokens = []
-    for t in raw_tokens:
-        t_norm = _normalize.normalize_token(s, t, token_options)
-        t_class = token_types.from_id(t[-1])
-        if last_end < t[0]:
-            tokens.append((six.u(' '), token_types.WHITESPACE))
-        last_end = sum(t[:2])
-        tokens.append((t_norm, t_class))
-    return tokens
+    return _normalize.normalize_string(s, string_options)
 
 
 def normalized_tokens(s, string_options=DEFAULT_STRING_OPTIONS,
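Note: normalize_string is reduced to a thin wrapper; the choice between the Latin-ASCII and UTF-8 code paths now happens inside _normalize.normalize_string rather than in Python. A hedged usage sketch, assuming this module is importable as geodata.text.normalize (the file path is not shown in this view) and that the NORMALIZE_STRING_* flags behave as their names suggest:

    from geodata.text.normalize import normalize_string, NORMALIZE_STRING_LOWERCASE

    # Default options: decompose/transliterate, trim, lowercase, etc.
    print(normalize_string(u'St.-Barthélemy'))

    # Any combination of NORMALIZE_STRING_* bitmask flags can still be passed explicitly
    print(normalize_string(u'St.-Barthélemy', string_options=NORMALIZE_STRING_LOWERCASE))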
@@ -105,20 +67,10 @@ def normalized_tokens(s, string_options=DEFAULT_STRING_OPTIONS,
     Usage:
         normalized_tokens(u'St.-Barthélemy')
     '''
-    normalized = normalize_string(s, string_options=string_options)
-
-    # Tuples of (offset, len, type)
-    raw_tokens = tokenize_raw(normalized)
-    tokens = []
-    last_end = 0
-
-    if not whitespace:
-        tokens = [(_normalize.normalize_token(normalized, t, token_options),
-                   token_types.from_id(t[-1])) for t in raw_tokens]
-    else:
-        tokens = normalize_tokens_whitespace(normalized, raw_tokens, token_options=token_options)
+    s = safe_decode(s)
+    normalized_tokens = _normalize.normalized_tokens(s, string_options, token_options, whitespace)
 
     if strip_parentheticals:
-        return remove_parens(tokens)
-    else:
-        return tokens
+        normalized_tokens = remove_parens(normalized_tokens)
+
+    return [(s, token_types.from_id(token_type)) for s, token_type in normalized_tokens]
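Note: normalized_tokens now delegates tokenization and per-token normalization to _normalize.normalized_tokens in a single call and only maps the returned token-type ids back to Python token_types, instead of driving tokenize_raw and normalize_token from Python. A minimal usage sketch of the resulting API, assuming the same geodata.text.normalize import path; the example string comes from the docstring above, and the output described in the comment is illustrative, not asserted by this commit:

    from geodata.text.normalize import normalized_tokens

    # A list of (token, token_type) pairs. Under the default options the periods
    # are dropped, the hyphen is normalized and accents are stripped, so the
    # docstring example comes back roughly as the word tokens u'st' and
    # u'barthelemy' (illustrative only).
    tokens = normalized_tokens(u'St.-Barthélemy')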