[merge] merging commit from v1.1

Al
2017-08-14 04:04:58 -06:00
parent bb277fb326
commit 448ca6a61a
10 changed files with 374 additions and 294 deletions

View File

@@ -2,7 +2,6 @@
import six
from geodata.text import _normalize
from geodata.text.tokenize import tokenize_raw
from geodata.text.token_types import token_types
from geodata.encoding import safe_decode
@@ -17,12 +16,7 @@ NORMALIZE_STRING_TRIM = _normalize.NORMALIZE_STRING_TRIM
NORMALIZE_STRING_REPLACE_HYPHENS = _normalize.NORMALIZE_STRING_REPLACE_HYPHENS
NORMALIZE_STRING_SIMPLE_LATIN_ASCII = _normalize.NORMALIZE_STRING_SIMPLE_LATIN_ASCII
DEFAULT_STRING_OPTIONS = NORMALIZE_STRING_LATIN_ASCII | \
NORMALIZE_STRING_DECOMPOSE | \
NORMALIZE_STRING_TRIM | \
NORMALIZE_STRING_REPLACE_HYPHENS | \
NORMALIZE_STRING_STRIP_ACCENTS | \
NORMALIZE_STRING_LOWERCASE
DEFAULT_STRING_OPTIONS = _normalize.NORMALIZE_DEFAULT_STRING_OPTIONS
# Token options
NORMALIZE_TOKEN_REPLACE_HYPHENS = _normalize.NORMALIZE_TOKEN_REPLACE_HYPHENS
@@ -34,16 +28,10 @@ NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE = _normalize.NORMALIZE_TOKEN_DELETE_OTHE
NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC = _normalize.NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC
NORMALIZE_TOKEN_REPLACE_DIGITS = _normalize.NORMALIZE_TOKEN_REPLACE_DIGITS
DEFAULT_TOKEN_OPTIONS = NORMALIZE_TOKEN_REPLACE_HYPHENS | \
NORMALIZE_TOKEN_DELETE_FINAL_PERIOD | \
NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS | \
NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES | \
NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE
DEFAULT_TOKEN_OPTIONS = _normalize.NORMALIZE_DEFAULT_TOKEN_OPTIONS
TOKEN_OPTIONS_DROP_PERIODS = NORMALIZE_TOKEN_DELETE_FINAL_PERIOD | \
NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS
DEFAULT_TOKEN_OPTIONS_NUMERIC = (DEFAULT_TOKEN_OPTIONS | NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC)
TOKEN_OPTIONS_DROP_PERIODS = _normalize.NORMALIZE_TOKEN_OPTIONS_DROP_PERIODS
DEFAULT_TOKEN_OPTIONS_NUMERIC = _normalize.NORMALIZE_DEFAULT_TOKEN_OPTIONS_NUMERIC
def remove_parens(tokens):
@@ -62,33 +50,7 @@ def remove_parens(tokens):
def normalize_string(s, string_options=DEFAULT_STRING_OPTIONS):
s = safe_decode(s)
if string_options & _normalize.NORMALIZE_STRING_LATIN_ASCII:
normalized = _normalize.normalize_string_latin(s, string_options)
else:
normalized = _normalize.normalize_string_utf8(s, string_options)
return normalized
def normalize_token(s, t, token_options=DEFAULT_TOKEN_OPTIONS):
return _normalize.normalize_token(s, t, token_options)
def normalize_tokens_whitespace(s, raw_tokens, token_options=DEFAULT_TOKEN_OPTIONS):
last_end = 0
tokens = []
for t in raw_tokens:
t_norm = _normalize.normalize_token(s, t, token_options)
t_class = token_types.from_id(t[-1])
if last_end < t[0]:
tokens.append((six.u(' '), token_types.WHITESPACE))
last_end = sum(t[:2])
tokens.append((t_norm, t_class))
return tokens
return _normalize.normalize_string(s, string_options)
def normalized_tokens(s, string_options=DEFAULT_STRING_OPTIONS,
@@ -105,20 +67,10 @@ def normalized_tokens(s, string_options=DEFAULT_STRING_OPTIONS,
Usage:
normalized_tokens(u'St.-Barthélemy')
'''
normalized = normalize_string(s, string_options=string_options)
# Tuples of (offset, len, type)
raw_tokens = tokenize_raw(normalized)
tokens = []
last_end = 0
if not whitespace:
tokens = [(_normalize.normalize_token(normalized, t, token_options),
token_types.from_id(t[-1])) for t in raw_tokens]
else:
tokens = normalize_tokens_whitespace(normalized, raw_tokens, token_options=token_options)
s = safe_decode(s)
normalized_tokens = _normalize.normalized_tokens(s, string_options, token_options, whitespace)
if strip_parentheticals:
return remove_parens(tokens)
else:
return tokens
normalized_tokens = remove_parens(normalized_tokens)
return [(s, token_types.from_id(token_type)) for s, token_type in normalized_tokens]
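Taken together, this hunk replaces the Python-side normalization and token loop with single calls into the C extension, which now talks to libpostal's public API. A minimal usage sketch of the slimmed-down module as it reads after this change (the input string comes from the docstring's own usage example; everything else is shown in the diff above):

```python
from geodata.text.normalize import normalize_string, normalized_tokens

# normalize_string() is now a thin pass-through to
# _normalize.normalize_string (libpostal_normalize_string in C).
print(normalize_string(u'St.-Barthélemy'))

# normalized_tokens() gets its (string, token_type_id) pairs from
# _normalize.normalized_tokens; the Python layer only maps type ids
# back to token_types and optionally strips parentheticals.
for token, token_type in normalized_tokens(u'St.-Barthélemy'):
    print(token, token_type)
```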

View File

@@ -1,7 +1,6 @@
#include <Python.h>
#include "src/normalize.h"
#include "src/transliterate.h"
#include <libpostal/libpostal.h>
#if PY_MAJOR_VERSION >= 3
#define IS_PY3K
@@ -19,9 +18,7 @@ struct module_state {
static struct module_state _state;
#endif
static PyObject *py_normalize_string_utf8(PyObject *self, PyObject *args)
static PyObject *py_normalize_string(PyObject *self, PyObject *args)
{
PyObject *arg1;
uint64_t options;
@@ -48,7 +45,7 @@ static PyObject *py_normalize_string_utf8(PyObject *self, PyObject *args)
if (str == NULL) {
PyErr_SetString(PyExc_TypeError,
"Parameter could not be utf-8 encoded");
goto exit_decref_unistr;
goto exit_normalize_decref_unistr;
}
char *input = PyBytes_AsString(str);
@@ -56,13 +53,13 @@ static PyObject *py_normalize_string_utf8(PyObject *self, PyObject *args)
#endif
if (input == NULL) {
goto exit_decref_str;
goto exit_normalize_decref_str;
}
char *normalized = normalize_string_utf8(input, options);
char *normalized = libpostal_normalize_string(input, options);
if (normalized == NULL) {
goto exit_decref_str;
goto exit_normalize_decref_str;
}
PyObject *result = PyUnicode_DecodeUTF8((const char *)normalized, strlen(normalized), "strict");
@@ -70,7 +67,7 @@ static PyObject *py_normalize_string_utf8(PyObject *self, PyObject *args)
if (result == NULL) {
PyErr_SetString(PyExc_ValueError,
"Result could not be utf-8 decoded");
goto exit_decref_str;
goto exit_normalize_decref_str;
}
#ifndef IS_PY3K
@@ -80,21 +77,26 @@ static PyObject *py_normalize_string_utf8(PyObject *self, PyObject *args)
return result;
exit_decref_str:
exit_normalize_decref_str:
#ifndef IS_PY3K
Py_XDECREF(str);
#endif
exit_decref_unistr:
exit_normalize_decref_unistr:
Py_XDECREF(unistr);
return 0;
}
static PyObject *py_normalize_string_latin(PyObject *self, PyObject *args)
static PyObject *py_normalized_tokens(PyObject *self, PyObject *args)
{
PyObject *arg1;
uint64_t options;
if (!PyArg_ParseTuple(args, "OK:normalize", &arg1, &options)) {
uint64_t string_options = LIBPOSTAL_NORMALIZE_DEFAULT_STRING_OPTIONS;
uint64_t token_options = LIBPOSTAL_NORMALIZE_DEFAULT_TOKEN_OPTIONS;
uint32_t arg_whitespace = 0;
PyObject *result = NULL;
if (!PyArg_ParseTuple(args, "O|KKI:normalize", &arg1, &string_options, &token_options, &arg_whitespace)) {
return 0;
}
@@ -117,7 +119,7 @@ static PyObject *py_normalize_string_latin(PyObject *self, PyObject *args)
if (str == NULL) {
PyErr_SetString(PyExc_TypeError,
"Parameter could not be utf-8 encoded");
goto exit_decref_unistr;
goto exit_normalized_tokens_decref_str;
}
char *input = PyBytes_AsString(str);
@@ -125,98 +127,46 @@ static PyObject *py_normalize_string_latin(PyObject *self, PyObject *args)
#endif
if (input == NULL) {
goto exit_decref_str;
goto exit_normalized_tokens_decref_str;
}
char *normalized = normalize_string_latin(input, strlen(input), options);
bool whitespace = arg_whitespace;
PyObject *result = PyUnicode_DecodeUTF8((const char *)normalized, strlen(normalized), "strict");
free(normalized);
if (result == NULL) {
PyErr_SetString(PyExc_ValueError,
"Result could not be utf-8 decoded");
goto exit_decref_str;
size_t num_tokens;
libpostal_normalized_token_t *normalized_tokens = libpostal_normalized_tokens(input, string_options, token_options, whitespace, &num_tokens);
if (normalized_tokens == NULL) {
goto exit_normalized_tokens_decref_str;
}
#ifndef IS_PY3K
Py_XDECREF(str);
#endif
Py_XDECREF(unistr);
return result;
exit_decref_str:
#ifndef IS_PY3K
Py_XDECREF(str);
#endif
exit_decref_unistr:
Py_XDECREF(unistr);
return 0;
}
static PyObject *py_normalize_token(PyObject *self, PyObject *args)
{
PyObject *s;
uint32_t offset;
uint32_t len;
uint16_t type;
uint64_t options;
if (!PyArg_ParseTuple(args, "O(IIH)K:normalize", &s, &offset, &len, &type, &options)) {
PyErr_SetString(PyExc_TypeError,
"Error parsing arguments");
return 0;
result = PyList_New((Py_ssize_t)num_tokens);
if (!result) {
goto exit_free_normalized_tokens;
}
token_t token = (token_t){(size_t)offset, (size_t)len, type};
PyObject *unistr = PyUnicode_FromObject(s);
if (unistr == NULL) {
PyErr_SetString(PyExc_TypeError,
"Parameter could not be converted to unicode in scanner");
return 0;
}
#ifdef IS_PY3K
// Python 3 encoding, supported by Python 3.3+
char *input = PyUnicode_AsUTF8(unistr);
#else
// Python 2 encoding
PyObject *str = PyUnicode_AsEncodedString(unistr, "utf-8", "strict");
if (str == NULL) {
PyErr_SetString(PyExc_ValueError,
"Parameter could not be utf-8 encoded");
goto exit_decref_unistr;
for (size_t i = 0; i < num_tokens; i++) {
libpostal_normalized_token_t normalized_token = normalized_tokens[i];
char *token_str = normalized_token.str;
PyObject *py_token = PyUnicode_DecodeUTF8((const char *)token_str, strlen(token_str), "strict");
if (py_token == NULL) {
Py_DECREF(result);
goto exit_free_normalized_tokens;
}
char *input = PyBytes_AsString(str);
PyObject *t = PyTuple_New(2);
PyObject *py_token_type = PyInt_FromLong(normalized_token.token.type);
#endif
PyTuple_SetItem(t, 0, py_token);
PyTuple_SetItem(t, 1, py_token_type);
if (input == NULL) {
goto exit_decref_str;
// Note: PyList_SetItem steals a reference, so don't worry about DECREF
PyList_SetItem(result, (Py_ssize_t)i, t);
}
char_array *token_buffer = char_array_new_size(token.len);
add_normalized_token(token_buffer, input, token, options);
char *token_str = char_array_get_string(token_buffer);
PyObject *result = PyUnicode_DecodeUTF8((const char *)token_str, token_buffer->n - 1, "strict");
if (result == NULL) {
PyErr_SetString(PyExc_ValueError,
"Error decoding token");
char_array_destroy(token_buffer);
goto exit_decref_str;
for (size_t i = 0; i < num_tokens; i++) {
free(normalized_tokens[i].str);
}
char_array_destroy(token_buffer);
free(normalized_tokens);
#ifndef IS_PY3K
Py_XDECREF(str);
@@ -224,20 +174,24 @@ static PyObject *py_normalize_token(PyObject *self, PyObject *args)
Py_XDECREF(unistr);
return result;
exit_decref_str:
exit_free_normalized_tokens:
for (size_t i = 0; i < num_tokens; i++) {
free(normalized_tokens[i].str);
}
free(normalized_tokens);
exit_normalized_tokens_decref_str:
#ifndef IS_PY3K
Py_XDECREF(str);
#endif
exit_decref_unistr:
exit_normalized_tokens_decref_unistr:
Py_XDECREF(unistr);
return 0;
}
static PyMethodDef normalize_methods[] = {
{"normalize_string_utf8", (PyCFunction)py_normalize_string_utf8, METH_VARARGS, "normalize_string_utf8(input, options)"},
{"normalize_string_latin", (PyCFunction)py_normalize_string_latin, METH_VARARGS, "normalize_string_latin(input, options)"},
{"normalize_token", (PyCFunction)py_normalize_token, METH_VARARGS, "normalize_token(input, options)"},
{"normalize_string", (PyCFunction)py_normalize_string, METH_VARARGS, "normalize_string(input, options)"},
{"normalized_tokens", (PyCFunction)py_normalized_tokens, METH_VARARGS, "normalize_token(input, string_options, token_options, whitespace)"},
{NULL, NULL},
};
@@ -295,32 +249,40 @@ init_normalize(void) {
INITERROR;
}
if (!transliteration_module_setup(NULL)) {
if (!libpostal_setup()) {
PyErr_SetString(PyExc_RuntimeError,
"Could not load transliterate module");
"Could not load libpostal");
Py_DECREF(module);
INITERROR;
}
PyModule_AddObject(module, "NORMALIZE_STRING_LATIN_ASCII", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_LATIN_ASCII));
PyModule_AddObject(module, "NORMALIZE_STRING_TRANSLITERATE", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_TRANSLITERATE));
PyModule_AddObject(module, "NORMALIZE_STRING_STRIP_ACCENTS", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_STRIP_ACCENTS));
PyModule_AddObject(module, "NORMALIZE_STRING_DECOMPOSE", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_DECOMPOSE));
PyModule_AddObject(module, "NORMALIZE_STRING_COMPOSE", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_COMPOSE));
PyModule_AddObject(module, "NORMALIZE_STRING_LOWERCASE", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_LOWERCASE));
PyModule_AddObject(module, "NORMALIZE_STRING_TRIM", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_TRIM));
PyModule_AddObject(module, "NORMALIZE_STRING_REPLACE_HYPHENS", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_REPLACE_HYPHENS));
PyModule_AddObject(module, "NORMALIZE_STRING_SIMPLE_LATIN_ASCII", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_SIMPLE_LATIN_ASCII));
PyModule_AddObject(module, "NORMALIZE_STRING_LATIN_ASCII", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_LATIN_ASCII));
PyModule_AddObject(module, "NORMALIZE_STRING_TRANSLITERATE", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_TRANSLITERATE));
PyModule_AddObject(module, "NORMALIZE_STRING_STRIP_ACCENTS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_STRIP_ACCENTS));
PyModule_AddObject(module, "NORMALIZE_STRING_DECOMPOSE", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_DECOMPOSE));
PyModule_AddObject(module, "NORMALIZE_STRING_COMPOSE", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_COMPOSE));
PyModule_AddObject(module, "NORMALIZE_STRING_LOWERCASE", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_LOWERCASE));
PyModule_AddObject(module, "NORMALIZE_STRING_TRIM", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_TRIM));
PyModule_AddObject(module, "NORMALIZE_STRING_REPLACE_HYPHENS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_REPLACE_HYPHENS));
PyModule_AddObject(module, "NORMALIZE_STRING_SIMPLE_LATIN_ASCII", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_SIMPLE_LATIN_ASCII));
PyModule_AddObject(module, "NORMALIZE_TOKEN_REPLACE_HYPHENS", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_REPLACE_HYPHENS));
PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_HYPHENS", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_DELETE_HYPHENS));
PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_FINAL_PERIOD", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_DELETE_FINAL_PERIOD));
PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS));
PyModule_AddObject(module, "NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES));
PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE));
PyModule_AddObject(module, "NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC));
PyModule_AddObject(module, "NORMALIZE_TOKEN_REPLACE_DIGITS", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_REPLACE_DIGITS));
PyModule_AddObject(module, "NORMALIZE_TOKEN_REPLACE_HYPHENS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_HYPHENS));
PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_HYPHENS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_DELETE_HYPHENS));
PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_FINAL_PERIOD", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_DELETE_FINAL_PERIOD));
PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS));
PyModule_AddObject(module, "NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES));
PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE));
PyModule_AddObject(module, "NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC));
PyModule_AddObject(module, "NORMALIZE_TOKEN_REPLACE_DIGITS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_DIGITS));
PyModule_AddObject(module, "NORMALIZE_DEFAULT_STRING_OPTIONS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_DEFAULT_STRING_OPTIONS));
PyModule_AddObject(module, "NORMALIZE_DEFAULT_TOKEN_OPTIONS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_DEFAULT_TOKEN_OPTIONS));
PyModule_AddObject(module, "NORMALIZE_TOKEN_OPTIONS_DROP_PERIODS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_OPTIONS_DROP_PERIODS));
PyModule_AddObject(module, "NORMALIZE_DEFAULT_TOKEN_OPTIONS_NUMERIC", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_DEFAULT_TOKEN_OPTIONS_NUMERIC));
#if PY_MAJOR_VERSION >= 3
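The option constants above are now re-exported from libpostal's LIBPOSTAL_NORMALIZE_* values, and the four precomputed default masks are exposed as well, so the Python layer no longer has to OR individual flags together. Since they remain plain integer bitmasks, custom combinations still work; a small sketch (the example input is illustrative):

```python
from geodata.text import _normalize

# Compose a custom option set with bitwise OR...
custom = (_normalize.NORMALIZE_STRING_LOWERCASE
          | _normalize.NORMALIZE_STRING_TRIM
          | _normalize.NORMALIZE_STRING_STRIP_ACCENTS)

# ...or use the default mask precomputed by libpostal itself.
default = _normalize.NORMALIZE_DEFAULT_STRING_OPTIONS

print(_normalize.normalize_string(u'São Paulo', custom))
```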

View File

@@ -1,6 +1,6 @@
#include <Python.h>
#include "src/scanner.h"
#include <libpostal/libpostal.h>
#if PY_MAJOR_VERSION >= 3
#define IS_PY3K
@@ -18,14 +18,17 @@ struct module_state {
static struct module_state _state;
#endif
static PyObject *py_tokenize(PyObject *self, PyObject *args)
{
PyObject *arg1;
if (!PyArg_ParseTuple(args, "O:tokenize", &arg1)) {
uint32_t arg_whitespace = 0;
if (!PyArg_ParseTuple(args, "OI:tokenize", &arg1, &arg_whitespace)) {
return 0;
}
bool whitespace = arg_whitespace;
PyObject *unistr = PyUnicode_FromObject(arg1);
if (unistr == NULL) {
PyErr_SetString(PyExc_TypeError,
@@ -57,26 +60,28 @@ static PyObject *py_tokenize(PyObject *self, PyObject *args)
goto error_decref_str;
}
token_array *tokens = tokenize(input);
size_t num_tokens;
libpostal_token_t *tokens = libpostal_tokenize(input, whitespace, &num_tokens);
if (tokens == NULL) {
goto error_decref_str;
}
PyObject *result = PyTuple_New(tokens->n);
PyObject *result = PyTuple_New(num_tokens);
if (!result) {
token_array_destroy(tokens);
free(tokens);
goto error_decref_str;
return 0;
}
PyObject *tuple;
token_t token;
for (size_t i = 0; i < tokens->n; i++) {
token = tokens->a[i];
libpostal_token_t token;
for (size_t i = 0; i < num_tokens; i++) {
token = tokens[i];
tuple = Py_BuildValue("III", token.offset, token.len, token.type);
if (PyTuple_SetItem(result, i, tuple) < 0) {
token_array_destroy(tokens);
free(tokens);
goto error_decref_str;
}
}
@@ -86,7 +91,7 @@ static PyObject *py_tokenize(PyObject *self, PyObject *args)
#endif
Py_XDECREF(unistr);
token_array_destroy(tokens);
free(tokens);
return result;
@@ -100,12 +105,10 @@ error_decref_unistr:
}
static PyMethodDef tokenize_methods[] = {
{"tokenize", (PyCFunction)py_tokenize, METH_VARARGS, "tokenize(text)"},
{"tokenize", (PyCFunction)py_tokenize, METH_VARARGS, "tokenize(text, whitespace)"},
{NULL, NULL},
};
#ifdef IS_PY3K
static int tokenize_traverse(PyObject *m, visitproc visit, void *arg) {
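The tokenizer now calls libpostal_tokenize(), which returns a flat, malloc'd array of libpostal_token_t (hence free() replacing token_array_destroy()) and takes a whitespace flag controlling whether whitespace tokens are kept. A sketch of the low-level call as exposed to Python; per the wrapper in the next file, offsets and lengths are byte positions in the UTF-8 encoded input:

```python
from geodata.text import _tokenize

# The second argument is the new whitespace flag (0 drops whitespace
# tokens, nonzero keeps them); each result is an (offset, len, type) tuple.
for offset, length, token_type in _tokenize.tokenize(u'123 Main St.', 0):
    print(offset, length, token_type)
```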

View File

@@ -3,12 +3,9 @@ from geodata.text import _tokenize
from geodata.text.token_types import token_types
def tokenize_raw(s):
return _tokenize.tokenize(safe_decode(s))
def tokenize(s):
def tokenize(s, whitespace=False):
u = safe_decode(s)
s = safe_encode(s)
return [(safe_decode(s[start:start + length]), token_types.from_id(token_type))
for start, length, token_type in _tokenize.tokenize(u)]
for start, length, token_type in _tokenize.tokenize(u, whitespace)]
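For reference, a short usage sketch of the high-level wrapper after this change (illustrative input):

```python
from geodata.text.tokenize import tokenize

# Default behavior drops whitespace tokens...
print(tokenize('123 Main St.'))

# ...while whitespace=True threads the new flag through to
# libpostal_tokenize and keeps them in the output.
print(tokenize('123 Main St.', whitespace=True))
```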

View File

@@ -2,9 +2,7 @@ import os
from setuptools import setup, Extension, find_packages
this_dir = os.path.dirname(__file__)
PROJECT_DIR = os.path.join(this_dir, os.pardir)
SRC_DIR = os.path.join(PROJECT_DIR, 'src')
RESOURCES_DIR = 'resources'
def main():
@@ -14,35 +12,29 @@ def main():
packages=find_packages(),
ext_modules=[
Extension('geodata.text._tokenize',
sources=[os.path.join(SRC_DIR, f)
for f in ('scanner.c',
'string_utils.c',
'tokens.c',
'utf8proc/utf8proc.c',
)
] + ['geodata/text/pytokenize.c'],
include_dirs=[PROJECT_DIR],
extra_compile_args=['-O0', '-std=gnu99',
sources=['geodata/text/pytokenize.c'],
libraries=['postal'],
include_dirs=['/usr/local/include'],
library_dirs=['/usr/local/lib'],
extra_compile_args=['-std=c99',
'-Wno-unused-function'],
),
Extension('geodata.text._normalize',
sources=[os.path.join(SRC_DIR, f)
for f in ('normalize.c',
'string_utils.c',
'utf8proc/utf8proc.c',
'tokens.c',
'unicode_scripts.c',
'transliterate.c',
'file_utils.c',
'trie.c',
'trie_search.c',)
] + ['geodata/text/pynormalize.c'],
include_dirs=[PROJECT_DIR],
extra_compile_args=['-std=gnu99', '-DHAVE_CONFIG_H',
'-DLIBPOSTAL_DATA_DIR="{}"'.format(os.getenv('LIBPOSTAL_DATA_DIR', os.path.realpath(os.path.join(PROJECT_DIR, 'data')))),
sources=['geodata/text/pynormalize.c'],
libraries=['postal'],
include_dirs=['/usr/local/include'],
library_dirs=['/usr/local/lib'],
extra_compile_args=['-std=c99',
'-Wno-unused-function'],
),
],
data_files=[
(os.path.join(RESOURCES_DIR, os.path.relpath(d, RESOURCES_DIR)), [os.path.join(d, filename) for filename in filenames])
for d, _, filenames in os.walk(RESOURCES_DIR)
],
package_data={
'geodata': ['**/*.sh']
},
include_package_data=True,
zip_safe=False,
url='http://mapzen.com',
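Both extensions now compile only their thin CPython wrappers and link against an installed libpostal (-lpostal, header at libpostal/libpostal.h) instead of rebuilding the vendored sources. The /usr/local paths are hard-coded in this commit; a hypothetical variation for installs under another prefix (LIBPOSTAL_PREFIX is an invented environment variable, used here purely for illustration):

```python
import os
from setuptools import Extension

# Hypothetical: fall back to the commit's hard-coded /usr/local prefix.
prefix = os.getenv('LIBPOSTAL_PREFIX', '/usr/local')

ext = Extension(
    'geodata.text._normalize',
    sources=['geodata/text/pynormalize.c'],
    libraries=['postal'],
    include_dirs=[os.path.join(prefix, 'include')],
    library_dirs=[os.path.join(prefix, 'lib')],
    extra_compile_args=['-std=c99', '-Wno-unused-function'],
)
```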