diff --git a/scripts/geodata/text/normalize.py b/scripts/geodata/text/normalize.py
index 87df1227..70a70be0 100644
--- a/scripts/geodata/text/normalize.py
+++ b/scripts/geodata/text/normalize.py
@@ -2,7 +2,6 @@ import six
 
 from geodata.text import _normalize
-from geodata.text.tokenize import tokenize_raw
 from geodata.text.token_types import token_types
 from geodata.encoding import safe_decode
 
@@ -17,12 +16,7 @@ NORMALIZE_STRING_TRIM = _normalize.NORMALIZE_STRING_TRIM
 NORMALIZE_STRING_REPLACE_HYPHENS = _normalize.NORMALIZE_STRING_REPLACE_HYPHENS
 NORMALIZE_STRING_SIMPLE_LATIN_ASCII = _normalize.NORMALIZE_STRING_SIMPLE_LATIN_ASCII
 
-DEFAULT_STRING_OPTIONS = NORMALIZE_STRING_LATIN_ASCII | \
-                         NORMALIZE_STRING_DECOMPOSE | \
-                         NORMALIZE_STRING_TRIM | \
-                         NORMALIZE_STRING_REPLACE_HYPHENS | \
-                         NORMALIZE_STRING_STRIP_ACCENTS | \
-                         NORMALIZE_STRING_LOWERCASE
+DEFAULT_STRING_OPTIONS = _normalize.NORMALIZE_DEFAULT_STRING_OPTIONS
 
 # Token options
 NORMALIZE_TOKEN_REPLACE_HYPHENS = _normalize.NORMALIZE_TOKEN_REPLACE_HYPHENS
@@ -34,16 +28,10 @@ NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE = _normalize.NORMALIZE_TOKEN_DELETE_OTHE
 NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC = _normalize.NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC
 NORMALIZE_TOKEN_REPLACE_DIGITS = _normalize.NORMALIZE_TOKEN_REPLACE_DIGITS
 
-DEFAULT_TOKEN_OPTIONS = NORMALIZE_TOKEN_REPLACE_HYPHENS | \
-                        NORMALIZE_TOKEN_DELETE_FINAL_PERIOD | \
-                        NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS | \
-                        NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES | \
-                        NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE
+DEFAULT_TOKEN_OPTIONS = _normalize.NORMALIZE_DEFAULT_TOKEN_OPTIONS
 
-TOKEN_OPTIONS_DROP_PERIODS = NORMALIZE_TOKEN_DELETE_FINAL_PERIOD | \
-                             NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS
-
-DEFAULT_TOKEN_OPTIONS_NUMERIC = (DEFAULT_TOKEN_OPTIONS | NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC)
+TOKEN_OPTIONS_DROP_PERIODS = _normalize.NORMALIZE_TOKEN_OPTIONS_DROP_PERIODS
+DEFAULT_TOKEN_OPTIONS_NUMERIC = _normalize.NORMALIZE_DEFAULT_TOKEN_OPTIONS_NUMERIC
 
 
 def remove_parens(tokens):
@@ -62,33 +50,7 @@ def remove_parens(tokens):
 
 def normalize_string(s, string_options=DEFAULT_STRING_OPTIONS):
     s = safe_decode(s)
-    if string_options & _normalize.NORMALIZE_STRING_LATIN_ASCII:
-        normalized = _normalize.normalize_string_latin(s, string_options)
-    else:
-        normalized = _normalize.normalize_string_utf8(s, string_options)
-
-    return normalized
-
-
-def normalize_token(s, t, token_options=DEFAULT_TOKEN_OPTIONS):
-    return _normalize.normalize_token(s, t, token_options)
-
-
-def normalize_tokens_whitespace(s, raw_tokens, token_options=DEFAULT_TOKEN_OPTIONS):
-    last_end = 0
-    tokens = []
-
-    for t in raw_tokens:
-        t_norm = _normalize.normalize_token(s, t, token_options)
-        t_class = token_types.from_id(t[-1])
-
-        if last_end < t[0]:
-            tokens.append((six.u(' '), token_types.WHITESPACE))
-        last_end = sum(t[:2])
-
-        tokens.append((t_norm, t_class))
-
-    return tokens
+    return _normalize.normalize_string(s, string_options)
 
 
 def normalized_tokens(s, string_options=DEFAULT_STRING_OPTIONS,
@@ -105,20 +67,10 @@ def normalized_tokens(s, string_options=DEFAULT_STRING_OPTIONS,
     Usage:
         normalized_tokens(u'St.-Barthélemy')
     '''
-    normalized = normalize_string(s, string_options=string_options)
-
-    # Tuples of (offset, len, type)
-    raw_tokens = tokenize_raw(normalized)
-    tokens = []
-    last_end = 0
-
-    if not whitespace:
-        tokens = [(_normalize.normalize_token(normalized, t, token_options),
-                   token_types.from_id(t[-1])) for t in raw_tokens]
-    else:
-        tokens = normalize_tokens_whitespace(normalized, raw_tokens, token_options=token_options)
+    s = safe_decode(s)
+    normalized_tokens = _normalize.normalized_tokens(s, string_options, token_options, whitespace)
 
     if strip_parentheticals:
-        return remove_parens(tokens)
-    else:
-        return tokens
+        normalized_tokens = remove_parens(normalized_tokens)
+
+    return [(s, token_types.from_id(token_type)) for s, token_type in normalized_tokens]
diff --git a/scripts/geodata/text/pynormalize.c b/scripts/geodata/text/pynormalize.c
index 12f3735b..1ce2df7e 100644
--- a/scripts/geodata/text/pynormalize.c
+++ b/scripts/geodata/text/pynormalize.c
@@ -1,7 +1,6 @@
 #include <Python.h>
 
-#include "src/normalize.h"
-#include "src/transliterate.h"
+#include <libpostal/libpostal.h>
 
 #if PY_MAJOR_VERSION >= 3
 #define IS_PY3K
@@ -19,9 +18,7 @@ struct module_state {
 static struct module_state _state;
 #endif
 
-
-
-static PyObject *py_normalize_string_utf8(PyObject *self, PyObject *args)
+static PyObject *py_normalize_string(PyObject *self, PyObject *args)
 {
     PyObject *arg1;
     uint64_t options;
@@ -48,7 +45,7 @@ static PyObject *py_normalize_string_utf8(PyObject *self, PyObject *args)
     if (str == NULL) {
         PyErr_SetString(PyExc_TypeError,
                         "Parameter could not be utf-8 encoded");
-        goto exit_decref_unistr;
+        goto exit_normalize_decref_unistr;
     }
 
     char *input = PyBytes_AsString(str);
@@ -56,13 +53,13 @@
     #endif
 
     if (input == NULL) {
-        goto exit_decref_str;
+        goto exit_normalize_decref_str;
     }
 
-    char *normalized = normalize_string_utf8(input, options);
+    char *normalized = libpostal_normalize_string(input, options);
 
     if (normalized == NULL) {
-        goto exit_decref_str;
+        goto exit_normalize_decref_str;
    }
 
     PyObject *result = PyUnicode_DecodeUTF8((const char *)normalized, strlen(normalized), "strict");
@@ -70,7 +67,7 @@
     if (result == NULL) {
         PyErr_SetString(PyExc_ValueError,
                         "Result could not be utf-8 decoded");
-        goto exit_decref_str;
+        goto exit_normalize_decref_str;
     }
 
     #ifndef IS_PY3K
@@ -80,21 +77,26 @@
 
     return result;
 
-exit_decref_str:
+exit_normalize_decref_str:
 #ifndef IS_PY3K
     Py_XDECREF(str);
 #endif
-exit_decref_unistr:
+exit_normalize_decref_unistr:
     Py_XDECREF(unistr);
     return 0;
 }
 
-static PyObject *py_normalize_string_latin(PyObject *self, PyObject *args)
+static PyObject *py_normalized_tokens(PyObject *self, PyObject *args)
 {
     PyObject *arg1;
-    uint64_t options;
-    if (!PyArg_ParseTuple(args, "OK:normalize", &arg1, &options)) {
+    uint64_t string_options = LIBPOSTAL_NORMALIZE_DEFAULT_STRING_OPTIONS;
+    uint64_t token_options = LIBPOSTAL_NORMALIZE_DEFAULT_TOKEN_OPTIONS;
+    uint32_t arg_whitespace = 0;
+
+    PyObject *result = NULL;
+
+    if (!PyArg_ParseTuple(args, "O|KKI:normalize", &arg1, &string_options, &token_options, &arg_whitespace)) {
         return 0;
     }
 
@@ -117,7 +119,7 @@
     if (str == NULL) {
         PyErr_SetString(PyExc_TypeError,
                         "Parameter could not be utf-8 encoded");
-        goto exit_decref_unistr;
+        goto exit_normalized_tokens_decref_str;
     }
 
     char *input = PyBytes_AsString(str);
@@ -125,98 +127,46 @@
     #endif
 
     if (input == NULL) {
-        goto exit_decref_str;
+        goto exit_normalized_tokens_decref_str;
     }
 
-    char *normalized = normalize_string_latin(input, strlen(input), options);
+    bool whitespace = arg_whitespace;
 
-    PyObject *result = PyUnicode_DecodeUTF8((const char *)normalized, strlen(normalized), "strict");
-    free(normalized);
-    if (result == NULL) {
-        PyErr_SetString(PyExc_ValueError,
-                        "Result could not be utf-8 decoded");
-        goto exit_decref_str;
+    size_t num_tokens;
+    libpostal_normalized_token_t *normalized_tokens = libpostal_normalized_tokens(input, string_options, token_options, whitespace, &num_tokens);
+
+    if (normalized_tokens == NULL) {
+        goto exit_normalized_tokens_decref_str;
     }
 
-    #ifndef IS_PY3K
-    Py_XDECREF(str);
-    #endif
-    Py_XDECREF(unistr);
-
-    return result;
-
-exit_decref_str:
-#ifndef IS_PY3K
-    Py_XDECREF(str);
-#endif
-exit_decref_unistr:
-    Py_XDECREF(unistr);
-    return 0;
-}
-
-
-
-static PyObject *py_normalize_token(PyObject *self, PyObject *args)
-{
-    PyObject *s;
-
-    uint32_t offset;
-    uint32_t len;
-    uint16_t type;
-
-    uint64_t options;
-    if (!PyArg_ParseTuple(args, "O(IIH)K:normalize", &s, &offset, &len, &type, &options)) {
-        PyErr_SetString(PyExc_TypeError,
-                        "Error parsing arguments");
-        return 0;
+    result = PyList_New((Py_ssize_t)num_tokens);
+    if (!result) {
+        goto exit_free_normalized_tokens;
     }
 
-    token_t token = (token_t){(size_t)offset, (size_t)len, type};
-
-    PyObject *unistr = PyUnicode_FromObject(s);
-    if (unistr == NULL) {
-        PyErr_SetString(PyExc_TypeError,
-                        "Parameter could not be converted to unicode in scanner");
-        return 0;
-    }
-
-    #ifdef IS_PY3K
-    // Python 3 encoding, supported by Python 3.3+
-
-    char *input = PyUnicode_AsUTF8(unistr);
-
-    #else
-    // Python 2 encoding
-
-    PyObject *str = PyUnicode_AsEncodedString(unistr, "utf-8", "strict");
-    if (str == NULL) {
-        PyErr_SetString(PyExc_ValueError,
-                        "Parameter could not be utf-8 encoded");
-        goto exit_decref_unistr;
+    for (size_t i = 0; i < num_tokens; i++) {
+        libpostal_normalized_token_t normalized_token = normalized_tokens[i];
+        char *token_str = normalized_token.str;
+        PyObject *py_token = PyUnicode_DecodeUTF8((const char *)token_str, strlen(token_str), "strict");
+        if (py_token == NULL) {
+            Py_DECREF(result);
+            goto exit_free_normalized_tokens;
         }
 
-        char *input = PyBytes_AsString(str);
+        PyObject *t = PyTuple_New(2);
+        PyObject *py_token_type = PyLong_FromLong(normalized_token.token.type);
 
-    #endif
+        PyTuple_SetItem(t, 0, py_token);
+        PyTuple_SetItem(t, 1, py_token_type);
 
-    if (input == NULL) {
-        goto exit_decref_str;
+        // Note: PyList_SetItem steals a reference, so don't worry about DECREF
+        PyList_SetItem(result, (Py_ssize_t)i, t);
     }
 
-    char_array *token_buffer = char_array_new_size(token.len);
-
-    add_normalized_token(token_buffer, input, token, options);
-    char *token_str = char_array_get_string(token_buffer);
-    PyObject *result = PyUnicode_DecodeUTF8((const char *)token_str, token_buffer->n - 1, "strict");
-
-    if (result == NULL) {
-        PyErr_SetString(PyExc_ValueError,
-                        "Error decoding token");
-        char_array_destroy(token_buffer);
-        goto exit_decref_str;
+    for (size_t i = 0; i < num_tokens; i++) {
+        free(normalized_tokens[i].str);
     }
-
-    char_array_destroy(token_buffer);
+    free(normalized_tokens);
 
     #ifndef IS_PY3K
     Py_XDECREF(str);
@@ -224,20 +174,24 @@
     Py_XDECREF(unistr);
 
     return result;
 
-
-exit_decref_str:
+exit_free_normalized_tokens:
+    for (size_t i = 0; i < num_tokens; i++) {
+        free(normalized_tokens[i].str);
+    }
+    free(normalized_tokens);
+exit_normalized_tokens_decref_str:
 #ifndef IS_PY3K
     Py_XDECREF(str);
 #endif
-exit_decref_unistr:
+exit_normalized_tokens_decref_unistr:
     Py_XDECREF(unistr);
     return 0;
 }
 
+
 static PyMethodDef normalize_methods[] = {
-    {"normalize_string_utf8", (PyCFunction)py_normalize_string_utf8, METH_VARARGS, "normalize_string_utf8(input, options)"},
"normalize_string_utf8(input, options)"}, - {"normalize_string_latin", (PyCFunction)py_normalize_string_latin, METH_VARARGS, "normalize_string_latin(input, options)"}, - {"normalize_token", (PyCFunction)py_normalize_token, METH_VARARGS, "normalize_token(input, options)"}, + {"normalize_string", (PyCFunction)py_normalize_string, METH_VARARGS, "normalize_string(input, options)"}, + {"normalized_tokens", (PyCFunction)py_normalized_tokens, METH_VARARGS, "normalize_token(input, string_options, token_options, whitespace)"}, {NULL, NULL}, }; @@ -295,32 +249,40 @@ init_normalize(void) { INITERROR; } - if (!transliteration_module_setup(NULL)) { + if (!libpostal_setup()) { PyErr_SetString(PyExc_RuntimeError, - "Could not load transliterate module"); + "Could not load libpostal"); Py_DECREF(module); INITERROR; } - PyModule_AddObject(module, "NORMALIZE_STRING_LATIN_ASCII", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_LATIN_ASCII)); - PyModule_AddObject(module, "NORMALIZE_STRING_TRANSLITERATE", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_TRANSLITERATE)); - PyModule_AddObject(module, "NORMALIZE_STRING_STRIP_ACCENTS", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_STRIP_ACCENTS)); - PyModule_AddObject(module, "NORMALIZE_STRING_DECOMPOSE", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_DECOMPOSE)); - PyModule_AddObject(module, "NORMALIZE_STRING_COMPOSE", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_COMPOSE)); - PyModule_AddObject(module, "NORMALIZE_STRING_LOWERCASE", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_LOWERCASE)); - PyModule_AddObject(module, "NORMALIZE_STRING_TRIM", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_TRIM)); - PyModule_AddObject(module, "NORMALIZE_STRING_REPLACE_HYPHENS", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_REPLACE_HYPHENS)); - PyModule_AddObject(module, "NORMALIZE_STRING_SIMPLE_LATIN_ASCII", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_SIMPLE_LATIN_ASCII)); + PyModule_AddObject(module, "NORMALIZE_STRING_LATIN_ASCII", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_LATIN_ASCII)); + PyModule_AddObject(module, "NORMALIZE_STRING_TRANSLITERATE", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_TRANSLITERATE)); + PyModule_AddObject(module, "NORMALIZE_STRING_STRIP_ACCENTS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_STRIP_ACCENTS)); + PyModule_AddObject(module, "NORMALIZE_STRING_DECOMPOSE", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_DECOMPOSE)); + PyModule_AddObject(module, "NORMALIZE_STRING_COMPOSE", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_COMPOSE)); + PyModule_AddObject(module, "NORMALIZE_STRING_LOWERCASE", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_LOWERCASE)); + PyModule_AddObject(module, "NORMALIZE_STRING_TRIM", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_TRIM)); + PyModule_AddObject(module, "NORMALIZE_STRING_REPLACE_HYPHENS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_REPLACE_HYPHENS)); + PyModule_AddObject(module, "NORMALIZE_STRING_SIMPLE_LATIN_ASCII", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_SIMPLE_LATIN_ASCII)); - PyModule_AddObject(module, "NORMALIZE_TOKEN_REPLACE_HYPHENS", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_REPLACE_HYPHENS)); - PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_HYPHENS", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_DELETE_HYPHENS)); - PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_FINAL_PERIOD", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_DELETE_FINAL_PERIOD)); - PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS", 
-    PyModule_AddObject(module, "NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES));
-    PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE));
-    PyModule_AddObject(module, "NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC));
-    PyModule_AddObject(module, "NORMALIZE_TOKEN_REPLACE_DIGITS", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_REPLACE_DIGITS));
+    PyModule_AddObject(module, "NORMALIZE_TOKEN_REPLACE_HYPHENS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_HYPHENS));
+    PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_HYPHENS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_DELETE_HYPHENS));
+    PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_FINAL_PERIOD", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_DELETE_FINAL_PERIOD));
+    PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS));
+    PyModule_AddObject(module, "NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES));
+    PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE));
+    PyModule_AddObject(module, "NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC));
+    PyModule_AddObject(module, "NORMALIZE_TOKEN_REPLACE_DIGITS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_DIGITS));
+
+
+    PyModule_AddObject(module, "NORMALIZE_DEFAULT_STRING_OPTIONS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_DEFAULT_STRING_OPTIONS));
+    PyModule_AddObject(module, "NORMALIZE_DEFAULT_TOKEN_OPTIONS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_DEFAULT_TOKEN_OPTIONS));
+
+    PyModule_AddObject(module, "NORMALIZE_TOKEN_OPTIONS_DROP_PERIODS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_OPTIONS_DROP_PERIODS));
+
+    PyModule_AddObject(module, "NORMALIZE_DEFAULT_TOKEN_OPTIONS_NUMERIC", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_DEFAULT_TOKEN_OPTIONS_NUMERIC));
 
 #if PY_MAJOR_VERSION >= 3
diff --git a/scripts/geodata/text/pytokenize.c b/scripts/geodata/text/pytokenize.c
index 7986bae3..a69a86ea 100644
--- a/scripts/geodata/text/pytokenize.c
+++ b/scripts/geodata/text/pytokenize.c
@@ -1,6 +1,6 @@
 #include <Python.h>
 
-#include "src/scanner.h"
+#include <libpostal/libpostal.h>
 
 #if PY_MAJOR_VERSION >= 3
 #define IS_PY3K
@@ -18,14 +18,17 @@ struct module_state {
 static struct module_state _state;
 #endif
 
-
 static PyObject *py_tokenize(PyObject *self, PyObject *args)
 {
     PyObject *arg1;
-    if (!PyArg_ParseTuple(args, "O:tokenize", &arg1)) {
+    uint32_t arg_whitespace = 0;
+
+    if (!PyArg_ParseTuple(args, "OI:tokenize", &arg1, &arg_whitespace)) {
         return 0;
     }
 
+    bool whitespace = arg_whitespace;
+
     PyObject *unistr = PyUnicode_FromObject(arg1);
     if (unistr == NULL) {
         PyErr_SetString(PyExc_TypeError,
@@ -57,26 +60,28 @@
         goto error_decref_str;
     }
 
-    token_array *tokens = tokenize(input);
+    size_t num_tokens;
+
+    libpostal_token_t *tokens = libpostal_tokenize(input, whitespace, &num_tokens);
 
     if (tokens == NULL) {
         goto error_decref_str;
     }
 
-    PyObject *result = PyTuple_New(tokens->n);
+    PyObject *result = PyTuple_New(num_tokens);
     if (!result) {
-        token_array_destroy(tokens);
+        free(tokens);
         goto error_decref_str;
         return 0;
     }
 
     PyObject *tuple;
-    token_t token;
-    for (size_t i = 0; i < tokens->n; i++) {
-        token = tokens->a[i];
+    libpostal_token_t token;
+    for (size_t i = 0; i < num_tokens; i++) {
+        token = tokens[i];
         tuple = Py_BuildValue("III", token.offset, token.len, token.type);
         if (PyTuple_SetItem(result, i, tuple) < 0) {
-            token_array_destroy(tokens);
+            free(tokens);
             goto error_decref_str;
         }
     }
@@ -86,7 +91,7 @@
     #endif
     Py_XDECREF(unistr);
 
-    token_array_destroy(tokens);
+    free(tokens);
 
     return result;
 
@@ -100,12 +105,10 @@
 }
 
 static PyMethodDef tokenize_methods[] = {
-    {"tokenize", (PyCFunction)py_tokenize, METH_VARARGS, "tokenize(text)"},
+    {"tokenize", (PyCFunction)py_tokenize, METH_VARARGS, "tokenize(text, whitespace)"},
     {NULL, NULL},
 };
 
-
-
 #ifdef IS_PY3K
 static int tokenize_traverse(PyObject *m, visitproc visit, void *arg)
 {
diff --git a/scripts/geodata/text/tokenize.py b/scripts/geodata/text/tokenize.py
index d3d18832..a05022bc 100644
--- a/scripts/geodata/text/tokenize.py
+++ b/scripts/geodata/text/tokenize.py
@@ -3,12 +3,9 @@
 from geodata.text import _tokenize
 from geodata.text.token_types import token_types
 
-def tokenize_raw(s):
-    return _tokenize.tokenize(safe_decode(s))
-
-def tokenize(s):
+def tokenize(s, whitespace=False):
     u = safe_decode(s)
     s = safe_encode(s)
     return [(safe_decode(s[start:start + length]), token_types.from_id(token_type))
-            for start, length, token_type in _tokenize.tokenize(u)]
+            for start, length, token_type in _tokenize.tokenize(u, whitespace)]
diff --git a/scripts/setup.py b/scripts/setup.py
index a25b6b26..6bbf8891 100644
--- a/scripts/setup.py
+++ b/scripts/setup.py
@@ -2,9 +2,7 @@ import os
 
 from setuptools import setup, Extension, find_packages
 
-this_dir = os.path.dirname(__file__)
-PROJECT_DIR = os.path.join(this_dir, os.pardir)
-SRC_DIR = os.path.join(PROJECT_DIR, 'src')
+RESOURCES_DIR = 'resources'
 
 
 def main():
@@ -14,35 +12,29 @@ def main():
         packages=find_packages(),
         ext_modules=[
             Extension('geodata.text._tokenize',
-                      sources=[os.path.join(SRC_DIR, f)
-                               for f in ('scanner.c',
-                                         'string_utils.c',
-                                         'tokens.c',
-                                         'utf8proc/utf8proc.c',
-                                         )
-                               ] + ['geodata/text/pytokenize.c'],
-                      include_dirs=[PROJECT_DIR],
-                      extra_compile_args=['-O0', '-std=gnu99',
+                      sources=['geodata/text/pytokenize.c'],
+                      libraries=['postal'],
+                      include_dirs=['/usr/local/include'],
+                      library_dirs=['/usr/local/lib'],
+                      extra_compile_args=['-std=c99',
                                           '-Wno-unused-function'],
                       ),
             Extension('geodata.text._normalize',
-                      sources=[os.path.join(SRC_DIR, f)
-                               for f in ('normalize.c',
-                                         'string_utils.c',
-                                         'utf8proc/utf8proc.c',
-                                         'tokens.c',
-                                         'unicode_scripts.c',
-                                         'transliterate.c',
-                                         'file_utils.c',
-                                         'trie.c',
-                                         'trie_search.c',)
-                               ] + ['geodata/text/pynormalize.c'],
-                      include_dirs=[PROJECT_DIR],
-                      extra_compile_args=['-std=gnu99', '-DHAVE_CONFIG_H',
-                                          '-DLIBPOSTAL_DATA_DIR="{}"'.format(os.getenv('LIBPOSTAL_DATA_DIR', os.path.realpath(os.path.join(PROJECT_DIR, 'data')))),
+                      sources=['geodata/text/pynormalize.c'],
+                      libraries=['postal'],
+                      include_dirs=['/usr/local/include'],
+                      library_dirs=['/usr/local/lib'],
+                      extra_compile_args=['-std=c99',
                                           '-Wno-unused-function'],
                       ),
         ],
+        data_files=[
+            (os.path.join(RESOURCES_DIR, os.path.relpath(d, RESOURCES_DIR)), [os.path.join(d, filename) for filename in filenames])
+            for d, _, filenames in os.walk(RESOURCES_DIR)
+        ],
+        package_data={
+            'geodata': ['**/*.sh']
+        },
         include_package_data=True,
         zip_safe=False,
         url='http://mapzen.com',
diff --git a/src/libpostal.c b/src/libpostal.c
index d226413e..c969d86c 100644
--- a/src/libpostal.c
+++ b/src/libpostal.c
@@ -1137,6 +1137,76 @@ bool libpostal_setup_language_classifier_datadir(char *datadir) {
     return true;
 }
 
+
+libpostal_token_t *libpostal_tokenize(char *input, bool whitespace, size_t *n) {
+    token_array *tokens = NULL;
+    if (!whitespace) {
+        tokens = tokenize(input);
+    } else {
+        tokens = tokenize_keep_whitespace(input);
+    }
+
+    if (tokens == NULL) {
+        return NULL;
+    }
+
+    libpostal_token_t *a = tokens->a;
+    *n = tokens->n;
+    free(tokens);
+    return a;
+}
+
+char *libpostal_normalize_string(char *str, uint64_t options) {
+    if (options & LIBPOSTAL_NORMALIZE_STRING_LATIN_ASCII) {
+        return normalize_string_latin(str, strlen(str), options);
+    } else {
+        return normalize_string_utf8(str, options);
+    }
+}
+
+libpostal_normalized_token_t *libpostal_normalized_tokens(char *input, uint64_t string_options, uint64_t token_options, bool whitespace, size_t *n) {
+    if (input == NULL) {
+        return NULL;
+    }
+    char *normalized = libpostal_normalize_string(input, string_options);
+    if (normalized == NULL) {
+        return NULL;
+    }
+
+    token_array *tokens = NULL;
+    if (!whitespace) {
+        tokens = tokenize(normalized);
+    } else {
+        tokens = tokenize_keep_whitespace(normalized);
+    }
+
+    if (tokens == NULL || tokens->a == NULL) {
+        free(normalized);
+        return NULL;
+    }
+
+    size_t num_tokens = tokens->n;
+    token_t *token_array = tokens->a;
+    char_array *normalized_token = char_array_new_size(strlen(normalized));
+
+    libpostal_normalized_token_t *result = malloc(sizeof(libpostal_normalized_token_t) * num_tokens);
+
+    for (size_t i = 0; i < num_tokens; i++) {
+        token_t token = token_array[i];
+        char_array_clear(normalized_token);
+        add_normalized_token(normalized_token, normalized, token, token_options);
+        char *token_str = strdup(char_array_get_string(normalized_token));
+        result[i] = (libpostal_normalized_token_t){token_str, token};
+    }
+
+    free(normalized);
+    token_array_destroy(tokens);
+    char_array_destroy(normalized_token);
+
+    *n = num_tokens;
+    return result;
+}
+
 bool libpostal_setup_language_classifier(void) {
     return libpostal_setup_language_classifier_datadir(NULL);
 }
diff --git a/src/libpostal.h b/src/libpostal.h
index 3b86dea3..274c6391 100644
--- a/src/libpostal.h
+++ b/src/libpostal.h
@@ -12,6 +12,67 @@ extern "C" {
 
 #define LIBPOSTAL_MAX_LANGUAGE_LEN 4
 
+// Doing these as #defines so we can duplicate the values exactly in Python
+
+
+typedef enum {
+    LIBPOSTAL_TOKEN_TYPE_END = 0,                  // Null byte
+    // Word types
+    LIBPOSTAL_TOKEN_TYPE_WORD = 1,                 // Any letter-only word (includes all unicode letters)
+    LIBPOSTAL_TOKEN_TYPE_ABBREVIATION = 2,         // Loose abbreviations (roughly anything containing a "." as we don't care about sentences in addresses)
+    LIBPOSTAL_TOKEN_TYPE_IDEOGRAPHIC_CHAR = 3,     // For languages that don't separate on whitespace (e.g. Chinese, Japanese, Korean), separate by character
+    LIBPOSTAL_TOKEN_TYPE_HANGUL_SYLLABLE = 4,      // Hangul syllable sequences which contain more than one codepoint
+    LIBPOSTAL_TOKEN_TYPE_ACRONYM = 5,              // Specifically things like U.N. where we may delete internal periods
+
+    LIBPOSTAL_TOKEN_TYPE_PHRASE = 10,              // Not part of the first stage tokenizer, but may be used after phrase parsing
+
+    // Special tokens
+    LIBPOSTAL_TOKEN_TYPE_EMAIL = 20,               // Make sure emails are tokenized altogether
+    LIBPOSTAL_TOKEN_TYPE_URL = 21,                 // Make sure urls are tokenized altogether
+    LIBPOSTAL_TOKEN_TYPE_US_PHONE = 22,            // US phone number (with or without country code)
+    LIBPOSTAL_TOKEN_TYPE_INTL_PHONE = 23,          // A non-US phone number (must have country code)
+
+    // Numbers and numeric types
+    LIBPOSTAL_TOKEN_TYPE_NUMERIC = 50,             // Any sequence containing a digit
+    LIBPOSTAL_TOKEN_TYPE_ORDINAL = 51,             // 1st, 2nd, 1er, 1 etc.
+    LIBPOSTAL_TOKEN_TYPE_ROMAN_NUMERAL = 52,       // II, III, VI, etc.
+    LIBPOSTAL_TOKEN_TYPE_IDEOGRAPHIC_NUMBER = 53,  // All numeric ideographic characters, includes e.g. Han numbers and chars like "²"
+
+    // Punctuation types, may separate a phrase
+    LIBPOSTAL_TOKEN_TYPE_PERIOD = 100,
+    LIBPOSTAL_TOKEN_TYPE_EXCLAMATION = 101,
+    LIBPOSTAL_TOKEN_TYPE_QUESTION_MARK = 102,
+    LIBPOSTAL_TOKEN_TYPE_COMMA = 103,
+    LIBPOSTAL_TOKEN_TYPE_COLON = 104,
+    LIBPOSTAL_TOKEN_TYPE_SEMICOLON = 105,
+    LIBPOSTAL_TOKEN_TYPE_PLUS = 106,
+    LIBPOSTAL_TOKEN_TYPE_AMPERSAND = 107,
+    LIBPOSTAL_TOKEN_TYPE_AT_SIGN = 108,
+    LIBPOSTAL_TOKEN_TYPE_POUND = 109,
+    LIBPOSTAL_TOKEN_TYPE_ELLIPSIS = 110,
+    LIBPOSTAL_TOKEN_TYPE_DASH = 111,
+    LIBPOSTAL_TOKEN_TYPE_BREAKING_DASH = 112,
+    LIBPOSTAL_TOKEN_TYPE_HYPHEN = 113,
+    LIBPOSTAL_TOKEN_TYPE_PUNCT_OPEN = 114,
+    LIBPOSTAL_TOKEN_TYPE_PUNCT_CLOSE = 115,
+    LIBPOSTAL_TOKEN_TYPE_DOUBLE_QUOTE = 119,
+    LIBPOSTAL_TOKEN_TYPE_SINGLE_QUOTE = 120,
+    LIBPOSTAL_TOKEN_TYPE_OPEN_QUOTE = 121,
+    LIBPOSTAL_TOKEN_TYPE_CLOSE_QUOTE = 122,
+    LIBPOSTAL_TOKEN_TYPE_SLASH = 124,
+    LIBPOSTAL_TOKEN_TYPE_BACKSLASH = 125,
+    LIBPOSTAL_TOKEN_TYPE_GREATER_THAN = 126,
+    LIBPOSTAL_TOKEN_TYPE_LESS_THAN = 127,
+
+    // Non-letters and whitespace
+    LIBPOSTAL_TOKEN_TYPE_OTHER = 200,
+    LIBPOSTAL_TOKEN_TYPE_WHITESPACE = 300,
+    LIBPOSTAL_TOKEN_TYPE_NEWLINE = 301,
+
+    LIBPOSTAL_TOKEN_TYPE_INVALID_CHAR = 500
+} libpostal_token_type_t;
+
+
 /* Address dictionaries */
@@ -99,6 +160,55 @@ bool libpostal_setup_parser(void);
 bool libpostal_setup_parser_datadir(char *datadir);
 void libpostal_teardown_parser(void);
 
+typedef struct libpostal_token {
+    size_t offset;
+    size_t len;
+    uint16_t type;
+} libpostal_token_t;
+
+libpostal_token_t *libpostal_tokenize(char *input, bool whitespace, size_t *n);
+
+// Normalize string options
+#define LIBPOSTAL_NORMALIZE_STRING_LATIN_ASCII 1 << 0
+#define LIBPOSTAL_NORMALIZE_STRING_TRANSLITERATE 1 << 1
+#define LIBPOSTAL_NORMALIZE_STRING_STRIP_ACCENTS 1 << 2
+#define LIBPOSTAL_NORMALIZE_STRING_DECOMPOSE 1 << 3
+#define LIBPOSTAL_NORMALIZE_STRING_LOWERCASE 1 << 4
+#define LIBPOSTAL_NORMALIZE_STRING_TRIM 1 << 5
+#define LIBPOSTAL_NORMALIZE_STRING_REPLACE_HYPHENS 1 << 6
+#define LIBPOSTAL_NORMALIZE_STRING_COMPOSE 1 << 7
+#define LIBPOSTAL_NORMALIZE_STRING_SIMPLE_LATIN_ASCII 1 << 8
+#define LIBPOSTAL_NORMALIZE_STRING_REPLACE_NUMEX 1 << 9
+
+// Normalize token options
+#define LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_HYPHENS 1 << 0
+#define LIBPOSTAL_NORMALIZE_TOKEN_DELETE_HYPHENS 1 << 1
+#define LIBPOSTAL_NORMALIZE_TOKEN_DELETE_FINAL_PERIOD 1 << 2
+#define LIBPOSTAL_NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS 1 << 3
+#define LIBPOSTAL_NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES 1 << 4
+#define LIBPOSTAL_NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE 1 << 5
+#define LIBPOSTAL_NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC 1 << 6
+#define LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_DIGITS 1 << 7
+#define LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_NUMERIC_TOKEN_LETTERS 1 << 8
+
+#define LIBPOSTAL_NORMALIZE_DEFAULT_STRING_OPTIONS (LIBPOSTAL_NORMALIZE_STRING_LATIN_ASCII | LIBPOSTAL_NORMALIZE_STRING_COMPOSE | LIBPOSTAL_NORMALIZE_STRING_TRIM | LIBPOSTAL_NORMALIZE_STRING_REPLACE_HYPHENS | LIBPOSTAL_NORMALIZE_STRING_STRIP_ACCENTS | LIBPOSTAL_NORMALIZE_STRING_LOWERCASE)
+
+#define LIBPOSTAL_NORMALIZE_DEFAULT_TOKEN_OPTIONS (LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_HYPHENS | LIBPOSTAL_NORMALIZE_TOKEN_DELETE_FINAL_PERIOD | LIBPOSTAL_NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS | LIBPOSTAL_NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES | LIBPOSTAL_NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE)
+
+#define LIBPOSTAL_NORMALIZE_TOKEN_OPTIONS_DROP_PERIODS (LIBPOSTAL_NORMALIZE_TOKEN_DELETE_FINAL_PERIOD | LIBPOSTAL_NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS)
+
+#define LIBPOSTAL_NORMALIZE_DEFAULT_TOKEN_OPTIONS_NUMERIC (LIBPOSTAL_NORMALIZE_DEFAULT_TOKEN_OPTIONS | LIBPOSTAL_NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC)
+
+char *libpostal_normalize_string(char *input, uint64_t options);
+
+
+typedef struct libpostal_normalized_token {
+    char *str;
+    libpostal_token_t token;
+} libpostal_normalized_token_t;
+
+libpostal_normalized_token_t *libpostal_normalized_tokens(char *input, uint64_t string_options, uint64_t token_options, bool whitespace, size_t *n);
+
 bool libpostal_setup_language_classifier(void);
 bool libpostal_setup_language_classifier_datadir(char *datadir);
 void libpostal_teardown_language_classifier(void);
diff --git a/src/normalize.h b/src/normalize.h
index d485f67f..755b7cee 100644
--- a/src/normalize.h
+++ b/src/normalize.h
@@ -30,6 +30,7 @@ As well as normalizations for individual string tokens:
 
 #include "constants.h"
 #include "klib/khash.h"
+#include "libpostal.h"
 #include "string_utils.h"
 #include "utf8proc/utf8proc.h"
 #include "unicode_scripts.h"
@@ -39,25 +40,26 @@ As well as normalizations for individual string tokens:
 #include "tokens.h"
 #include "vector.h"
 
-#define NORMALIZE_STRING_LATIN_ASCII 1 << 0
-#define NORMALIZE_STRING_TRANSLITERATE 1 << 1
-#define NORMALIZE_STRING_STRIP_ACCENTS 1 << 2
-#define NORMALIZE_STRING_DECOMPOSE 1 << 3
-#define NORMALIZE_STRING_LOWERCASE 1 << 4
-#define NORMALIZE_STRING_TRIM 1 << 5
-#define NORMALIZE_STRING_REPLACE_HYPHENS 1 << 6
-#define NORMALIZE_STRING_COMPOSE 1 << 7
-#define NORMALIZE_STRING_SIMPLE_LATIN_ASCII 1 << 8
-#define NORMALIZE_STRING_REPLACE_NUMEX 1 << 9
+#define NORMALIZE_STRING_LATIN_ASCII LIBPOSTAL_NORMALIZE_STRING_LATIN_ASCII
+#define NORMALIZE_STRING_TRANSLITERATE LIBPOSTAL_NORMALIZE_STRING_TRANSLITERATE
+#define NORMALIZE_STRING_STRIP_ACCENTS LIBPOSTAL_NORMALIZE_STRING_STRIP_ACCENTS
+#define NORMALIZE_STRING_DECOMPOSE LIBPOSTAL_NORMALIZE_STRING_DECOMPOSE
+#define NORMALIZE_STRING_LOWERCASE LIBPOSTAL_NORMALIZE_STRING_LOWERCASE
+#define NORMALIZE_STRING_TRIM LIBPOSTAL_NORMALIZE_STRING_TRIM
+#define NORMALIZE_STRING_REPLACE_HYPHENS LIBPOSTAL_NORMALIZE_STRING_REPLACE_HYPHENS
+#define NORMALIZE_STRING_COMPOSE LIBPOSTAL_NORMALIZE_STRING_COMPOSE
+#define NORMALIZE_STRING_SIMPLE_LATIN_ASCII LIBPOSTAL_NORMALIZE_STRING_SIMPLE_LATIN_ASCII
+#define NORMALIZE_STRING_REPLACE_NUMEX LIBPOSTAL_NORMALIZE_STRING_REPLACE_NUMEX
 
-#define NORMALIZE_TOKEN_REPLACE_HYPHENS 1 << 0
-#define NORMALIZE_TOKEN_DELETE_HYPHENS 1 << 1
-#define NORMALIZE_TOKEN_DELETE_FINAL_PERIOD 1 << 2
-#define NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS 1 << 3
-#define NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES 1 << 4
-#define NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE 1 << 5
-#define NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC 1 << 6
-#define NORMALIZE_TOKEN_REPLACE_DIGITS 1 << 7
+#define NORMALIZE_TOKEN_REPLACE_HYPHENS LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_HYPHENS
+#define NORMALIZE_TOKEN_DELETE_HYPHENS LIBPOSTAL_NORMALIZE_TOKEN_DELETE_HYPHENS
+#define NORMALIZE_TOKEN_DELETE_FINAL_PERIOD LIBPOSTAL_NORMALIZE_TOKEN_DELETE_FINAL_PERIOD
+#define NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS LIBPOSTAL_NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS
+#define NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES LIBPOSTAL_NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES
+#define NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE LIBPOSTAL_NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE
+#define NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC LIBPOSTAL_NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC
+#define NORMALIZE_TOKEN_REPLACE_DIGITS LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_DIGITS
+#define NORMALIZE_TOKEN_REPLACE_NUMERIC_TOKEN_LETTERS LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_NUMERIC_TOKEN_LETTERS
 
 // Replace digits with capital D e.g. 10013 => DDDDD, intended for use with lowercased strings
 #define DIGIT_CHAR "D"
diff --git a/src/token_types.h b/src/token_types.h
index d746ae89..31cc2ba9 100644
--- a/src/token_types.h
+++ b/src/token_types.h
@@ -1,64 +1,60 @@
 #ifndef TOKEN_TYPES_H
 #define TOKEN_TYPES_H
 
+#include "libpostal.h"
+
 // Doing these as #defines so we can duplicate the values exactly in Python
 
-#define END 0 // Null byte
-// Word types
-#define WORD 1 // Any letter-only word (includes all unicode letters)
-#define ABBREVIATION 2 // Loose abbreviations (roughly anything containing a "." as we don't care about sentences in addresses)
-#define IDEOGRAPHIC_CHAR 3 // For languages that don't separate on whitespace (e.g. Chinese, Japanese, Korean), separate by character
-#define HANGUL_SYLLABLE 4 // Hangul syllable sequences which contain more than one codepoint
-#define ACRONYM 5 // Specifically things like U.N. where we may delete internal periods
+#define END LIBPOSTAL_TOKEN_TYPE_END
 
-#define PHRASE 10 // Not part of the first stage tokenizer, but may be used after phrase parsing
+#define WORD LIBPOSTAL_TOKEN_TYPE_WORD
+#define ABBREVIATION LIBPOSTAL_TOKEN_TYPE_ABBREVIATION
+#define IDEOGRAPHIC_CHAR LIBPOSTAL_TOKEN_TYPE_IDEOGRAPHIC_CHAR
+#define HANGUL_SYLLABLE LIBPOSTAL_TOKEN_TYPE_HANGUL_SYLLABLE
+#define ACRONYM LIBPOSTAL_TOKEN_TYPE_ACRONYM
+#define PHRASE LIBPOSTAL_TOKEN_TYPE_PHRASE
 
-// Special tokens
-#define EMAIL 20 // Make sure emails are tokenized altogether
-#define URL 21 // Make sure urls are tokenized altogether
-#define US_PHONE 22 // US phone number (with or without country code)
-#define INTL_PHONE 23 // A non-US phone number (must have country code)
+#define EMAIL LIBPOSTAL_TOKEN_TYPE_EMAIL
+#define URL LIBPOSTAL_TOKEN_TYPE_URL
+#define US_PHONE LIBPOSTAL_TOKEN_TYPE_US_PHONE
+#define INTL_PHONE LIBPOSTAL_TOKEN_TYPE_INTL_PHONE
 
-// Numbers and numeric types
-#define NUMERIC 50 // Any sequence containing a digit
-#define ORDINAL 51 // 1st, 2nd, 1er, 1 etc.
-#define ROMAN_NUMERAL 52 // II, III, VI, etc.
-#define IDEOGRAPHIC_NUMBER 53 // All numeric ideographic characters, includes e.g. Han numbers and chars like "²"
+#define NUMERIC LIBPOSTAL_TOKEN_TYPE_NUMERIC
+#define ORDINAL LIBPOSTAL_TOKEN_TYPE_ORDINAL
+#define ROMAN_NUMERAL LIBPOSTAL_TOKEN_TYPE_ROMAN_NUMERAL
+#define IDEOGRAPHIC_NUMBER LIBPOSTAL_TOKEN_TYPE_IDEOGRAPHIC_NUMBER
+#define PERIOD LIBPOSTAL_TOKEN_TYPE_PERIOD
+#define EXCLAMATION LIBPOSTAL_TOKEN_TYPE_EXCLAMATION
+#define QUESTION_MARK LIBPOSTAL_TOKEN_TYPE_QUESTION_MARK
+#define COMMA LIBPOSTAL_TOKEN_TYPE_COMMA
+#define COLON LIBPOSTAL_TOKEN_TYPE_COLON
+#define SEMICOLON LIBPOSTAL_TOKEN_TYPE_SEMICOLON
+#define PLUS LIBPOSTAL_TOKEN_TYPE_PLUS
+#define AMPERSAND LIBPOSTAL_TOKEN_TYPE_AMPERSAND
+#define AT_SIGN LIBPOSTAL_TOKEN_TYPE_AT_SIGN
+#define POUND LIBPOSTAL_TOKEN_TYPE_POUND
+#define ELLIPSIS LIBPOSTAL_TOKEN_TYPE_ELLIPSIS
+#define DASH LIBPOSTAL_TOKEN_TYPE_DASH
+#define BREAKING_DASH LIBPOSTAL_TOKEN_TYPE_BREAKING_DASH
+#define HYPHEN LIBPOSTAL_TOKEN_TYPE_HYPHEN
+#define PUNCT_OPEN LIBPOSTAL_TOKEN_TYPE_PUNCT_OPEN
+#define PUNCT_CLOSE LIBPOSTAL_TOKEN_TYPE_PUNCT_CLOSE
+#define DOUBLE_QUOTE LIBPOSTAL_TOKEN_TYPE_DOUBLE_QUOTE
+#define SINGLE_QUOTE LIBPOSTAL_TOKEN_TYPE_SINGLE_QUOTE
+#define OPEN_QUOTE LIBPOSTAL_TOKEN_TYPE_OPEN_QUOTE
+#define CLOSE_QUOTE LIBPOSTAL_TOKEN_TYPE_CLOSE_QUOTE
+#define SLASH LIBPOSTAL_TOKEN_TYPE_SLASH
+#define BACKSLASH LIBPOSTAL_TOKEN_TYPE_BACKSLASH
+#define GREATER_THAN LIBPOSTAL_TOKEN_TYPE_GREATER_THAN
+#define LESS_THAN LIBPOSTAL_TOKEN_TYPE_LESS_THAN
 
-// Punctuation types, may separate a phrase
-#define PERIOD 100
-#define EXCLAMATION 101
-#define QUESTION_MARK 102
-#define COMMA 103
-#define COLON 104
-#define SEMICOLON 105
-#define PLUS 106
-#define AMPERSAND 107
-#define AT_SIGN 108
-#define POUND 109
-#define ELLIPSIS 110
-#define DASH 111
-#define BREAKING_DASH 112
-#define HYPHEN 113
-#define PUNCT_OPEN 114
-#define PUNCT_CLOSE 115
-#define DOUBLE_QUOTE 119
-#define SINGLE_QUOTE 120
-#define OPEN_QUOTE 121
-#define CLOSE_QUOTE 122
-#define SLASH 124
-#define BACKSLASH 125
-#define GREATER_THAN 126
-#define LESS_THAN 127
+#define OTHER LIBPOSTAL_TOKEN_TYPE_OTHER
+#define WHITESPACE LIBPOSTAL_TOKEN_TYPE_WHITESPACE
+#define NEWLINE LIBPOSTAL_TOKEN_TYPE_NEWLINE
 
-// Non-letters and whitespace
-#define OTHER 200
-#define WHITESPACE 300
-#define NEWLINE 301
-
-#define INVALID_CHAR 500
+#define INVALID_CHAR LIBPOSTAL_TOKEN_TYPE_INVALID_CHAR
 
 #define is_word_token(type) ((type) == WORD || (type) == ABBREVIATION || (type) == ACRONYM || (type) == IDEOGRAPHIC_CHAR || (type) == HANGUL_SYLLABLE)
diff --git a/src/tokens.h b/src/tokens.h
index 6b314417..8823a628 100644
--- a/src/tokens.h
+++ b/src/tokens.h
@@ -12,11 +12,7 @@
 #include "token_types.h"
 #include "vector.h"
 
-typedef struct token {
-    size_t offset;
-    size_t len;
-    uint16_t type;
-} token_t;
+typedef libpostal_token_t token_t;
 
 VECTOR_INIT(token_array, token_t)
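
Reviewer notes on the new public API. The three functions promoted into src/libpostal.h are the surface the rewritten bindings link against via -lpostal. The sketches below are illustrative only: they assume the header installs as <libpostal/libpostal.h> under /usr/local/include, which is what the include_dirs/libraries settings in the new setup.py imply, and they compile with something like `cc example.c -lpostal`. First, tokenization. libpostal_tokenize() hands back a flat, malloc'd array of libpostal_token_t — the implementation above detaches the buffer from the internal token_array and frees only the wrapper — so a single free() releases it. No data-loading call appears to be required for tokenization alone (note that the new pytokenize.c, unlike pynormalize.c, never calls libpostal_setup()), though calling it first would be the conservative choice.

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <libpostal/libpostal.h>   /* assumed install path, per setup.py's include_dirs */

int main(void) {
    char *input = "30 W 26th St";

    size_t num_tokens = 0;
    /* whitespace=false mirrors the Python default tokenize(s, whitespace=False):
       whitespace runs are dropped rather than returned as WHITESPACE tokens */
    libpostal_token_t *tokens = libpostal_tokenize(input, false, &num_tokens);
    if (tokens == NULL) return 1;

    for (size_t i = 0; i < num_tokens; i++) {
        libpostal_token_t t = tokens[i];
        /* offset/len index into the original buffer, exactly like the
           (offset, len, type) tuples _tokenize.tokenize returns */
        printf("%.*s\ttype=%d\n", (int)t.len, input + t.offset, (int)t.type);
    }

    /* the caller owns the flat array; one free() suffices */
    free(tokens);
    return 0;
}
```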
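libpostal_normalize_string() is a thin dispatcher: with LIBPOSTAL_NORMALIZE_STRING_LATIN_ASCII set it takes the transliterating normalize_string_latin path, otherwise normalize_string_utf8. A minimal sketch under the same header assumptions — the Latin-ASCII path depends on transliteration data, so libpostal_setup() must succeed first (the rewritten init_normalize does exactly this), and the returned string is heap-allocated and owned by the caller:

```c
#include <stdio.h>
#include <stdlib.h>
#include <libpostal/libpostal.h>

int main(void) {
    if (!libpostal_setup()) return 1;   /* loads transliteration tables, among other data */

    /* defaults = LATIN_ASCII | COMPOSE | TRIM | REPLACE_HYPHENS | STRIP_ACCENTS | LOWERCASE */
    char *normalized = libpostal_normalize_string("St.-Barthélemy",
                                                  LIBPOSTAL_NORMALIZE_DEFAULT_STRING_OPTIONS);
    if (normalized != NULL) {
        printf("%s\n", normalized);     /* lowercased, accents stripped, hyphen replaced */
        free(normalized);               /* caller owns the result */
    }

    libpostal_teardown();
    return 0;
}
```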
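Finally, libpostal_normalized_tokens() combines the two steps and strdup's each token string, so the ownership contract is the one the new py_normalized_tokens follows: free every .str, then the array itself. A sketch under the same assumptions:

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <libpostal/libpostal.h>

int main(void) {
    if (!libpostal_setup()) return 1;

    size_t n = 0;
    libpostal_normalized_token_t *tokens = libpostal_normalized_tokens(
        "St.-Barthélemy",
        LIBPOSTAL_NORMALIZE_DEFAULT_STRING_OPTIONS,
        LIBPOSTAL_NORMALIZE_DEFAULT_TOKEN_OPTIONS,
        false,                              /* no whitespace tokens */
        &n);

    if (tokens != NULL) {
        for (size_t i = 0; i < n; i++) {
            /* .token carries (offset, len, type) into the normalized string */
            printf("%s\ttype=%d\n", tokens[i].str, (int)tokens[i].token.type);
            free(tokens[i].str);            /* each string is strdup'd by the library */
        }
        free(tokens);
    }

    libpostal_teardown();
    return 0;
}
```

This is what normalized_tokens() in the rewritten normalize.py surfaces to Python as (string, token_type) pairs; pushing the normalize-then-tokenize loop into the library is what lets the bindings drop the old per-token normalize_token/normalize_tokens_whitespace code entirely.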