Merge pull request #294 from openvenues/lieu_api
Near-duplicate detection and address deduping
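Summarizing the diff below: the per-language dictionaries gain "&" as an ambiguous expansion and as a conjunction stopword (with several new stopwords.txt files), the geodata Python extensions are rewritten as thin wrappers around an installed libpostal shared library, and new C modules (acronyms.c, dedupe.c, plus the place, near_dupe, double_metaphone, geohash, string_similarity, soft_tfidf and jaccard sources registered in Makefile.am) implement the near-duplicate detection and address deduping logic.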
@@ -1,4 +1,4 @@
7-eleven|7 eleven|7-11|seven-eleven|seven eleven|seveneleven|seven-11|seven 11|7-elevens|7 elevens|7-11s|seven-elevens|seven elevens|sevenelevens|seven-11s|seven 11s|sevel
7-eleven|7 eleven|7-11|seven-eleven|seven eleven|seveneleven|seven-11|seven 11|7-elevens|7 elevens|7-11s|seven-elevens|seven elevens|sevenelevens|seven-11s|seven 11s|sevel|7 11
a&w|a & w|a and w|a&ws|a & ws|a and ws|a&w restaurants|a & w restaurants|a and w restaurants
ace hardware|ace hardwares
adidas

@@ -1,3 +1,4 @@
&
b
d
e

@@ -13,7 +13,7 @@ el
els
es
entre
i
i|&
l'
la
les

@@ -1,3 +1,4 @@
&
c
j
s

@@ -1 +1 @@
a
a|&

@@ -1,3 +1,4 @@
&
c
n
o

resources/dictionaries/da/stopwords.txt (new file, +1)
@@ -0,0 +1 @@
og|&

@@ -1,3 +1,4 @@
&
a
b
ch

@@ -1,45 +1,92 @@
&
aat
act
ab
al
abby
ak
al
alee
ally
aly
ar
az
ant
app
apt
arc
art
arty
ba
bc
bot
byu
c
ca
carp
cause
ce
co
col
con
coop
cor
cowy
ct
de
dc
div
divers
d
doc
dup
e
elb
ex
f
fit
fl
form
fry
g
ga
gen
gra
h
hi
hon
i
id
il
imp
in
ia
is
j
jbt
k
ks
ky
l
la
lit
low
lynn
m
ma
me
mb
md
mem
mi
miss
mid
mil
mun
mn
mr
ms
mo
mt
m
n
nb
nc
@@ -58,35 +105,71 @@ nw
nwt
nv
ny
o
oh
on
ok
or
out
p
pa
pass
pe
pei
plat
pur
q
qc
qld
quad
r
ra
ran
rep
reps
rev
ri
ro
row
rowy
s
sa
sc
sd
se
sec
sect
sen
sh
shun
sk
sw
t
tas
thick
thro
tn
tri
tx
tun
u
up
ut
un
vic
vt
v
va
via
vic
vill
vis
vt
w
wa
wv
wi
yt
wy
wyn
x
y
yt
z

@@ -1,3 +1,5 @@
associates|assoc
association|assoc
bank
b corporation|b corp|bcorp
charitable incorporated organization|cio|c i o

@@ -34,7 +36,7 @@ limited liability limited partnership|lllp|l l l p
limited liability partnership|llp|l l p
limited partnership|lp|l p
look through company|look through co|lookthrough company|lookthrough co|ltc
national association|na|n a
national association|na|n a|nat assoc|natl assoc
national trust and savings association|national trust & savings association|nt & sa|nt and sa|nt sa|ntsa
no liability|nl|n l
nonprofit|non profit

@@ -252,6 +252,8 @@ salon
sanctuary|sanct
sauna
secondary school
service|svc
services|svcs|svc
shelter
sheriff's department|sherrifs department|sheriff's dept|sherrifs dept
sherrif's office|sherffis office|sheriff's ofc|sheriffs ofc

@@ -267,6 +269,7 @@ stadium
station|sta|stn
steakhouse
store|stor
stores
studio
studios
subdivision

@@ -1,10 +1,14 @@
and
a
and|&
all
at
between|betw|btwn|btw|btween|b / t
by
for
in
of
on
the
to
via
opposite
opposite|opp

@@ -18,6 +18,8 @@ greater|grtr|gtr
greens|grns
groves|grvs
heights|hghts|hgts|hieghts|ht|hts|hgths
hill|hl
hills|hls
international|intl|int'l
lake|lk
lakes|lks

@@ -1,3 +1,4 @@
&
c
cr
d

@@ -26,4 +26,4 @@ por
sin
un
una
y
y|&

@@ -1,3 +1,4 @@
&
k
l
p

resources/dictionaries/et/stopwords.txt (new file, +1)
@@ -0,0 +1 @@
ja|&

@@ -1 +1,2 @@
&
k

resources/dictionaries/eu/stopwords.txt (new file, +1)
@@ -0,0 +1 @@
eta|&

@@ -1,3 +1,4 @@
&
k
p
r

resources/dictionaries/fi/stopwords.txt (new file, +1)
@@ -0,0 +1 @@
ja|&

@@ -1,3 +1,4 @@
&
a
ab
bc

@@ -15,7 +15,7 @@ du
en
en face de
entre
et
et|&
l'
la
le

@@ -1,3 +1,4 @@
&
e
n
o

@@ -15,7 +15,7 @@ deles
delas
detras
do
é
e|&
en
encima
enfronte

@@ -1,3 +1,4 @@
&
c
i
j

resources/dictionaries/hr/stopwords.txt (new file, +1)
@@ -0,0 +1 @@
i|&

@@ -1,3 +1,4 @@
&
d
e
k

@@ -1,4 +1,4 @@
a
az
egy
és|es
és|es|&

@@ -1,3 +1,4 @@
&
bg
bu
di

@@ -1,2 +0,0 @@
jl.
jln.

@@ -1,5 +1,5 @@
berlawanan|lawanan|lwnn
dan|dn|n
dan|dn|n|en|&
dari|dr
dekat|dkt
di

@@ -1,4 +1,6 @@
&
a
n
og
s
v

resources/dictionaries/is/stopwords.txt (new file, +1)
@@ -0,0 +1 @@
og|&

@@ -1,3 +1,4 @@
&
c
e
l

@@ -24,6 +24,7 @@ dell'
dentro|d.tro|dtro
di
d'
e|&
fuori
gli
i

@@ -1 +1,2 @@
&
ქ

@@ -1 +1 @@
და
და|&

@@ -1,3 +1,4 @@
&
a
g
k

resources/dictionaries/lt/stopwords.txt (new file, +1)
@@ -0,0 +1 @@
ir|&

@@ -1,3 +1,4 @@
&
a
d
g

resources/dictionaries/lv/stopwords.txt (new file, +1)
@@ -0,0 +1 @@
un|&

resources/dictionaries/ms/ambiguous_expansions.txt (new file, +1)
@@ -0,0 +1 @@
&

resources/dictionaries/ms/stopwords.txt (new file, +1)
@@ -0,0 +1 @@
dan|&

resources/dictionaries/mt/ambiguous_expansions.txt (new file, +1)
@@ -0,0 +1 @@
&

@@ -2,3 +2,4 @@ il
is
ta
tar
u|&

@@ -1,3 +1,4 @@
&
g
h
k

@@ -23,7 +23,7 @@ naer
nærmest
naermest
nest
og
og|&
overfor
over
på

@@ -1,3 +1,4 @@
&
b
h
k

@@ -9,7 +9,7 @@ der
die
dit
een
en
en|&
hem
het
hoe

@@ -1,3 +1,4 @@
&
d
g
k

@@ -1,3 +1,3 @@
i
i|&
na
w

@@ -1,3 +1,4 @@
&
b
d
e

@@ -14,7 +14,7 @@ debaixo
defronte
do
dos
e
e|&
em
em frente de|em ft de
entre

@@ -1,3 +1,4 @@
&
e
n
s

@@ -1 +1,2 @@
și|si|&
cel

@@ -1,3 +1,4 @@
&
д
d
г

resources/dictionaries/ru/stopwords.txt (new file, +1)
@@ -0,0 +1 @@
и|&

@@ -1,3 +1,4 @@
&
c
j
s

@@ -1,4 +1,4 @@
a
a|&
bližko|blizko
cez
do

@@ -1,3 +1,4 @@
&
c
j
s

@@ -1,3 +1,4 @@
in|&
na
ob
pot

@@ -1,3 +1,4 @@
&
и
i
ј

resources/dictionaries/sr/stopwords.txt (new file, +2)
@@ -0,0 +1,2 @@
и|&
i|&

@@ -1,3 +1,4 @@
&
g
l
k

@@ -16,6 +16,7 @@ intill
mellan
motliggande
närmast|naermast
och|&
över|oever
på|paa
på andra sidan|paa andra sidan

@@ -1,3 +1,4 @@
&
b
d
g

resources/dictionaries/tr/stopwords.txt (new file, +1)
@@ -0,0 +1 @@
ve|&

@@ -1,3 +1,4 @@
&
д
d
ш

resources/dictionaries/uk/stopwords.txt (new file, +2)
@@ -0,0 +1,2 @@
і|&
i|&
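The dictionary edits above all follow one pattern: each line is a pipe-delimited set of equivalent surface forms with the canonical form first, and the conjunction in each language ("og", "et", "ve", "и", etc.) gains "&" as an equivalent, both as an ambiguous expansion and as a stopword, so that e.g. "Foo & Bar" and "Foo og Bar" can normalize to the same form when comparing names. A minimal sketch of how such a line can be read (illustrative only; libpostal compiles these files into a trie via build_address_dictionary rather than parsing them at runtime):

    def parse_dictionary_line(line):
        # First form is canonical; the rest are equivalent surface forms.
        forms = line.strip().split('|')
        return forms[0], forms

    canonical, forms = parse_dictionary_line('og|&')
    assert canonical == 'og' and '&' in forms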
@@ -2,7 +2,6 @@
import six

from geodata.text import _normalize
from geodata.text.tokenize import tokenize_raw
from geodata.text.token_types import token_types

from geodata.encoding import safe_decode

@@ -17,12 +16,7 @@ NORMALIZE_STRING_TRIM = _normalize.NORMALIZE_STRING_TRIM
NORMALIZE_STRING_REPLACE_HYPHENS = _normalize.NORMALIZE_STRING_REPLACE_HYPHENS
NORMALIZE_STRING_SIMPLE_LATIN_ASCII = _normalize.NORMALIZE_STRING_SIMPLE_LATIN_ASCII

DEFAULT_STRING_OPTIONS = NORMALIZE_STRING_LATIN_ASCII | \
                         NORMALIZE_STRING_DECOMPOSE | \
                         NORMALIZE_STRING_TRIM | \
                         NORMALIZE_STRING_REPLACE_HYPHENS | \
                         NORMALIZE_STRING_STRIP_ACCENTS | \
                         NORMALIZE_STRING_LOWERCASE
DEFAULT_STRING_OPTIONS = _normalize.NORMALIZE_DEFAULT_STRING_OPTIONS

# Token options
NORMALIZE_TOKEN_REPLACE_HYPHENS = _normalize.NORMALIZE_TOKEN_REPLACE_HYPHENS

@@ -34,16 +28,10 @@ NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE = _normalize.NORMALIZE_TOKEN_DELETE_OTHE
NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC = _normalize.NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC
NORMALIZE_TOKEN_REPLACE_DIGITS = _normalize.NORMALIZE_TOKEN_REPLACE_DIGITS

DEFAULT_TOKEN_OPTIONS = NORMALIZE_TOKEN_REPLACE_HYPHENS | \
                        NORMALIZE_TOKEN_DELETE_FINAL_PERIOD | \
                        NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS | \
                        NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES | \
                        NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE
DEFAULT_TOKEN_OPTIONS = _normalize.NORMALIZE_DEFAULT_TOKEN_OPTIONS

TOKEN_OPTIONS_DROP_PERIODS = NORMALIZE_TOKEN_DELETE_FINAL_PERIOD | \
                             NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS

DEFAULT_TOKEN_OPTIONS_NUMERIC = (DEFAULT_TOKEN_OPTIONS | NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC)
TOKEN_OPTIONS_DROP_PERIODS = _normalize.NORMALIZE_TOKEN_OPTIONS_DROP_PERIODS
DEFAULT_TOKEN_OPTIONS_NUMERIC = _normalize.NORMALIZE_DEFAULT_TOKEN_OPTIONS_NUMERIC


def remove_parens(tokens):

@@ -62,33 +50,7 @@ def remove_parens(tokens):

def normalize_string(s, string_options=DEFAULT_STRING_OPTIONS):
    s = safe_decode(s)
    if string_options & _normalize.NORMALIZE_STRING_LATIN_ASCII:
        normalized = _normalize.normalize_string_latin(s, string_options)
    else:
        normalized = _normalize.normalize_string_utf8(s, string_options)

    return normalized


def normalize_token(s, t, token_options=DEFAULT_TOKEN_OPTIONS):
    return _normalize.normalize_token(s, t, token_options)


def normalize_tokens_whitespace(s, raw_tokens, token_options=DEFAULT_TOKEN_OPTIONS):
    last_end = 0
    tokens = []

    for t in raw_tokens:
        t_norm = _normalize.normalize_token(s, t, token_options)
        t_class = token_types.from_id(t[-1])

        if last_end < t[0]:
            tokens.append((six.u(' '), token_types.WHITESPACE))
        last_end = sum(t[:2])

        tokens.append((t_norm, t_class))

    return tokens
    return _normalize.normalize_string(s, string_options)


def normalized_tokens(s, string_options=DEFAULT_STRING_OPTIONS,

@@ -105,20 +67,10 @@ def normalized_tokens(s, string_options=DEFAULT_STRING_OPTIONS,
    Usage:
        normalized_tokens(u'St.-Barthélemy')
    '''
    normalized = normalize_string(s, string_options=string_options)

    # Tuples of (offset, len, type)
    raw_tokens = tokenize_raw(normalized)
    tokens = []
    last_end = 0

    if not whitespace:
        tokens = [(_normalize.normalize_token(normalized, t, token_options),
                   token_types.from_id(t[-1])) for t in raw_tokens]
    else:
        tokens = normalize_tokens_whitespace(normalized, raw_tokens, token_options=token_options)
    s = safe_decode(s)
    normalized_tokens = _normalize.normalized_tokens(s, string_options, token_options, whitespace)

    if strip_parentheticals:
        return remove_parens(tokens)
    else:
        return tokens
        normalized_tokens = remove_parens(normalized_tokens)

    return [(s, token_types.from_id(token_type)) for s, token_type in normalized_tokens]
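After this change the pure-Python normalization helpers are gone and normalized_tokens is a thin veneer over the C library. A minimal usage sketch, assuming this module is geodata.text.normalize and that the _normalize extension is built against an installed libpostal:

    from geodata.text.normalize import normalized_tokens

    # Tokens come back as (string, token_type) pairs; hypothetical output
    # shown for illustration, the exact token types depend on the build.
    print(normalized_tokens(u'St.-Barthélemy'))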
@@ -1,7 +1,6 @@
#include <Python.h>

#include "src/normalize.h"
#include "src/transliterate.h"
#include <libpostal/libpostal.h>

#if PY_MAJOR_VERSION >= 3
#define IS_PY3K

@@ -19,9 +18,7 @@ struct module_state {
static struct module_state _state;
#endif


static PyObject *py_normalize_string_utf8(PyObject *self, PyObject *args)
static PyObject *py_normalize_string(PyObject *self, PyObject *args)
{
    PyObject *arg1;
    uint64_t options;

@@ -48,7 +45,7 @@ static PyObject *py_normalize_string_utf8(PyObject *self, PyObject *args)
    if (str == NULL) {
        PyErr_SetString(PyExc_TypeError,
                        "Parameter could not be utf-8 encoded");
        goto exit_decref_unistr;
        goto exit_normalize_decref_unistr;
    }

    char *input = PyBytes_AsString(str);

@@ -56,13 +53,13 @@ static PyObject *py_normalize_string_utf8(PyObject *self, PyObject *args)
#endif

    if (input == NULL) {
        goto exit_decref_str;
        goto exit_normalize_decref_str;
    }

    char *normalized = normalize_string_utf8(input, options);
    char *normalized = libpostal_normalize_string(input, options);

    if (normalized == NULL) {
        goto exit_decref_str;
        goto exit_normalize_decref_str;
    }

    PyObject *result = PyUnicode_DecodeUTF8((const char *)normalized, strlen(normalized), "strict");

@@ -70,7 +67,7 @@ static PyObject *py_normalize_string_utf8(PyObject *self, PyObject *args)
    if (result == NULL) {
        PyErr_SetString(PyExc_ValueError,
                        "Result could not be utf-8 decoded");
        goto exit_decref_str;
        goto exit_normalize_decref_str;
    }

#ifndef IS_PY3K

@@ -80,21 +77,26 @@ static PyObject *py_normalize_string_utf8(PyObject *self, PyObject *args)

    return result;

exit_decref_str:
exit_normalize_decref_str:
#ifndef IS_PY3K
    Py_XDECREF(str);
#endif
exit_decref_unistr:
exit_normalize_decref_unistr:
    Py_XDECREF(unistr);
    return 0;
}


static PyObject *py_normalize_string_latin(PyObject *self, PyObject *args)
static PyObject *py_normalized_tokens(PyObject *self, PyObject *args)
{
    PyObject *arg1;
    uint64_t options;
    if (!PyArg_ParseTuple(args, "OK:normalize", &arg1, &options)) {
    uint64_t string_options = LIBPOSTAL_NORMALIZE_DEFAULT_STRING_OPTIONS;
    uint64_t token_options = LIBPOSTAL_NORMALIZE_DEFAULT_TOKEN_OPTIONS;
    uint32_t arg_whitespace = 0;

    PyObject *result = NULL;

    if (!PyArg_ParseTuple(args, "O|KKI:normalize", &arg1, &string_options, &token_options, &arg_whitespace)) {
        return 0;
    }

@@ -117,7 +119,7 @@ static PyObject *py_normalize_string_latin(PyObject *self, PyObject *args)
    if (str == NULL) {
        PyErr_SetString(PyExc_TypeError,
                        "Parameter could not be utf-8 encoded");
        goto exit_decref_unistr;
        goto exit_normalized_tokens_decref_str;
    }

    char *input = PyBytes_AsString(str);

@@ -125,98 +127,46 @@ static PyObject *py_normalize_string_latin(PyObject *self, PyObject *args)
#endif

    if (input == NULL) {
        goto exit_decref_str;
        goto exit_normalized_tokens_decref_str;
    }

    char *normalized = normalize_string_latin(input, strlen(input), options);
    bool whitespace = arg_whitespace;

    PyObject *result = PyUnicode_DecodeUTF8((const char *)normalized, strlen(normalized), "strict");
    free(normalized);
    if (result == NULL) {
        PyErr_SetString(PyExc_ValueError,
                        "Result could not be utf-8 decoded");
        goto exit_decref_str;
    size_t num_tokens;
    libpostal_normalized_token_t *normalized_tokens = libpostal_normalized_tokens(input, string_options, token_options, whitespace, &num_tokens);

    if (normalized_tokens == NULL) {
        goto exit_normalized_tokens_decref_str;
    }

#ifndef IS_PY3K
    Py_XDECREF(str);
#endif
    Py_XDECREF(unistr);

    return result;

exit_decref_str:
#ifndef IS_PY3K
    Py_XDECREF(str);
#endif
exit_decref_unistr:
    Py_XDECREF(unistr);
    return 0;
}


static PyObject *py_normalize_token(PyObject *self, PyObject *args)
{
    PyObject *s;

    uint32_t offset;
    uint32_t len;
    uint16_t type;

    uint64_t options;
    if (!PyArg_ParseTuple(args, "O(IIH)K:normalize", &s, &offset, &len, &type, &options)) {
        PyErr_SetString(PyExc_TypeError,
                        "Error parsing arguments");
        return 0;
    result = PyList_New((Py_ssize_t)num_tokens);
    if (!result) {
        goto exit_free_normalized_tokens;
    }

    token_t token = (token_t){(size_t)offset, (size_t)len, type};

    PyObject *unistr = PyUnicode_FromObject(s);
    if (unistr == NULL) {
        PyErr_SetString(PyExc_TypeError,
                        "Parameter could not be converted to unicode in scanner");
        return 0;
    }

#ifdef IS_PY3K
    // Python 3 encoding, supported by Python 3.3+
    char *input = PyUnicode_AsUTF8(unistr);

#else
    // Python 2 encoding
    PyObject *str = PyUnicode_AsEncodedString(unistr, "utf-8", "strict");
    if (str == NULL) {
        PyErr_SetString(PyExc_ValueError,
                        "Parameter could not be utf-8 encoded");
        goto exit_decref_unistr;
    for (size_t i = 0; i < num_tokens; i++) {
        libpostal_normalized_token_t normalized_token = normalized_tokens[i];
        char *token_str = normalized_token.str;
        PyObject *py_token = PyUnicode_DecodeUTF8((const char *)token_str, strlen(token_str), "strict");
        if (py_token == NULL) {
            Py_DECREF(result);
            goto exit_free_normalized_tokens;
        }

    char *input = PyBytes_AsString(str);
        PyObject *t = PyTuple_New(2);
        PyObject *py_token_type = PyInt_FromLong(normalized_token.token.type);

#endif
        PyTuple_SetItem(t, 0, py_token);
        PyTuple_SetItem(t, 1, py_token_type);

    if (input == NULL) {
        goto exit_decref_str;
        // Note: PyList_SetItem steals a reference, so don't worry about DECREF
        PyList_SetItem(result, (Py_ssize_t)i, t);
    }

    char_array *token_buffer = char_array_new_size(token.len);

    add_normalized_token(token_buffer, input, token, options);
    char *token_str = char_array_get_string(token_buffer);
    PyObject *result = PyUnicode_DecodeUTF8((const char *)token_str, token_buffer->n - 1, "strict");

    if (result == NULL) {
        PyErr_SetString(PyExc_ValueError,
                        "Error decoding token");
        char_array_destroy(token_buffer);
        goto exit_decref_str;
    for (size_t i = 0; i < num_tokens; i++) {
        free(normalized_tokens[i].str);
    }

    char_array_destroy(token_buffer);
    free(normalized_tokens);

#ifndef IS_PY3K
    Py_XDECREF(str);

@@ -224,20 +174,24 @@ static PyObject *py_normalize_token(PyObject *self, PyObject *args)
    Py_XDECREF(unistr);

    return result;

exit_decref_str:
exit_free_normalized_tokens:
    for (size_t i = 0; i < num_tokens; i++) {
        free(normalized_tokens[i].str);
    }
    free(normalized_tokens);
exit_normalized_tokens_decref_str:
#ifndef IS_PY3K
    Py_XDECREF(str);
#endif
exit_decref_unistr:
exit_normalized_tokens_decref_unistr:
    Py_XDECREF(unistr);
    return 0;
}


static PyMethodDef normalize_methods[] = {
    {"normalize_string_utf8", (PyCFunction)py_normalize_string_utf8, METH_VARARGS, "normalize_string_utf8(input, options)"},
    {"normalize_string_latin", (PyCFunction)py_normalize_string_latin, METH_VARARGS, "normalize_string_latin(input, options)"},
    {"normalize_token", (PyCFunction)py_normalize_token, METH_VARARGS, "normalize_token(input, options)"},
    {"normalize_string", (PyCFunction)py_normalize_string, METH_VARARGS, "normalize_string(input, options)"},
    {"normalized_tokens", (PyCFunction)py_normalized_tokens, METH_VARARGS, "normalize_token(input, string_options, token_options, whitespace)"},
    {NULL, NULL},
};

@@ -295,32 +249,40 @@ init_normalize(void) {
        INITERROR;
    }

    if (!transliteration_module_setup(NULL)) {
    if (!libpostal_setup()) {
        PyErr_SetString(PyExc_RuntimeError,
                        "Could not load transliterate module");
                        "Could not load libpostal");
        Py_DECREF(module);
        INITERROR;
    }

    PyModule_AddObject(module, "NORMALIZE_STRING_LATIN_ASCII", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_LATIN_ASCII));
    PyModule_AddObject(module, "NORMALIZE_STRING_TRANSLITERATE", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_TRANSLITERATE));
    PyModule_AddObject(module, "NORMALIZE_STRING_STRIP_ACCENTS", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_STRIP_ACCENTS));
    PyModule_AddObject(module, "NORMALIZE_STRING_DECOMPOSE", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_DECOMPOSE));
    PyModule_AddObject(module, "NORMALIZE_STRING_COMPOSE", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_COMPOSE));
    PyModule_AddObject(module, "NORMALIZE_STRING_LOWERCASE", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_LOWERCASE));
    PyModule_AddObject(module, "NORMALIZE_STRING_TRIM", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_TRIM));
    PyModule_AddObject(module, "NORMALIZE_STRING_REPLACE_HYPHENS", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_REPLACE_HYPHENS));
    PyModule_AddObject(module, "NORMALIZE_STRING_SIMPLE_LATIN_ASCII", PyLong_FromUnsignedLongLong(NORMALIZE_STRING_SIMPLE_LATIN_ASCII));
    PyModule_AddObject(module, "NORMALIZE_STRING_LATIN_ASCII", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_LATIN_ASCII));
    PyModule_AddObject(module, "NORMALIZE_STRING_TRANSLITERATE", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_TRANSLITERATE));
    PyModule_AddObject(module, "NORMALIZE_STRING_STRIP_ACCENTS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_STRIP_ACCENTS));
    PyModule_AddObject(module, "NORMALIZE_STRING_DECOMPOSE", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_DECOMPOSE));
    PyModule_AddObject(module, "NORMALIZE_STRING_COMPOSE", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_COMPOSE));
    PyModule_AddObject(module, "NORMALIZE_STRING_LOWERCASE", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_LOWERCASE));
    PyModule_AddObject(module, "NORMALIZE_STRING_TRIM", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_TRIM));
    PyModule_AddObject(module, "NORMALIZE_STRING_REPLACE_HYPHENS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_REPLACE_HYPHENS));
    PyModule_AddObject(module, "NORMALIZE_STRING_SIMPLE_LATIN_ASCII", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_STRING_SIMPLE_LATIN_ASCII));


    PyModule_AddObject(module, "NORMALIZE_TOKEN_REPLACE_HYPHENS", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_REPLACE_HYPHENS));
    PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_HYPHENS", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_DELETE_HYPHENS));
    PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_FINAL_PERIOD", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_DELETE_FINAL_PERIOD));
    PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS));
    PyModule_AddObject(module, "NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES));
    PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE));
    PyModule_AddObject(module, "NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC));
    PyModule_AddObject(module, "NORMALIZE_TOKEN_REPLACE_DIGITS", PyLong_FromUnsignedLongLong(NORMALIZE_TOKEN_REPLACE_DIGITS));
    PyModule_AddObject(module, "NORMALIZE_TOKEN_REPLACE_HYPHENS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_HYPHENS));
    PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_HYPHENS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_DELETE_HYPHENS));
    PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_FINAL_PERIOD", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_DELETE_FINAL_PERIOD));
    PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS));
    PyModule_AddObject(module, "NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES));
    PyModule_AddObject(module, "NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE));
    PyModule_AddObject(module, "NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC));
    PyModule_AddObject(module, "NORMALIZE_TOKEN_REPLACE_DIGITS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_DIGITS));


    PyModule_AddObject(module, "NORMALIZE_DEFAULT_STRING_OPTIONS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_DEFAULT_STRING_OPTIONS));
    PyModule_AddObject(module, "NORMALIZE_DEFAULT_TOKEN_OPTIONS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_DEFAULT_TOKEN_OPTIONS));

    PyModule_AddObject(module, "NORMALIZE_TOKEN_OPTIONS_DROP_PERIODS", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_TOKEN_OPTIONS_DROP_PERIODS));

    PyModule_AddObject(module, "NORMALIZE_DEFAULT_TOKEN_OPTIONS_NUMERIC", PyLong_FromUnsignedLongLong(LIBPOSTAL_NORMALIZE_DEFAULT_TOKEN_OPTIONS_NUMERIC));


#if PY_MAJOR_VERSION >= 3
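The net effect of the pynormalize.c changes above: the extension no longer compiles libpostal internals (normalize.h, transliterate.h) into itself. It calls the public API instead (libpostal_setup, libpostal_normalize_string, libpostal_normalized_tokens), exposes the library's default option bitmasks as module constants, and builds the Python token list from libpostal_normalized_token_t results, relying on PyList_SetItem stealing references for cleanup on the success path.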
@@ -1,6 +1,6 @@
#include <Python.h>

#include "src/scanner.h"
#include <libpostal/libpostal.h>

#if PY_MAJOR_VERSION >= 3
#define IS_PY3K

@@ -18,14 +18,17 @@ struct module_state {
static struct module_state _state;
#endif

static PyObject *py_tokenize(PyObject *self, PyObject *args)
{
    PyObject *arg1;
    if (!PyArg_ParseTuple(args, "O:tokenize", &arg1)) {
    uint32_t arg_whitespace = 0;

    if (!PyArg_ParseTuple(args, "OI:tokenize", &arg1, &arg_whitespace)) {
        return 0;
    }

    bool whitespace = arg_whitespace;

    PyObject *unistr = PyUnicode_FromObject(arg1);
    if (unistr == NULL) {
        PyErr_SetString(PyExc_TypeError,

@@ -57,26 +60,28 @@ static PyObject *py_tokenize(PyObject *self, PyObject *args)
        goto error_decref_str;
    }

    token_array *tokens = tokenize(input);
    size_t num_tokens;

    libpostal_token_t *tokens = libpostal_tokenize(input, whitespace, &num_tokens);
    if (tokens == NULL) {
        goto error_decref_str;
    }

    PyObject *result = PyTuple_New(tokens->n);
    PyObject *result = PyTuple_New(num_tokens);
    if (!result) {
        token_array_destroy(tokens);
        free(tokens);
        goto error_decref_str;
        return 0;
    }

    PyObject *tuple;

    token_t token;
    for (size_t i = 0; i < tokens->n; i++) {
        token = tokens->a[i];
    libpostal_token_t token;
    for (size_t i = 0; i < num_tokens; i++) {
        token = tokens[i];
        tuple = Py_BuildValue("III", token.offset, token.len, token.type);
        if (PyTuple_SetItem(result, i, tuple) < 0) {
            token_array_destroy(tokens);
            free(tokens);
            goto error_decref_str;
        }
    }

@@ -86,7 +91,7 @@ static PyObject *py_tokenize(PyObject *self, PyObject *args)
#endif
    Py_XDECREF(unistr);

    token_array_destroy(tokens);
    free(tokens);

    return result;

@@ -100,12 +105,10 @@ error_decref_unistr:
}

static PyMethodDef tokenize_methods[] = {
    {"tokenize", (PyCFunction)py_tokenize, METH_VARARGS, "tokenize(text)"},
    {"tokenize", (PyCFunction)py_tokenize, METH_VARARGS, "tokenize(text, whitespace)"},
    {NULL, NULL},
};


#ifdef IS_PY3K

static int tokenize_traverse(PyObject *m, visitproc visit, void *arg) {
@@ -3,12 +3,9 @@ from geodata.text import _tokenize
from geodata.text.token_types import token_types


def tokenize_raw(s):
    return _tokenize.tokenize(safe_decode(s))


def tokenize(s):
def tokenize(s, whitespace=False):
    u = safe_decode(s)
    s = safe_encode(s)
    return [(safe_decode(s[start:start + length]), token_types.from_id(token_type))
            for start, length, token_type in _tokenize.tokenize(u)]
            for start, length, token_type in _tokenize.tokenize(u, whitespace)]
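The wrapper now forwards an optional whitespace flag to the C tokenizer. A small illustration, assuming the built geodata package (the exact tokens depend on libpostal's scanner):

    from geodata.text.tokenize import tokenize

    # With whitespace=True the underlying scanner also emits whitespace
    # tokens, so the original string can be reconstructed from the pieces.
    for token, token_type in tokenize(u'Main St', whitespace=True):
        print(repr(token), token_type)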
@@ -2,9 +2,7 @@ import os

from setuptools import setup, Extension, find_packages

this_dir = os.path.dirname(__file__)
PROJECT_DIR = os.path.join(this_dir, os.pardir)
SRC_DIR = os.path.join(PROJECT_DIR, 'src')
RESOURCES_DIR = 'resources'


def main():

@@ -14,35 +12,29 @@ def main():
        packages=find_packages(),
        ext_modules=[
            Extension('geodata.text._tokenize',
                      sources=[os.path.join(SRC_DIR, f)
                               for f in ('scanner.c',
                                         'string_utils.c',
                                         'tokens.c',
                                         'utf8proc/utf8proc.c',
                                         )
                               ] + ['geodata/text/pytokenize.c'],
                      include_dirs=[PROJECT_DIR],
                      extra_compile_args=['-O0', '-std=gnu99',
                      sources=['geodata/text/pytokenize.c'],
                      libraries=['postal'],
                      include_dirs=['/usr/local/include'],
                      library_dirs=['/usr/local/lib'],
                      extra_compile_args=['-std=c99',
                                          '-Wno-unused-function'],
                      ),
            Extension('geodata.text._normalize',
                      sources=[os.path.join(SRC_DIR, f)
                               for f in ('normalize.c',
                                         'string_utils.c',
                                         'utf8proc/utf8proc.c',
                                         'tokens.c',
                                         'unicode_scripts.c',
                                         'transliterate.c',
                                         'file_utils.c',
                                         'trie.c',
                                         'trie_search.c',)
                               ] + ['geodata/text/pynormalize.c'],
                      include_dirs=[PROJECT_DIR],
                      extra_compile_args=['-std=gnu99', '-DHAVE_CONFIG_H',
                                          '-DLIBPOSTAL_DATA_DIR="{}"'.format(os.getenv('LIBPOSTAL_DATA_DIR', os.path.realpath(os.path.join(PROJECT_DIR, 'data')))),
                      sources=['geodata/text/pynormalize.c'],
                      libraries=['postal'],
                      include_dirs=['/usr/local/include'],
                      library_dirs=['/usr/local/lib'],
                      extra_compile_args=['-std=c99',
                                          '-Wno-unused-function'],
                      ),
        ],
        data_files=[
            (os.path.join(RESOURCES_DIR, os.path.relpath(d, RESOURCES_DIR)), [os.path.join(d, filename) for filename in filenames])
            for d, _, filenames in os.walk(RESOURCES_DIR)
        ],
        package_data={
            'geodata': ['**/*.sh']
        },
        include_package_data=True,
        zip_safe=False,
        url='http://mapzen.com',
@@ -12,7 +12,7 @@ DEFAULT_INCLUDES = -I.. -I/usr/local/include
CFLAGS =

lib_LTLIBRARIES = libpostal.la
libpostal_la_SOURCES = strndup.c libpostal.c address_dictionary.c transliterate.c tokens.c trie.c trie_search.c trie_utils.c string_utils.c file_utils.c utf8proc/utf8proc.c cmp/cmp.c normalize.c numex.c features.c unicode_scripts.c address_parser.c address_parser_io.c averaged_perceptron.c crf.c crf_context.c sparse_matrix.c averaged_perceptron_tagger.c graph.c graph_builder.c language_classifier.c language_features.c logistic_regression.c logistic.c minibatch.c float_utils.c ngrams.c
libpostal_la_SOURCES = strndup.c libpostal.c expand.c address_dictionary.c transliterate.c tokens.c trie.c trie_search.c trie_utils.c string_utils.c file_utils.c utf8proc/utf8proc.c normalize.c numex.c features.c unicode_scripts.c address_parser.c address_parser_io.c averaged_perceptron.c crf.c crf_context.c sparse_matrix.c averaged_perceptron_tagger.c graph.c graph_builder.c language_classifier.c language_features.c logistic_regression.c logistic.c minibatch.c float_utils.c ngrams.c place.c near_dupe.c double_metaphone.c geohash/geohash.c dedupe.c string_similarity.c acronyms.c soft_tfidf.c jaccard.c
libpostal_la_LIBADD = libscanner.la $(CBLAS_LIBS)
libpostal_la_CFLAGS = $(CFLAGS_O2) -D LIBPOSTAL_EXPORTS
libpostal_la_LDFLAGS = -version-info @LIBPOSTAL_SO_VERSION@ -no-undefined

@@ -26,7 +26,7 @@ noinst_LTLIBRARIES = libscanner.la
libscanner_la_SOURCES = klib/drand48.c scanner.c
libscanner_la_CFLAGS = $(CFLAGS_O0) -D LIBPOSTAL_EXPORTS $(CFLAGS_SCANNER_EXTRA)

noinst_PROGRAMS = libpostal bench address_parser address_parser_train address_parser_test build_address_dictionary build_numex_table build_trans_table address_parser_train address_parser_test language_classifier_train language_classifier language_classifier_test
noinst_PROGRAMS = libpostal bench address_parser address_parser_train address_parser_test build_address_dictionary build_numex_table build_trans_table address_parser_train address_parser_test language_classifier_train language_classifier language_classifier_test near_dupe_test

libpostal_SOURCES = strndup.c main.c json_encode.c file_utils.c string_utils.c utf8proc/utf8proc.c
libpostal_LDADD = libpostal.la

@@ -38,6 +38,11 @@ address_parser_SOURCES = strndup.c address_parser_cli.c json_encode.c linenoise/
address_parser_LDADD = libpostal.la $(CBLAS_LIBS)
address_parser_CFLAGS = $(CFLAGS_O3)

near_dupe_test_SOURCES = strndup.c near_dupe_test.c string_utils.c utf8proc/utf8proc.c
near_dupe_test_LDADD = libpostal.la
near_dupe_test_CFLAGS = $(CFLAGS_O3)


build_address_dictionary_SOURCES = strndup.c address_dictionary_builder.c address_dictionary.c file_utils.c string_utils.c trie.c trie_search.c utf8proc/utf8proc.c
build_address_dictionary_CFLAGS = $(CFLAGS_O3)
build_numex_table_SOURCES = strndup.c numex_table_builder.c numex.c file_utils.c string_utils.c tokens.c trie.c trie_search.c utf8proc/utf8proc.c
src/acronyms.c (new file, +150)
@@ -0,0 +1,150 @@
#include "acronyms.h"

static uint32_array *stopword_tokens(const char *str, token_array *tokens, size_t num_languages, char **languages) {
    size_t len = tokens->n;
    uint32_array *stopwords_array = uint32_array_new_zeros(len);

    uint32_t *stopwords = stopwords_array->a;

    for (size_t l = 0; l < num_languages; l++) {
        char *lang = languages[l];
        phrase_array *lang_phrases = search_address_dictionaries_tokens((char *)str, tokens, lang);

        if (lang_phrases != NULL) {
            size_t num_lang_phrases = lang_phrases->n;
            for (size_t p = 0; p < num_lang_phrases; p++) {
                phrase_t phrase = lang_phrases->a[p];

                if (address_phrase_in_dictionary(phrase, DICTIONARY_STOPWORD)) {
                    for (size_t stop_idx = phrase.start; stop_idx < phrase.start + phrase.len; stop_idx++) {
                        stopwords[stop_idx] = 1;
                    }
                }
            }
            phrase_array_destroy(lang_phrases);
        }
    }

    return stopwords_array;
}

phrase_array *acronym_token_alignments(const char *s1, token_array *tokens1, const char *s2, token_array *tokens2, size_t num_languages, char **languages) {
    if (s1 == NULL || tokens1 == NULL || s2 == NULL || tokens2 == NULL) {
        return NULL;
    }

    size_t len1 = tokens1->n;
    size_t len2 = tokens2->n;
    if (len1 == 0 || len2 == 0 || len1 == len2) return NULL;

    if (len1 > len2) {
        const char *tmp_s = s1;
        s1 = s2;
        s2 = tmp_s;

        token_array *tmp_t = tokens1;
        tokens1 = tokens2;
        tokens2 = tmp_t;

        size_t tmp_l = len1;
        len1 = len2;
        len2 = tmp_l;
    }

    phrase_array *alignments = NULL;

    token_t *t1 = tokens1->a;
    token_t *t2 = tokens2->a;

    uint32_array *stopwords_array = stopword_tokens(s2, tokens2, num_languages, languages);
    if (stopwords_array == NULL) {
        return NULL;
    }

    uint32_t *stopwords = stopwords_array->a;

    ssize_t acronym_start = -1;
    ssize_t acronym_token_pos = -1;

    uint8_t *ptr1 = (uint8_t *)s1;
    uint8_t *ptr2 = (uint8_t *)s2;

    int32_t c1, c2;
    ssize_t c1_len;
    ssize_t c2_len;

    size_t t2_consumed = 0;

    for (size_t i = 0; i < len1; i++) {
        token_t ti = t1[i];

        c1_len = utf8proc_iterate(ptr1 + ti.offset, ti.len, &c1);
        if (c1_len <= 0 || c1 == 0) {
            break;
        }

        // Make sure it's a non-ideographic word. Single letter abbreviations will be captured by other methods
        if (!is_word_token(ti.type) || is_ideographic(ti.type) || ti.len == c1_len) {
            acronym_token_pos = -1;
            continue;
        }

        size_t ti_pos = 0;

        for (size_t j = t2_consumed; j < len2; j++) {
            token_t tj = t2[j];
            c2_len = utf8proc_iterate(ptr2 + tj.offset, tj.len, &c2);
            if (c2_len <= 0) {
                break;
            }

            if (utf8proc_tolower(c1) == utf8proc_tolower(c2)) {
                ti_pos += c1_len;
                if (acronym_start < 0) {
                    acronym_start = j;
                    acronym_token_pos = 0;
                }
                acronym_token_pos++;
                c1_len = utf8proc_iterate(ptr1 + ti.offset + ti_pos, ti.len, &c1);
            } else if (stopwords[j] && acronym_token_pos > 0) {
                continue;
            } else if (is_punctuation(tj.type) && acronym_token_pos > 0) {
                continue;
            } else if (ti_pos < ti.len) {
                acronym_token_pos = -1;
                acronym_start = -1;
                ti_pos = 0;
                continue;
            }

            if ((utf8_is_period(c1) || utf8_is_hyphen(c1)) && ti_pos < ti.len) {
                ti_pos += c1_len;
                if (ti_pos < ti.len) {
                    c1_len = utf8proc_iterate(ptr1 + ti.offset + ti_pos, ti.len, &c1);
                    if (c1_len <= 0 || c1 == 0) {
                        break;
                    }
                }
            }

            if (ti_pos == ti.len) {
                phrase_t phrase = (phrase_t){acronym_start, j - acronym_start + 1, i};
                // got alignment
                if (alignments == NULL) {
                    alignments = phrase_array_new();
                }

                phrase_array_push(alignments, phrase);

                ti_pos = 0;
                acronym_token_pos = -1;
                acronym_start = -1;
            }
        }
    }

    uint32_array_destroy(stopwords_array);

    return alignments;
}

src/acronyms.h (new file, +15)
@@ -0,0 +1,15 @@
#ifndef ACRONYMS_H
#define ACRONYMS_H

#include <stdio.h>
#include <stdlib.h>

#include "address_dictionary.h"
#include "collections.h"
#include "tokens.h"
#include "token_types.h"

phrase_array *acronym_token_alignments(const char *s1, token_array *tokens1, const char *s2, token_array *tokens2, size_t num_languages, char **languages);

#endif
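acronym_token_alignments walks the characters of a multi-letter token in the shorter string against the initial characters of consecutive tokens in the longer string, skipping stopwords and punctuation mid-acronym, and records each complete match as a phrase. A rough Python sketch of that idea (an illustration only, not the C implementation; it ignores the period and hyphen handling above):

    def acronym_aligns(acronym, tokens, stopwords=frozenset()):
        """Return (start, span_length) if acronym aligns with tokens, else None."""
        acronym = acronym.lower()
        i = 0
        start = None
        for j, tok in enumerate(tokens):
            tok = tok.lower()
            if i < len(acronym) and tok[0] == acronym[i]:
                if start is None:
                    start = j
                i += 1
                if i == len(acronym):
                    return (start, j - start + 1)
            elif start is not None and tok in stopwords:
                continue  # stopwords may occur mid-acronym, e.g. "of"
            else:
                i = 0
                start = None
        return None

    # 'dmv' aligns with a 4-token span that includes the stopword 'of'
    assert acronym_aligns('dmv', ['department', 'of', 'motor', 'vehicles'], {'of'}) == (0, 4)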
@@ -1,5 +1,6 @@
#include <dirent.h>
#include <limits.h>
#include <stdarg.h>

#include "address_dictionary.h"

@@ -35,6 +36,38 @@ inline bool address_expansion_in_dictionary(address_expansion_t expansion, uint1
}

bool address_phrase_in_dictionary(phrase_t phrase, uint16_t dictionary_id) {
    address_expansion_value_t *value = address_dictionary_get_expansions(phrase.data);
    if (value == NULL) return false;

    address_expansion_array *expansions = value->expansions;
    if (expansions == NULL) return false;

    address_expansion_t *expansions_array = expansions->a;

    for (size_t i = 0; i < expansions->n; i++) {
        address_expansion_t expansion = expansions_array[i];
        if (address_expansion_in_dictionary(expansion, dictionary_id)) {
            return true;
        }
    }
    return false;
}

bool address_phrase_in_dictionaries(phrase_t phrase, size_t n, ...) {
    va_list args;
    va_start(args, n);
    bool in_dictionary = false;
    for (size_t i = 0; i < n; i++) {
        uint16_t dictionary_id = va_arg(args, uint16_t);
        in_dictionary = address_phrase_in_dictionary(phrase, dictionary_id);
        if (in_dictionary) break;
    }
    va_end(args);
    return in_dictionary;
}

int32_t address_dictionary_next_canonical_index(void) {
    if (address_dict == NULL || address_dict->canonical == NULL) {

@@ -63,6 +96,32 @@ char *address_dictionary_get_canonical(uint32_t index) {
    return cstring_array_get_string(address_dict->canonical, index);
}

inline bool address_expansions_have_canonical_interpretation(address_expansion_array *expansions) {
    if (expansions == NULL) return false;

    address_expansion_t *expansions_array = expansions->a;

    for (size_t i = 0; i < expansions->n; i++) {
        address_expansion_t expansion = expansions_array[i];
        if (expansion.canonical_index == NULL_CANONICAL_INDEX) {
            return true;
        }
    }
    return false;
}

inline bool address_phrase_has_canonical_interpretation(phrase_t phrase) {
    address_expansion_value_t *value = address_dictionary_get_expansions(phrase.data);
    if (value == NULL) return false;

    address_expansion_array *expansions = value->expansions;

    return address_expansions_have_canonical_interpretation(expansions);
}


address_expansion_value_t *address_expansion_value_new(void) {
    address_expansion_value_t *self = malloc(sizeof(address_expansion_value_t));

@@ -251,6 +310,31 @@ phrase_array *search_address_dictionaries_tokens(char *str, token_array *tokens,
    return phrases;
}

phrase_t search_address_dictionaries_substring(char *str, size_t len, char *lang) {
    if (str == NULL) return NULL_PHRASE;
    if (address_dict == NULL) {
        log_error(ADDRESS_DICTIONARY_SETUP_ERROR);
        return NULL_PHRASE;
    }

    trie_prefix_result_t prefix = get_language_prefix(lang);

    if (prefix.node_id == NULL_NODE_ID) {
        log_debug("prefix.node_id == NULL_NODE_ID\n");
        return NULL_PHRASE;
    }

    phrase_t phrase = trie_search_prefixes_from_index(address_dict->trie, str, len, prefix.node_id);
    if (phrase.len == len) {
        return phrase;
    } else {
        return NULL_PHRASE;
    }
}

phrase_t search_address_dictionaries_prefix(char *str, size_t len, char *lang) {
    if (str == NULL) return NULL_PHRASE;
    if (address_dict == NULL) {

@@ -63,15 +63,20 @@ bool search_address_dictionaries_with_phrases(char *str, char *lang, phrase_arra
phrase_array *search_address_dictionaries_tokens(char *str, token_array *tokens, char *lang);
bool search_address_dictionaries_tokens_with_phrases(char *str, token_array *tokens, char *lang, phrase_array **phrases);

phrase_t search_address_dictionaries_substring(char *str, size_t len, char *lang);
phrase_t search_address_dictionaries_prefix(char *str, size_t len, char *lang);
phrase_t search_address_dictionaries_suffix(char *str, size_t len, char *lang);

address_expansion_value_t *address_dictionary_get_expansions(uint32_t i);
bool address_expansion_in_dictionary(address_expansion_t expansion, uint16_t dictionary_id);
bool address_phrase_in_dictionary(phrase_t phrase, uint16_t dictionary_id);
bool address_phrase_in_dictionaries(phrase_t phrase, size_t n, ...);
char *address_dictionary_get_canonical(uint32_t index);
int32_t address_dictionary_next_canonical_index(void);
bool address_dictionary_add_canonical(char *canonical);
bool address_dictionary_add_expansion(char *key, char *language, address_expansion_t expansion);
bool address_expansions_have_canonical_interpretation(address_expansion_array *expansions);
bool address_phrase_has_canonical_interpretation(phrase_t phrase);

void address_dictionary_destroy(address_dictionary_t *self);

@@ -105,7 +105,14 @@ typedef enum {

#define ADDRESS_PARSER_LABEL_HOUSE "house"
#define ADDRESS_PARSER_LABEL_HOUSE_NUMBER "house_number"
#define ADDRESS_PARSER_LABEL_PO_BOX "po_box"
#define ADDRESS_PARSER_LABEL_BUILDING "building"
#define ADDRESS_PARSER_LABEL_ENTRANCE "entrance"
#define ADDRESS_PARSER_LABEL_STAIRCASE "staircase"
#define ADDRESS_PARSER_LABEL_LEVEL "level"
#define ADDRESS_PARSER_LABEL_UNIT "unit"
#define ADDRESS_PARSER_LABEL_ROAD "road"
#define ADDRESS_PARSER_LABEL_METRO_STATION "metro_station"
#define ADDRESS_PARSER_LABEL_SUBURB "suburb"
#define ADDRESS_PARSER_LABEL_CITY_DISTRICT "city_district"
#define ADDRESS_PARSER_LABEL_CITY "city"

@@ -117,6 +124,8 @@ typedef enum {
#define ADDRESS_PARSER_LABEL_COUNTRY "country"
#define ADDRESS_PARSER_LABEL_WORLD_REGION "world_region"

#define ADDRESS_PARSER_LABEL_WEBSITE "website"
#define ADDRESS_PARSER_LABEL_TELEPHONE "phone"

typedef union address_parser_types {
    uint32_t value;
src/dedupe.c (new file, +400)
@@ -0,0 +1,400 @@
#include "acronyms.h"
#include "address_parser.h"
#include "dedupe.h"
#include "expand.h"
#include "float_utils.h"
#include "jaccard.h"
#include "place.h"
#include "scanner.h"
#include "soft_tfidf.h"
#include "token_types.h"

bool expansions_intersect(cstring_array *expansions1, cstring_array *expansions2) {
    size_t n1 = cstring_array_num_strings(expansions1);
    size_t n2 = cstring_array_num_strings(expansions2);

    bool intersect = false;

    for (size_t i = 0; i < n1; i++) {
        char *e1 = cstring_array_get_string(expansions1, i);
        for (size_t j = 0; j < n2; j++) {
            char *e2 = cstring_array_get_string(expansions2, j);
            if (string_equals(e1, e2)) {
                intersect = true;
                break;
            }
        }
        if (intersect) break;
    }
    return intersect;
}


bool address_component_equals_root_option(char *s1, char *s2, libpostal_normalize_options_t options, bool root) {
    uint64_t normalize_string_options = get_normalize_string_options(options);

    size_t n1, n2;
    cstring_array *expansions1 = NULL;
    cstring_array *expansions2 = NULL;
    if (!root) {
        expansions1 = expand_address(s1, options, &n1);
    } else {
        expansions1 = expand_address_root(s1, options, &n1);
    }

    if (expansions1 == NULL) return false;

    if (!root) {
        expansions2 = expand_address(s2, options, &n2);
    } else {
        expansions2 = expand_address_root(s2, options, &n2);
    }

    if (expansions2 == NULL) {
        cstring_array_destroy(expansions1);
        return false;
    }

    bool intersect = expansions_intersect(expansions1, expansions2);

    cstring_array_destroy(expansions1);
    cstring_array_destroy(expansions2);

    return intersect;
}

static inline bool address_component_equals(char *s1, char *s2, libpostal_normalize_options_t options) {
    return address_component_equals_root_option(s1, s2, options, false);
}

static inline bool address_component_equals_root(char *s1, char *s2, libpostal_normalize_options_t options) {
    return address_component_equals_root_option(s1, s2, options, true);
}


static inline bool address_component_equals_root_fallback(char *s1, char *s2, libpostal_normalize_options_t options, bool root) {
    return address_component_equals_root(s1, s2, options) || address_component_equals(s1, s2, options);
}

libpostal_duplicate_status_t is_duplicate(char *value1, char *value2, libpostal_normalize_options_t normalize_options, libpostal_duplicate_options_t options, bool root_comparison_first, libpostal_duplicate_status_t root_comparison_status) {
    if (value1 == NULL || value2 == NULL) {
        return LIBPOSTAL_NULL_DUPLICATE_STATUS;
    }

    normalize_options.num_languages = options.num_languages;
    normalize_options.languages = options.languages;

    if (root_comparison_first) {
        if (address_component_equals_root(value1, value2, normalize_options)) {
            return root_comparison_status;
        } else if (address_component_equals(value1, value2, normalize_options)) {
            return LIBPOSTAL_EXACT_DUPLICATE;
        }
    } else {
        if (address_component_equals(value1, value2, normalize_options)) {
            return LIBPOSTAL_EXACT_DUPLICATE;
        } else if (address_component_equals_root(value1, value2, normalize_options)) {
            return root_comparison_status;
        }
    }
    return LIBPOSTAL_NON_DUPLICATE;
}

libpostal_duplicate_status_t is_name_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options) {
    libpostal_normalize_options_t normalize_options = libpostal_get_default_options();
    normalize_options.address_components = LIBPOSTAL_ADDRESS_NAME | LIBPOSTAL_ADDRESS_ANY;
    bool root_comparison_first = false;
    libpostal_duplicate_status_t root_comparison_status = LIBPOSTAL_POSSIBLE_DUPLICATE_NEEDS_REVIEW;
    return is_duplicate(value1, value2, normalize_options, options, root_comparison_first, root_comparison_status);
}

libpostal_duplicate_status_t is_street_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options) {
    libpostal_normalize_options_t normalize_options = libpostal_get_default_options();
    normalize_options.address_components = LIBPOSTAL_ADDRESS_STREET | LIBPOSTAL_ADDRESS_ANY;
    bool root_comparison_first = false;
    libpostal_duplicate_status_t root_comparison_status = LIBPOSTAL_POSSIBLE_DUPLICATE_NEEDS_REVIEW;
    return is_duplicate(value1, value2, normalize_options, options, root_comparison_first, root_comparison_status);
}

libpostal_duplicate_status_t is_house_number_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options) {
    libpostal_normalize_options_t normalize_options = libpostal_get_default_options();
    normalize_options.address_components = LIBPOSTAL_ADDRESS_HOUSE_NUMBER | LIBPOSTAL_ADDRESS_ANY;
    bool root_comparison_first = true;
    libpostal_duplicate_status_t root_comparison_status = LIBPOSTAL_EXACT_DUPLICATE;
    return is_duplicate(value1, value2, normalize_options, options, root_comparison_first, root_comparison_status);
}

libpostal_duplicate_status_t is_unit_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options) {
    libpostal_normalize_options_t normalize_options = libpostal_get_default_options();
    normalize_options.address_components = LIBPOSTAL_ADDRESS_UNIT | LIBPOSTAL_ADDRESS_ANY;
    bool root_comparison_first = true;
    libpostal_duplicate_status_t root_comparison_status = LIBPOSTAL_EXACT_DUPLICATE;
    return is_duplicate(value1, value2, normalize_options, options, root_comparison_first, root_comparison_status);
}

libpostal_duplicate_status_t is_floor_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options) {
    libpostal_normalize_options_t normalize_options = libpostal_get_default_options();
    normalize_options.address_components = LIBPOSTAL_ADDRESS_LEVEL | LIBPOSTAL_ADDRESS_ANY;
    bool root_comparison_first = true;
    libpostal_duplicate_status_t root_comparison_status = LIBPOSTAL_EXACT_DUPLICATE;
    return is_duplicate(value1, value2, normalize_options, options, root_comparison_first, root_comparison_status);
}

libpostal_duplicate_status_t is_po_box_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options) {
    libpostal_normalize_options_t normalize_options = libpostal_get_default_options();
    normalize_options.address_components = LIBPOSTAL_ADDRESS_PO_BOX | LIBPOSTAL_ADDRESS_ANY;
    bool root_comparison_first = true;
    libpostal_duplicate_status_t root_comparison_status = LIBPOSTAL_EXACT_DUPLICATE;
    return is_duplicate(value1, value2, normalize_options, options, root_comparison_first, root_comparison_status);
}

libpostal_duplicate_status_t is_postal_code_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options) {
    libpostal_normalize_options_t normalize_options = libpostal_get_default_options();
    normalize_options.address_components = LIBPOSTAL_ADDRESS_POSTAL_CODE | LIBPOSTAL_ADDRESS_ANY;
    bool root_comparison_first = true;
    libpostal_duplicate_status_t root_comparison_status = LIBPOSTAL_EXACT_DUPLICATE;
    return is_duplicate(value1, value2, normalize_options, options, root_comparison_first, root_comparison_status);
}
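All of these comparisons reduce to expansions_intersect: two component strings are considered equal if any of their normalized expansions coincide, with the root-expansion pass distinguishing "exact" from "needs review" matches per component type. The same intersection test can be approximated from Python with the separate pypostal binding (shown purely as an illustration of the idea, not as part of this commit; a root-expansion variant may not be exposed there):

    from postal.expand import expand_address

    def component_equals(s1, s2, **kwargs):
        # Duplicate test: do the normalized expansion sets intersect?
        return bool(set(expand_address(s1, **kwargs)) & set(expand_address(s2, **kwargs)))

    assert component_equals('Main St', 'Main Street')
    assert not component_equals('Main St', 'Elm St')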
libpostal_duplicate_status_t is_toponym_duplicate(size_t num_components1, char **labels1, char **values1, size_t num_components2, char **labels2, char **values2, libpostal_duplicate_options_t options) {
|
||||
libpostal_normalize_options_t normalize_options = libpostal_get_default_options();
|
||||
normalize_options.address_components = LIBPOSTAL_ADDRESS_TOPONYM | LIBPOSTAL_ADDRESS_ANY;
|
||||
|
||||
place_t *place1 = place_from_components(num_components1, labels1, values1);
|
||||
place_t *place2 = place_from_components(num_components2, labels2, values2);
|
||||
|
||||
bool city_match = false;
|
||||
libpostal_duplicate_status_t dupe_status = LIBPOSTAL_NON_DUPLICATE;
|
||||
|
||||
if (place1->city != NULL && place2->city != NULL) {
|
||||
city_match = address_component_equals(place1->city, place2->city, normalize_options);
|
||||
if (city_match) {
|
||||
dupe_status = LIBPOSTAL_EXACT_DUPLICATE;
|
||||
}
|
||||
}
|
||||
|
||||
if (!city_match && place1->city == NULL && place1->city_district != NULL && place2->city != NULL) {
|
||||
city_match = address_component_equals(place1->city_district, place2->city, normalize_options);
|
||||
if (city_match) {
|
||||
dupe_status = LIBPOSTAL_LIKELY_DUPLICATE;
|
||||
}
|
||||
}
|
||||
|
||||
if (!city_match && place1->city == NULL && place1->suburb != NULL && place2->city != NULL) {
|
||||
city_match = address_component_equals(place1->suburb, place2->city, normalize_options);
|
||||
if (city_match) {
|
||||
dupe_status = LIBPOSTAL_POSSIBLE_DUPLICATE_NEEDS_REVIEW;
|
||||
}
|
||||
}
|
||||
|
||||
if (!city_match && place2->city == NULL && place2->city_district != NULL && place1->city != NULL) {
|
||||
city_match = address_component_equals(place1->city, place2->city_district, normalize_options);
|
||||
if (city_match) {
|
||||
dupe_status = LIBPOSTAL_LIKELY_DUPLICATE;
|
||||
}
|
||||
}
|
||||
|
||||
    // Mirror of the suburb case above: place2 has no city, so compare its
    // suburb against place1's city (both guaranteed non-NULL by the guard)
    if (!city_match && place2->city == NULL && place2->suburb != NULL && place1->city != NULL) {
        city_match = address_component_equals(place1->city, place2->suburb, normalize_options);
        if (city_match) {
            dupe_status = LIBPOSTAL_POSSIBLE_DUPLICATE_NEEDS_REVIEW;
        }
    }

    if (!city_match) {
        goto exit_destroy_places;
    }

    if (city_match && place1->state_district != NULL && place2->state_district != NULL && !address_component_equals_root(place1->state_district, place2->state_district, normalize_options)) {
        dupe_status = LIBPOSTAL_NON_DUPLICATE;
        goto exit_destroy_places;
    }

    if (city_match && place1->state != NULL && place2->state != NULL && !address_component_equals(place1->state, place2->state, normalize_options)) {
        dupe_status = LIBPOSTAL_NON_DUPLICATE;
        goto exit_destroy_places;
    }

    if (city_match && place1->country != NULL && place2->country != NULL && !address_component_equals(place1->country, place2->country, normalize_options)) {
        dupe_status = LIBPOSTAL_NON_DUPLICATE;
        goto exit_destroy_places;
    }

exit_destroy_places:
    place_destroy(place1);
    place_destroy(place2);
    return dupe_status;
}


char *joined_string_and_tokens_from_strings(char **strings, size_t num_strings, token_array *tokens) {
    if (tokens == NULL || strings == NULL || num_strings == 0) return NULL;
    token_array_clear(tokens);

    size_t full_len = 0;
    for (size_t i = 0; i < num_strings; i++) {
        full_len += strlen(strings[i]);
        if (i < num_strings - 1) full_len++;
    }

    char_array *a = char_array_new_size(full_len);
    for (size_t i = 0; i < num_strings; i++) {
        char *str = strings[i];
        size_t len = strlen(str);
        size_t offset = a->n;
        char_array_append(a, str);

        scanner_t scanner = scanner_from_string(str, len);
        uint16_t token_type = scan_token(&scanner);

        token_t token = (token_t){offset, len, token_type};
        token_array_push(tokens, token);
        if (i < num_strings - 1 && !is_ideographic(token.type)) {
            char_array_append(a, " ");
        }
    }

    char_array_terminate(a);
    return char_array_to_string(a);
}


bool have_ideographic_word_tokens(token_array *token_array) {
    if (token_array == NULL) return false;

    size_t n = token_array->n;
    token_t *tokens = token_array->a;
    for (size_t i = 0; i < n; i++) {
        token_t token = tokens[i];
        if (is_ideographic(token.type) && is_word_token(token.type)) {
            return true;
        }
    }
    return false;
}


libpostal_fuzzy_duplicate_status_t is_fuzzy_duplicate(size_t num_tokens1, char **tokens1, double *token_scores1, size_t num_tokens2, char **tokens2, double *token_scores2, libpostal_fuzzy_duplicate_options_t options, libpostal_normalize_options_t normalize_options, soft_tfidf_options_t soft_tfidf_options, bool do_acronyms) {
    normalize_options.num_languages = options.num_languages;
    normalize_options.languages = options.languages;

    normalize_options.address_components |= LIBPOSTAL_ADDRESS_ANY;

    double max_sim = 0.0;

    // Default is non-duplicate
    libpostal_duplicate_status_t dupe_status = LIBPOSTAL_NON_DUPLICATE;

    token_array *token_array1 = token_array_new_size(num_tokens1);
    char *joined1 = joined_string_and_tokens_from_strings(tokens1, num_tokens1, token_array1);

    token_array *token_array2 = token_array_new_size(num_tokens2);
    char *joined2 = joined_string_and_tokens_from_strings(tokens2, num_tokens2, token_array2);

    size_t num_languages = options.num_languages;
    char **languages = options.languages;

    phrase_array *acronym_alignments = NULL;

    phrase_array *phrases1 = NULL;
    phrase_array *phrases2 = NULL;

    bool is_ideographic = have_ideographic_word_tokens(token_array1) && have_ideographic_word_tokens(token_array2);

    if (!is_ideographic) {
        if (do_acronyms) {
            acronym_alignments = acronym_token_alignments(joined1, token_array1, joined2, token_array2, num_languages, languages);
        }

        if (num_languages > 0) {
            phrases1 = phrase_array_new();
            phrases2 = phrase_array_new();

            for (size_t i = 0; i < num_languages; i++) {
                char *lang = languages[i];
                phrase_array_clear(phrases1);
                phrase_array_clear(phrases2);

                search_address_dictionaries_tokens_with_phrases(joined1, token_array1, lang, &phrases1);
                search_address_dictionaries_tokens_with_phrases(joined2, token_array2, lang, &phrases2);

                double sim = soft_tfidf_similarity_with_phrases_and_acronyms(num_tokens1, tokens1, token_scores1, phrases1, num_tokens2, tokens2, token_scores2, phrases2, acronym_alignments, soft_tfidf_options);
                if (sim > max_sim) {
                    max_sim = sim;
                }
            }
        } else if (do_acronyms) {
            max_sim = soft_tfidf_similarity_with_phrases_and_acronyms(num_tokens1, tokens1, token_scores1, phrases1, num_tokens2, tokens2, token_scores2, phrases2, acronym_alignments, soft_tfidf_options);
        } else {
            max_sim = soft_tfidf_similarity(num_tokens1, tokens1, token_scores1, num_tokens2, tokens2, token_scores2, soft_tfidf_options);
        }
    } else {
        max_sim = jaccard_similarity_string_arrays(num_tokens1, tokens1, num_tokens2, tokens2);
        if (string_equals(joined1, joined2)) {
            dupe_status = LIBPOSTAL_EXACT_DUPLICATE;
        } else if (address_component_equals_root(joined1, joined2, normalize_options)) {
            dupe_status = LIBPOSTAL_LIKELY_DUPLICATE;
        }
    }

    if (dupe_status == LIBPOSTAL_NON_DUPLICATE) {
        if (max_sim > options.likely_dupe_threshold || double_equals(max_sim, options.likely_dupe_threshold)) {
            dupe_status = LIBPOSTAL_LIKELY_DUPLICATE;
        } else if (max_sim > options.needs_review_threshold || double_equals(max_sim, options.needs_review_threshold)) {
            dupe_status = LIBPOSTAL_POSSIBLE_DUPLICATE_NEEDS_REVIEW;
        }
    }

    if (phrases1 != NULL) {
        phrase_array_destroy(phrases1);
    }

    if (phrases2 != NULL) {
        phrase_array_destroy(phrases2);
    }

    if (acronym_alignments != NULL) {
        phrase_array_destroy(acronym_alignments);
    }

    if (token_array1 != NULL) {
        token_array_destroy(token_array1);
    }

    if (joined1 != NULL) {
        free(joined1);
    }

    if (token_array2 != NULL) {
        token_array_destroy(token_array2);
    }

    if (joined2 != NULL) {
        free(joined2);
    }

    return (libpostal_fuzzy_duplicate_status_t){dupe_status, max_sim};
}


inline libpostal_fuzzy_duplicate_status_t is_name_duplicate_fuzzy(size_t num_tokens1, char **tokens1, double *token_scores1, size_t num_tokens2, char **tokens2, double *token_scores2, libpostal_fuzzy_duplicate_options_t options) {
    libpostal_normalize_options_t normalize_options = libpostal_get_default_options();
    normalize_options.address_components = LIBPOSTAL_ADDRESS_NAME;

    bool do_acronyms = true;

    soft_tfidf_options_t soft_tfidf_options = soft_tfidf_default_options();

    return is_fuzzy_duplicate(num_tokens1, tokens1, token_scores1, num_tokens2, tokens2, token_scores2, options, normalize_options, soft_tfidf_options, do_acronyms);
}


inline libpostal_fuzzy_duplicate_status_t is_street_duplicate_fuzzy(size_t num_tokens1, char **tokens1, double *token_scores1, size_t num_tokens2, char **tokens2, double *token_scores2, libpostal_fuzzy_duplicate_options_t options) {
    libpostal_normalize_options_t normalize_options = libpostal_get_default_options();
    normalize_options.address_components = LIBPOSTAL_ADDRESS_STREET;

    // General-purpose acronyms didn't make as much sense in the street name context;
    // things like County Road = CR should be handled by the address dictionaries
    bool do_acronyms = false;

    soft_tfidf_options_t soft_tfidf_options = soft_tfidf_default_options();

    return is_fuzzy_duplicate(num_tokens1, tokens1, token_scores1, num_tokens2, tokens2, token_scores2, options, normalize_options, soft_tfidf_options, do_acronyms);
}

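Example (illustrative, not part of this commit): driving the fuzzy comparators above. The token strings and uniform scores here are invented for demonstration; real callers would pass per-token weights (e.g. TF-IDF scores from their own corpus), and the review/likely-dupe cutoffs come from the options struct rather than being hard-coded.

    char *tokens1[] = {"twin", "peaks", "cafe"};
    double scores1[] = {1.0, 1.0, 1.0};
    char *tokens2[] = {"twin", "peaks", "coffee"};
    double scores2[] = {1.0, 1.0, 1.0};

    libpostal_fuzzy_duplicate_options_t opts = libpostal_get_default_fuzzy_duplicate_options();
    libpostal_fuzzy_duplicate_status_t res = is_name_duplicate_fuzzy(3, tokens1, scores1, 3, tokens2, scores2, opts);
    // res.status is a libpostal_duplicate_status_t; res.similarity is the best
    // soft-TFIDF similarity found across the supplied languages
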
23
src/dedupe.h
Normal file
@@ -0,0 +1,23 @@
#ifndef DEDUPE_H
#define DEDUPE_H

#include <stdlib.h>
#include <stdio.h>

#include "libpostal.h"
#include "string_utils.h"

libpostal_duplicate_status_t is_name_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options);
libpostal_duplicate_status_t is_street_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options);
libpostal_duplicate_status_t is_house_number_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options);
libpostal_duplicate_status_t is_po_box_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options);
libpostal_duplicate_status_t is_unit_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options);
libpostal_duplicate_status_t is_floor_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options);
libpostal_duplicate_status_t is_postal_code_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options);
libpostal_duplicate_status_t is_toponym_duplicate(size_t num_components1, char **labels1, char **values1, size_t num_components2, char **labels2, char **values2, libpostal_duplicate_options_t options);

libpostal_fuzzy_duplicate_status_t is_name_duplicate_fuzzy(size_t num_tokens1, char **tokens1, double *token_scores1, size_t num_tokens2, char **tokens2, double *token_scores2, libpostal_fuzzy_duplicate_options_t options);
libpostal_fuzzy_duplicate_status_t is_street_duplicate_fuzzy(size_t num_tokens1, char **tokens1, double *token_scores1, size_t num_tokens2, char **tokens2, double *token_scores2, libpostal_fuzzy_duplicate_options_t options);

#endif

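Example (illustrative, not part of this commit): the exact-match comparators declared above, with the default duplicate options. The literal values are invented; is_toponym_duplicate applies the city/city_district/suburb cascade from dedupe.c, assuming the address dictionaries are loaded.

    libpostal_duplicate_options_t opts = libpostal_get_default_duplicate_options();

    libpostal_duplicate_status_t status = is_postal_code_duplicate("10001", "10001", opts);
    // identical postal codes come back LIBPOSTAL_EXACT_DUPLICATE

    char *labels1[] = {"city", "state"};
    char *values1[] = {"brooklyn", "ny"};
    char *labels2[] = {"city_district", "state"};
    char *values2[] = {"brooklyn", "ny"};
    libpostal_duplicate_status_t topo = is_toponym_duplicate(2, labels1, values1, 2, labels2, values2, opts);
    // a city vs. city_district match like this one is LIBPOSTAL_LIKELY_DUPLICATE
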
981
src/double_metaphone.c
Normal file
@@ -0,0 +1,981 @@
#include <stdarg.h>
#include <string.h>
#include <stdbool.h>

#include "double_metaphone.h"
#include "string_utils.h"
#include "utf8proc/utf8proc.h"

static bool is_vowel(char c) {
    return (c == 'A' || c == 'E' || c == 'I' || c == 'O' || c == 'U' || c == 'Y');
}

static char get_char_at(char *str, size_t len, ssize_t idx) {
    if (idx < 0 || idx >= len) return 0;
    return str[idx];
}

static char *get_string_at(char *str, size_t len, ssize_t idx) {
    if (idx < 0 || idx >= len) return NULL;
    return str + idx;
}

static inline bool is_slavo_germanic(char *s) {
    return strstr(s, "W")
        || strstr(s, "K")
        || strstr(s, "CZ")
        || strstr(s, "WITZ");
}

static inline bool substring_equals(char *str, size_t len, ssize_t index, size_t substr_len, ...) {
    char *string_at_index = get_string_at(str, len, index);
    if (string_at_index == NULL) return false;

    va_list args;
    va_start(args, substr_len);

    bool matched = false;

    while (true) {
        char *sub = va_arg(args, char *);
        if (sub == NULL) break;

        if (utf8_compare_len(string_at_index, sub, substr_len) == 0) {
            matched = true;
            break;
        }
    }

    va_end(args);

    return matched;
}

double_metaphone_codes_t *double_metaphone(char *input) {
    if (input == NULL) return NULL;

    char *ptr = utf8_upper(input);

    /* Note: NFD normalization will help with simple decomposable accent characters
       like "É", "Ü", etc. which effectively become "E\u0301" and "U\u0308". It does
       not handle characters like "Ł". For these, use Latin-ASCII transliteration
       prior to calling this function.

       We can still check for a specific accented character like C with cedilla (Ç),
       by comparing with its decomposed form i.e. "C\xcc\xa7"
    */

    char *normalized = (char *)utf8proc_NFD((utf8proc_uint8_t *)ptr);

    if (normalized != NULL) {
        free(ptr);
        ptr = normalized;
    }

    if (ptr == NULL) {
        return NULL;
    }

    char *str = ptr;

    size_t len = strlen(str);
    char_array *primary = char_array_new_size(len + 1);
    char_array *secondary = char_array_new_size(len + 1);

    bool slavo_germanic = is_slavo_germanic(str);

    size_t current = 0;
    size_t last = len - 1;

    if (substring_equals(str, len, current, 2, "ʻ", NULL)) {
        str += 2;
    } else if (get_char_at(str, len, current) == '\'') {
        str++;
    }

    if (substring_equals(str, len, current, 2, "GN", "KN", "PN", "WR", "PS", NULL)) {
        current++;
    } else if (get_char_at(str, len, current) == 'X') {
        char_array_append(primary, "S");
        char_array_append(secondary, "S");
        current++;
    }

    while (true) {
        char c = *(str + current);
        if (c == '\x00') break;

        if (current == 0 && is_vowel(c)) {
            char_array_append(primary, "A");
            char_array_append(secondary, "A");
            current++;
            continue;
        } else if (c == 'B') {
            /* "-mb", e.g. "dumb", already skipped over... */
            char_array_append(primary, "P");
            char_array_append(secondary, "P");

            if (get_char_at(str, len, current + 1) == 'B') {
                current += 2;
            } else {
                current++;
            }
            continue;
        // Ç - C with cedilla (NFD-decomposed)
        } else if (substring_equals(str, len, current, 3, "C\xcc\xa7", NULL)) {
            char_array_append(primary, "S");
            char_array_append(secondary, "S");
            current += 2;
        } else if (c == 'C') {
            // various germanic
            if ((current > 1)
                && !is_vowel(get_char_at(str, len, current - 2))
                && (substring_equals(str, len, current - 1, 3, "ACH", NULL)
                && !substring_equals(str, len, current + 2, 1, "O", "A", "U", NULL))
                && ((get_char_at(str, len, current + 2) != 'I')
                && ((get_char_at(str, len, current + 2) != 'E')
                || substring_equals(str, len, current - 2, 6, "BACHER", "MACHER", NULL))
                )
            )
            {
                char_array_append(primary, "K");
                char_array_append(secondary, "K");
                current += 2;
                continue;
            }

            // special case for "caesar"
            if ((current == 0)
                && substring_equals(str, len, current, 6, "CAESAR", NULL))
            {
                char_array_append(primary, "S");
                char_array_append(secondary, "K");
                current += 2;
                continue;
            }

            // Italian e.g. "chianti"
            if (substring_equals(str, len, current, 4, "CHIA", NULL)) {
                char_array_append(primary, "K");
                char_array_append(secondary, "K");
                current += 2;
                continue;
            }

            if (substring_equals(str, len, current, 2, "CH", NULL)) {
                // "michael"
                if ((current > 0)
                    && substring_equals(str, len, current, 4, "CHAE", NULL))
                {
                    char_array_append(primary, "K");
                    char_array_append(secondary, "X");
                    current += 2;
                    continue;
                }

                // Greek roots e.g. "chemistry", "chorus"
                if ((current == 0)
                    && (substring_equals(str, len, current + 1, 5, "HARAC", "HARIS", "HOREO", NULL)
                    || substring_equals(str, len, current + 1, 4, "HIRO", "HAOS", "HAOT", NULL)
                    || (substring_equals(str, len, current + 1, 3, "HOR", "HYM", "HIA", "HEM", "HIM", NULL) && !substring_equals(str, len, current + 1, 5, "HEMIN", NULL)))
                )
                {
                    char_array_append(primary, "K");
                    char_array_append(secondary, "K");
                    current += 2;
                    continue;
                }

                // Germanic, Greek, or otherwise "ch" for "kh" sound
                if (
                    (substring_equals(str, len, 0, 4, "VAN ", "VON ", NULL)
                    || substring_equals(str, len, current - 5, 5, " VAN ", " VON ", NULL)
                    || substring_equals(str, len, 0, 3, "SCH", NULL))
                    // "orchestra", "orchid", "architect" but not "arch"
                    || substring_equals(str, len, current - 2, 6, "ORCHES", "ARCHIT", "ORCHID", NULL)
                    || substring_equals(str, len, current + 2, 1, "T", "S", NULL)
                    || (
                        (((current == 0) || substring_equals(str, len, current - 1, 1, "A", "O", "U", "E", NULL))
                        // e.g. not "breach", "broach", "pouch", "beech", etc.
                        && !substring_equals(str, len, current - 2, 2, "EA", "OU", "EE", "OA", "OO", "AU", NULL)
                        // e.g. not "lunch", "birch", "gulch"
                        && !substring_equals(str, len, current - 1, 1, "L", "R", "N", NULL))
                        // e.g. "wachtler", "wechsler", but not "tichner"
                        && ((current + 1 == last) || substring_equals(str, len, current + 2, 1, "L", "R", "N", "M", "B", "H", "F", "V", "W", " ", NULL))
                    )
                )
                {
                    char_array_append(primary, "K");
                    char_array_append(secondary, "K");
                } else {
                    if (current > 0) {
                        if (substring_equals(str, len, 0, 2, "MC", NULL)) {
                            char_array_append(primary, "K");
                            char_array_append(secondary, "K");
                        } else {
                            char_array_append(primary, "X");
                            char_array_append(secondary, "K");
                        }
                    } else {
                        char_array_append(primary, "X");
                        char_array_append(secondary, "X");
                    }
                }
                current += 2;
                continue;
            }

            // e.g. "czerny"
            if (substring_equals(str, len, current, 2, "CZ", NULL)
                && !substring_equals(str, len, current - 2, 4, "WICZ", NULL))
            {
                char_array_append(primary, "S");
                char_array_append(secondary, "X");
                current += 2;
                continue;
            }

            // double 'C' but not if e.g. "McClellan"
            if (substring_equals(str, len, current, 2, "CC", NULL)
                && !((current == 1) && get_char_at(str, len, 0) == 'M'))
            {
                // "bellocchio" but not "bacchus"
                if (substring_equals(str, len, current + 2, 1, "I", "E", "H", NULL)
                    && !substring_equals(str, len, current + 2, 3, "HUS", "HUM", "HUN", "HAN", NULL))
                {
                    // "accident", "accede", "succeed"
                    if (((current == 1)
                        && (get_char_at(str, len, current - 1) == 'A'))
                        || substring_equals(str, len, current - 1, 5, "UCCEE", "UCCES", NULL))
                    {
                        char_array_append(primary, "KS");
                        char_array_append(secondary, "KS");
                    // "pinocchio" but not "riccio" or "picchu"
                    } else if (get_char_at(str, len, current + 2) == 'H'
                               && !substring_equals(str, len, current + 2, 2, "HU", "HA", NULL)) {
                        char_array_append(primary, "K");
                        char_array_append(secondary, "X");
                    } else {
                        char_array_append(primary, "X");
                        char_array_append(secondary, "X");
                    }
                    current += 3;
                    continue;
                } else {
                    // Pierce's rule
                    char_array_append(primary, "K");
                    char_array_append(secondary, "K");
                    current += 2;
                    continue;
                }
            }

            if (substring_equals(str, len, current, 2, "CK", "CG", "CQ", NULL)) {
                char_array_append(primary, "K");
                char_array_append(secondary, "K");
                current += 2;
                continue;
            }

            if (substring_equals(str, len, current, 2, "CI", "CJ", "CE", "CY", NULL)) {
                if (substring_equals(str, len, current, 3, "CIO", "CIE", "CIA", "CIU", NULL)) {
                    char_array_append(primary, "S");
                    char_array_append(secondary, "X");
                } else {
                    char_array_append(primary, "S");
                    char_array_append(secondary, "S");
                }
                current += 2;
                continue;
            }

            // else
            char_array_append(primary, "K");
            char_array_append(secondary, "K");

            if (substring_equals(str, len, current + 1, 2, " C", " Q", " G", NULL)) {
                current += 3;
            } else if (substring_equals(str, len, current + 1, 1, "C", "K", "Q", NULL)
                       && !substring_equals(str, len, current + 1, 2, "CE", "CI", NULL))
            {
                current += 2;
            } else {
                current++;
            }

            continue;
        } else if (c == 'D') {
            if (substring_equals(str, len, current, 2, "DG", NULL)) {
                if (substring_equals(str, len, current + 2, 1, "I", "E", "Y", NULL)) {
                    // e.g. "edge"
                    char_array_append(primary, "J");
                    char_array_append(secondary, "J");
                    current += 3;
                    continue;
                } else {
                    char_array_append(primary, "TK");
                    char_array_append(secondary, "TK");
                    current += 2;
                    continue;
                }
            }

            if (substring_equals(str, len, current, 2, "DT", "DD", NULL)) {
                char_array_append(primary, "T");
                char_array_append(secondary, "T");
                current += 2;
                continue;
            }

            // else
            char_array_append(primary, "T");
            char_array_append(secondary, "T");
            current++;
            continue;
        } else if (c == 'F') {
            if (get_char_at(str, len, current + 1) == 'F') {
                current += 2;
            } else {
                current++;
            }

            char_array_append(primary, "F");
            char_array_append(secondary, "F");
            continue;
        } else if (c == 'G') {
            if (get_char_at(str, len, current + 1) == 'H') {
                if ((current > 0) && !is_vowel(get_char_at(str, len, current - 1))) {
                    char_array_append(primary, "K");
                    char_array_append(secondary, "K");
                    current += 2;
                    continue;
                }

                if (current == 0) {
                    // "ghislane", "ghiradelli"
                    if (get_char_at(str, len, current + 2) == 'I') {
                        char_array_append(primary, "J");
                        char_array_append(secondary, "J");
                    } else {
                        char_array_append(primary, "K");
                        char_array_append(secondary, "K");
                    }
                    current += 2;
                    continue;
                }

                // Parker's rule (with some further refinements) - e.g. "hugh"
                if (
                    ((current > 1)
                    && substring_equals(str, len, current - 2, 1, "B", "H", "D", NULL))
                    // e.g. "bough"
                    || ((current > 2)
                    && substring_equals(str, len, current - 3, 1, "B", "H", "D", NULL))
                    // e.g. "broughton"
                    || ((current > 3)
                    && substring_equals(str, len, current - 4, 1, "B", "H", NULL))
                )
                {
                    current += 2;
                    continue;
                } else {
                    // e.g. "laugh", "McLaughlin", "cough", "gough", "rough", "tough"
                    if ((current > 2)
                        && (get_char_at(str, len, current - 1) == 'U')
                        && substring_equals(str, len, current - 3, 1, "C", "G", "L", "R", "T", NULL))
                    {
                        char_array_append(primary, "F");
                        char_array_append(secondary, "F");
                    } else if ((current > 0)
                               && get_char_at(str, len, current - 1) != 'I')
                    {
                        char_array_append(primary, "K");
                        char_array_append(secondary, "K");
                    }
                    current += 2;
                    continue;
                }
            }

            if (get_char_at(str, len, current + 1) == 'N') {
                if ((current == 1) && is_vowel(get_char_at(str, len, 0))
                    && !slavo_germanic)
                {
                    char_array_append(primary, "KN");
                    char_array_append(secondary, "N");
                // not e.g. "cagney"
                } else if (!substring_equals(str, len, current + 2, 2, "EY", NULL)
                           && (get_char_at(str, len, current + 1) != 'Y')
                           && !slavo_germanic)
                {
                    char_array_append(primary, "N");
                    char_array_append(secondary, "KN");
                } else {
                    char_array_append(primary, "KN");
                    char_array_append(secondary, "KN");
                }
                current += 2;
                continue;
            }

            // "tagliaro"
            if (substring_equals(str, len, current + 1, 2, "LI", NULL)
                && !slavo_germanic)
            {
                char_array_append(primary, "KL");
                char_array_append(secondary, "L");
                current += 2;
                continue;
            }

            // -ges-, -gep-, -gel-, -gie- at beginning
            if ((current == 0)
                && ((get_char_at(str, len, current + 1) == 'Y')
                || substring_equals(str, len, current + 1, 2, "ES", "EP", "EB", "EL", "EY", "IB", "IL", "IN", "IE", "EI", "ER", NULL)))
            {
                char_array_append(primary, "K");
                char_array_append(secondary, "J");
                current += 2;
                continue;
            }

            // -ger-, -gy-
            if (
                (substring_equals(str, len, current + 1, 2, "ER", NULL)
                || (get_char_at(str, len, current + 1) == 'Y'))
                && !substring_equals(str, len, 0, 6, "DANGER", "RANGER", "MANGER", NULL)
                && !substring_equals(str, len, current - 1, 1, "E", "I", NULL)
                && !substring_equals(str, len, current - 1, 3, "RGY", "OGY", NULL)
            )
            {
                char_array_append(primary, "K");
                char_array_append(secondary, "J");
                current += 2;
                continue;
            }

            // Italian e.g. "viaggi"
            if (substring_equals(str, len, current + 1, 1, "E", "I", "Y", NULL)
                || substring_equals(str, len, current - 1, 4, "AGGI", "OGGI", NULL))
            {
                // obvious germanic
                if (
                    (substring_equals(str, len, 0, 4, "VAN ", "VON ", NULL)
                    || substring_equals(str, len, current - 5, 5, " VAN ", " VON ", NULL)
                    || substring_equals(str, len, 0, 3, "SCH", NULL))
                    || substring_equals(str, len, current + 1, 2, "ET", NULL))
                {
                    char_array_append(primary, "K");
                    char_array_append(secondary, "K");
                } else {
                    if (substring_equals(str, len, current + 1, 4, "IER ", NULL)
                        || ((current == len - 3) && substring_equals(str, len, current + 1, 3, "IER", NULL)))
                    {
                        char_array_append(primary, "J");
                        char_array_append(secondary, "J");
                    } else {
                        char_array_append(primary, "J");
                        char_array_append(secondary, "K");
                    }
                }
                current += 2;
                continue;
            }

            if (get_char_at(str, len, current + 1) == 'G') {
                current += 2;
            } else {
                current++;
            }

            char_array_append(primary, "K");
            char_array_append(secondary, "K");
            continue;
        } else if (c == 'H') {
            // only keep if first & before vowel or between 2 vowels
            if (((current == 0) || is_vowel(get_char_at(str, len, current - 1)))
                && is_vowel(get_char_at(str, len, current + 1)))
            {
                char_array_append(primary, "H");
                char_array_append(secondary, "H");
                current += 2;
            // also takes care of "HH"
            } else {
                current++;
            }
            continue;
        } else if (c == 'J') {
            // obvious Spanish, "Jose", "San Jacinto"
            if (substring_equals(str, len, current, 4, "JOSE", NULL)
                || substring_equals(str, len, current, 5, "JOSÉ", NULL)
                || substring_equals(str, len, 0, 4, "SAN ", NULL))
            {
                if (((current == 0)
                    && (get_char_at(str, len, current + 4) == ' '))
                    || substring_equals(str, len, 0, 4, "SAN ", NULL))
                {
                    char_array_append(primary, "H");
                    char_array_append(secondary, "H");
                } else {
                    char_array_append(primary, "J");
                    char_array_append(secondary, "H");
                }

                current++;
                continue;
            }

            if ((current == 0)
                && !substring_equals(str, len, current, 4, "JOSE", NULL)
                && !substring_equals(str, len, current, 5, "JOSÉ", NULL))
            {
                // Yankelovich/Jankelowicz
                char_array_append(primary, "J");
                char_array_append(secondary, "A");
                current++;
                continue;
            } else {
                // Spanish pronunciation of e.g. "bajador"
                if (is_vowel(get_char_at(str, len, current - 1))
                    && !slavo_germanic
                    && ((get_char_at(str, len, current + 1) == 'A')
                    || (get_char_at(str, len, current + 1) == 'O')))
                {
                    char_array_append(primary, "J");
                    char_array_append(secondary, "H");
                } else {
                    if (current == last || ((current == last - 1 || get_char_at(str, len, current + 2) == ' ') && isalpha(get_char_at(str, len, current - 1)) && substring_equals(str, len, current + 1, 1, "A", "O", NULL))) {
                        char_array_append(primary, "J");
                    } else {
                        if (!substring_equals(str, len, current + 1, 1, "L", "T", "K", "S", "N", "M", "B", "Z", NULL)
                            && !substring_equals(str, len, current - 1, 1, "S", "K", "L", NULL))
                        {
                            char_array_append(primary, "J");
                            char_array_append(secondary, "J");
                        }
                    }
                }

                // it could happen!
                if (get_char_at(str, len, current + 1) == 'J') {
                    current += 2;
                } else {
                    current++;
                }
                continue;
            }
        } else if (c == 'K') {
            if (get_char_at(str, len, current + 1) == 'K') {
                current += 2;
            } else {
                current++;
            }

            char_array_append(primary, "K");
            char_array_append(secondary, "K");
            continue;
        } else if (c == 'L') {
            if (get_char_at(str, len, current + 1) == 'L') {
                // Spanish e.g. "Cabrillo", "Gallegos"
                if (((current == (len - 3))
                    && substring_equals(str, len, current - 1, 4, "ILLO", "ILLA", "ALLE", NULL))
                    || ((substring_equals(str, len, last - 1, 2, "AS", "OS", NULL)
                    || substring_equals(str, len, last, 1, "A", "O", NULL))
                    && substring_equals(str, len, current - 1, 4, "ALLE", NULL)
                    )
                )
                {
                    char_array_append(primary, "L");
                    current += 2;
                    continue;
                }

                current += 2;
            } else {
                current++;
            }
            char_array_append(primary, "L");
            char_array_append(secondary, "L");
            continue;
        } else if (c == 'M') {
            if ((substring_equals(str, len, current - 1, 3, "UMB", NULL)
                && (((current + 1) == last)
                || substring_equals(str, len, current + 2, 2, "ER", NULL)))
                || (get_char_at(str, len, current + 1) == 'M'))
            {
                current += 2;
            } else {
                current++;
            }
            char_array_append(primary, "M");
            char_array_append(secondary, "M");
            continue;
        // Ñ (NFD normalized)
        } else if (substring_equals(str, len, current, 3, "N\xcc\x83", NULL)) {
            current += 3;
            char_array_append(primary, "N");
            char_array_append(secondary, "N");
            continue;
        } else if (c == 'N') {
            if (get_char_at(str, len, current + 1) == 'N') {
                current += 2;
            } else {
                current++;
            }

            char_array_append(primary, "N");
            char_array_append(secondary, "N");
            continue;
        } else if (c == 'P') {
            if (substring_equals(str, len, current + 1, 1, "H", "F", NULL)) {
                char_array_append(primary, "F");
                char_array_append(secondary, "F");
                current += 2;
                continue;
            }

            // also account for "Campbell", "raspberry"
            if (substring_equals(str, len, current + 1, 1, "P", "B", NULL)) {
                current += 2;
            } else {
                current++;
            }

            char_array_append(primary, "P");
            char_array_append(secondary, "P");
            continue;
        } else if (c == 'Q') {
            if (get_char_at(str, len, current + 1) == 'Q') {
                current += 2;
            } else {
                current += 1;
            }

            char_array_append(primary, "K");
            char_array_append(secondary, "K");
            continue;
        } else if (c == 'R') {
            // French e.g. "rogier", but exclude "hochmeier"
            if ((current == last)
                && !slavo_germanic
                && substring_equals(str, len, current - 2, 2, "IE", NULL)
                && !substring_equals(str, len, current - 4, 2, "ME", "MA", NULL))
            {
                char_array_append(secondary, "R");
            } else {
                char_array_append(primary, "R");
                char_array_append(secondary, "R");
            }

            if (get_char_at(str, len, current + 1) == 'R') {
                current += 2;
            } else {
                current++;
            }
            continue;
        } else if (c == 'S') {
            // special cases "island", "isle", "carlisle", "carlysle"
            if (substring_equals(str, len, current - 1, 3, "ISL", "YSL", NULL)) {
                current++;
                continue;
            }

            // special case "sugar-"
            if ((current == 0)
                && substring_equals(str, len, current, 5, "SUGAR", NULL))
            {
                char_array_append(primary, "X");
                char_array_append(secondary, "S");
                current++;
                continue;
            }

            if (substring_equals(str, len, current, 2, "SH", NULL)) {
                // Germanic
                if (substring_equals(str, len, current + 1, 4, "HEIM", "HOEK", "HOLM", "HOLZ", NULL)) {
                    char_array_append(primary, "S");
                    char_array_append(secondary, "S");
                } else {
                    char_array_append(primary, "X");
                    char_array_append(secondary, "X");
                }
                current += 2;
                continue;
            }

            // Italian & Armenian
            if (substring_equals(str, len, current, 3, "SIO", "SIA", NULL)
                || substring_equals(str, len, current, 4, "SIAN", NULL))
            {
                if (!slavo_germanic) {
                    char_array_append(primary, "S");
                    char_array_append(secondary, "X");
                } else {
                    char_array_append(primary, "S");
                    char_array_append(secondary, "S");
                }
                current += 3;
                continue;
            }

            /* German & Anglicisations, e.g. "Smith" match "Schmidt", "Snider" match "Schneider"
               also, -sz- in Slavic language although in Hungarian it is pronounced 's' */
            if (((current == 0)
                && substring_equals(str, len, current + 1, 1, "M", "N", "L", "W", NULL))
                || substring_equals(str, len, current + 1, 1, "Z", NULL))
            {
                char_array_append(primary, "S");
                char_array_append(secondary, "X");
                if (substring_equals(str, len, current + 1, 1, "Z", NULL)) {
                    current += 2;
                } else {
                    current++;
                }
                continue;
            }

            if (substring_equals(str, len, current, 2, "SC", NULL)) {
                // Schlesinger's rule
                if (get_char_at(str, len, current + 2) == 'H') {
                    // Dutch origin e.g. "school", "schooner"
                    if (substring_equals(str, len, current + 3, 2, "OO", "ER", "EN", "UY", "ED", "EM", NULL))
                    {
                        // "Schermerhorn", "Schenker"
                        if (substring_equals(str, len, current + 3, 2, "ER", "EN", NULL)) {
                            char_array_append(primary, "X");
                            char_array_append(secondary, "SK");
                        } else {
                            char_array_append(primary, "SK");
                            char_array_append(secondary, "SK");
                        }
                        current += 3;
                        continue;
                    } else {
                        if ((current == 0) && !is_vowel(get_char_at(str, len, 3))
                            && (get_char_at(str, len, 3) != 'W'))
                        {
                            char_array_append(primary, "X");
                            char_array_append(secondary, "S");
                        } else {
                            char_array_append(primary, "X");
                            char_array_append(secondary, "X");
                        }
                        current += 3;
                        continue;
                    }
                }

                if (substring_equals(str, len, current + 2, 1, "I", "E", "Y", NULL)) {
                    char_array_append(primary, "S");
                    char_array_append(secondary, "S");
                    current += 3;
                    continue;
                }

                char_array_append(primary, "SK");
                char_array_append(secondary, "SK");
                current += 3;
                continue;
            }

            // French e.g. "resnais", "artois"
            if ((current == last)
                && substring_equals(str, len, current - 2, 2, "AI", "OI", NULL))
            {
                char_array_append(secondary, "S");
            } else {
                char_array_append(primary, "S");
                char_array_append(secondary, "S");
            }

            if (substring_equals(str, len, current + 1, 1, "S", "Z", NULL)) {
                current += 2;
            } else {
                current++;
            }
            continue;
        } else if (c == 'T') {
            if (substring_equals(str, len, current, 4, "TION", NULL)) {
                char_array_append(primary, "X");
                char_array_append(secondary, "X");
                current += 3;
                continue;
            }

            if (substring_equals(str, len, current, 3, "TIA", "TCH", NULL)) {
                char_array_append(primary, "X");
                char_array_append(secondary, "X");
                current += 3;
                continue;
            }

            if (substring_equals(str, len, current, 2, "TH", NULL)
                || substring_equals(str, len, current, 3, "TTH", NULL))
            {
                // special case "Thomas", "Thames", or Germanic
                if (substring_equals(str, len, current + 2, 2, "OM", "AM", NULL)
                    || substring_equals(str, len, 0, 4, "VAN ", "VON ", NULL)
                    || substring_equals(str, len, current - 5, 5, " VAN ", " VON ", NULL)
                    || substring_equals(str, len, 0, 3, "SCH", NULL))
                {
                    char_array_append(primary, "T");
                    char_array_append(secondary, "T");
                } else {
                    // yes, zero
                    char_array_append(primary, "0");
                    char_array_append(secondary, "T");
                }

                current += 2;
                continue;
            }

            if (substring_equals(str, len, current + 1, 1, "T", "D", NULL)) {
                current += 2;
            } else {
                current++;
            }

            char_array_append(primary, "T");
            char_array_append(secondary, "T");
            continue;
        } else if (c == 'V') {
            if (get_char_at(str, len, current + 1) == 'V') {
                current += 2;
            } else {
                current++;
            }

            char_array_append(primary, "F");
            char_array_append(secondary, "F");
            continue;
        } else if (c == 'W') {
            // can also be in the middle of word
            if (substring_equals(str, len, current, 2, "WR", NULL)) {
                char_array_append(primary, "R");
                char_array_append(secondary, "R");
                current += 2;
                continue;
            }

            if ((current == 0)
                && (is_vowel(get_char_at(str, len, current + 1))
                || substring_equals(str, len, current, 2, "WH", NULL)))
            {
                // Wasserman should match Vasserman
                if (is_vowel(get_char_at(str, len, current + 1))) {
                    char_array_append(primary, "A");
                    char_array_append(secondary, "F");
                } else {
                    // need Uomo to match Womo
                    char_array_append(primary, "A");
                    char_array_append(secondary, "A");
                }
            }

            // Arnow should match Arnoff
            if (((current == last) && is_vowel(get_char_at(str, len, current - 1)))
                || substring_equals(str, len, current - 1, 5, "EWSKI", "EWSKY", "OWSKI", "OWSKY", NULL)
                || substring_equals(str, len, 0, 3, "SCH", NULL))
            {
                char_array_append(secondary, "F");
                current++;
                continue;
            }

            // Polish e.g. "Filipowicz"
            if (substring_equals(str, len, current, 4, "WICZ", "WITZ", NULL)) {
                char_array_append(primary, "TS");
                char_array_append(secondary, "FX");
                current += 4;
                continue;
            }

            // else skip it
            current++;
            continue;
        } else if (c == 'X') {
            // French e.g. "breaux"
            if (!((current == last)
                && (substring_equals(str, len, current - 3, 3, "IAU", "EAU", NULL)
                || substring_equals(str, len, current - 2, 2, "AU", "OU", NULL))))
            {
                char_array_append(primary, "KS");
                char_array_append(secondary, "KS");
            }

            if (substring_equals(str, len, current + 1, 1, "C", "X", NULL)) {
                current += 2;
            } else {
                current++;
            }
            continue;
        } else if (c == 'Z') {
            // Chinese Pinyin e.g. "Zhao"
            if (get_char_at(str, len, current + 1) == 'H') {
                char_array_append(primary, "J");
                char_array_append(secondary, "J");
                current += 2;
                continue;
            } else if (substring_equals(str, len, current + 1, 2, "ZO", "ZI", "ZA", NULL)
                       || (slavo_germanic
                       && ((current > 0)
                       && get_char_at(str, len, current - 1) != 'T')))
            {
                char_array_append(primary, "S");
                char_array_append(secondary, "TS");
            } else {
                char_array_append(primary, "S");
                char_array_append(secondary, "S");
            }

            if (get_char_at(str, len, current + 1) == 'Z') {
                current += 2;
            } else {
                current++;
            }
            continue;
        } else {
            current++;
        }
    }

    double_metaphone_codes_t *codes = calloc(1, sizeof(double_metaphone_codes_t));
    if (codes == NULL) {
        char_array_destroy(primary);
        char_array_destroy(secondary);
        return NULL;
    }

    codes->primary = char_array_to_string(primary);
    codes->secondary = char_array_to_string(secondary);

    free(ptr);

    return codes;
}

void double_metaphone_codes_destroy(double_metaphone_codes_t *codes) {
    if (codes != NULL) {
        if (codes->primary != NULL) {
            free(codes->primary);
        }

        if (codes->secondary != NULL) {
            free(codes->secondary);
        }

        free(codes);
    }
}
17
src/double_metaphone.h
Normal file
@@ -0,0 +1,17 @@
#ifndef DOUBLE_METAPHONE__H
#define DOUBLE_METAPHONE__H

#include <stdio.h>
#include <stdlib.h>

typedef struct double_metaphone_codes {
    char *primary;
    char *secondary;
} double_metaphone_codes_t;

double_metaphone_codes_t *double_metaphone(char *input);

void double_metaphone_codes_destroy(double_metaphone_codes_t *codes);

#endif

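Example (illustrative, not part of this commit): the classic Smith/Schmidt pairing. Each input gets two phonetic renderings, so two strings can be treated as phonetic matches when any of the four primary/secondary pairings agree; tracing the code above, "Smith" yields SM0/XMT and "Schmidt" yields XMT/SMT, matching on the shared XMT. string_equals is the helper from string_utils.h used elsewhere in this commit.

    double_metaphone_codes_t *codes1 = double_metaphone("Smith");
    double_metaphone_codes_t *codes2 = double_metaphone("Schmidt");
    if (codes1 != NULL && codes2 != NULL) {
        // compare all four combinations of primary and secondary codes
        bool phonetic_match = string_equals(codes1->primary, codes2->primary)
                           || string_equals(codes1->primary, codes2->secondary)
                           || string_equals(codes1->secondary, codes2->primary)
                           || string_equals(codes1->secondary, codes2->secondary);
        // phonetic_match is true for this pair (XMT appears on both sides)
    }
    double_metaphone_codes_destroy(codes1);
    double_metaphone_codes_destroy(codes2);
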
1640
src/expand.c
Normal file
File diff suppressed because it is too large
64
src/expand.h
Normal file
@@ -0,0 +1,64 @@
#ifndef EXPAND_H
#define EXPAND_H

#include <stdlib.h>
#include <stdio.h>

#include "libpostal.h"

#include "address_dictionary.h"
#include "collections.h"
#include "klib/khash.h"
#include "klib/ksort.h"
#include "trie_search.h"

typedef struct phrase_language {
    char *language;
    phrase_t phrase;
} phrase_language_t;

VECTOR_INIT(phrase_language_array, phrase_language_t)

#define ks_lt_phrase_language(a, b) ((a).phrase.start < (b).phrase.start || ((a).phrase.start == (b).phrase.start && (a).phrase.len > (b).phrase.len))

KSORT_INIT(phrase_language_array, phrase_language_t, ks_lt_phrase_language)

uint64_t get_normalize_token_options(libpostal_normalize_options_t options);
uint64_t get_normalize_string_options(libpostal_normalize_options_t options);

void add_normalized_strings_token(cstring_array *strings, char *str, token_t token, libpostal_normalize_options_t options);
void add_postprocessed_string(cstring_array *strings, char *str, libpostal_normalize_options_t options);

address_expansion_array *valid_affix_expansions(phrase_t phrase, libpostal_normalize_options_t options);

void cat_affix_expansion(char_array *key, char *str, address_expansion_t expansion, token_t token, phrase_t phrase, libpostal_normalize_options_t options);
bool add_affix_expansions(string_tree_t *tree, char *str, char *lang, token_t token, phrase_t prefix, phrase_t suffix, libpostal_normalize_options_t options, bool with_period);

bool expand_affixes(string_tree_t *tree, char *str, char *lang, token_t token, libpostal_normalize_options_t options);
bool expand_affixes_period(string_tree_t *tree, char *str, char *lang, token_t token, libpostal_normalize_options_t options);
bool add_period_affixes_or_token(string_tree_t *tree, char *str, token_t token, libpostal_normalize_options_t options);

bool normalize_ordinal_suffixes(string_tree_t *tree, char *str, char *lang, token_t token, size_t i, token_t prev_token, libpostal_normalize_options_t options);

void add_normalized_strings_tokenized(string_tree_t *tree, char *str, token_array *tokens, libpostal_normalize_options_t options);

bool address_phrase_is_ignorable_for_components(phrase_t phrase, uint32_t address_components);
bool address_phrase_is_edge_ignorable_for_components(phrase_t phrase, uint32_t address_components);
bool address_phrase_is_possible_root_for_components(phrase_t phrase, uint32_t address_components);
bool address_phrase_is_specifier_for_components(phrase_t phrase, uint32_t address_components);
bool address_phrase_is_valid_for_components(phrase_t phrase, uint32_t address_components);

typedef enum {
    EXPAND_PHRASES,
    KEEP_PHRASES,
    DELETE_PHRASES
} expansion_phrase_option_t;

cstring_array *expand_address(char *input, libpostal_normalize_options_t options, size_t *n);
cstring_array *expand_address_phrase_option(char *input, libpostal_normalize_options_t options, size_t *n, expansion_phrase_option_t phrase_option);
cstring_array *expand_address_root(char *input, libpostal_normalize_options_t options, size_t *n);
void expansion_array_destroy(char **expansions, size_t n);

#endif

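Example (illustrative, not part of this commit): expand_address above is the internal entry point; most consumers go through the public wrapper declared in libpostal.h below. This sketch assumes libpostal_setup() has already run and that the input string is invented; the exact set of expansions returned depends on the loaded dictionaries.

    size_t num_expansions;
    libpostal_normalize_options_t options = libpostal_get_default_options();
    char **expansions = libpostal_expand_address("120 E 96th St", options, &num_expansions);
    for (size_t i = 0; i < num_expansions; i++) {
        printf("%s\n", expansions[i]);   // expansions include forms like "120 east 96th street"
    }
    libpostal_expansion_array_destroy(expansions, num_expansions);
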
@@ -25,7 +25,7 @@ gazetteer_t gazetteer_config[] = {
    {DICTIONARY_NAMED_ORGANIZATION, LIBPOSTAL_ADDRESS_NAME},
    {DICTIONARY_NAMED_PERSON, LIBPOSTAL_ADDRESS_NAME | LIBPOSTAL_ADDRESS_STREET},
    {DICTIONARY_NO_NUMBER, LIBPOSTAL_ADDRESS_HOUSE_NUMBER},
    {DICTIONARY_NUMBER, LIBPOSTAL_ADDRESS_HOUSE_NUMBER | LIBPOSTAL_ADDRESS_UNIT | LIBPOSTAL_ADDRESS_LEVEL | LIBPOSTAL_ADDRESS_STAIRCASE | LIBPOSTAL_ADDRESS_ENTRANCE},
    {DICTIONARY_NUMBER, LIBPOSTAL_ADDRESS_HOUSE_NUMBER | LIBPOSTAL_ADDRESS_UNIT | LIBPOSTAL_ADDRESS_LEVEL | LIBPOSTAL_ADDRESS_PO_BOX | LIBPOSTAL_ADDRESS_STAIRCASE | LIBPOSTAL_ADDRESS_ENTRANCE},
    {DICTIONARY_PERSONAL_SUFFIX, LIBPOSTAL_ADDRESS_NAME | LIBPOSTAL_ADDRESS_STREET},
    {DICTIONARY_PERSONAL_TITLE, LIBPOSTAL_ADDRESS_NAME | LIBPOSTAL_ADDRESS_STREET},
    {DICTIONARY_PLACE_NAME, LIBPOSTAL_ADDRESS_NAME | LIBPOSTAL_ADDRESS_STREET},

69
src/jaccard.c
Normal file
@@ -0,0 +1,69 @@
#include "jaccard.h"

double jaccard_similarity(khash_t(str_set) *s1, khash_t(str_set) *s2) {
    if (s1 == NULL || s2 == NULL) return 0.0;

    size_t set_intersection = 0;
    size_t set_union = 0;

    khiter_t k;
    const char *key;

    kh_foreach_key(s1, key, {
        k = kh_get(str_set, s2, key);
        if (k != kh_end(s2)) {
            set_intersection++;
        } else {
            set_union++;
        }
    });

    // set_union contains all the keys that were in s1 but not s2
    // so just add all the keys in s2 to complete the union
    set_union += kh_size(s2);

    return (double)set_intersection / set_union;
}


double jaccard_similarity_string_arrays(size_t num_strings1, char **strings1, size_t num_strings2, char **strings2) {
    if (strings1 == NULL || strings2 == NULL || num_strings1 == 0 || num_strings2 == 0) return 0.0;

    khash_t(str_set) *string_set1 = kh_init(str_set);
    if (string_set1 == NULL) return 0.0;

    kh_resize(str_set, string_set1, num_strings1);
    int ret = 0;

    khiter_t k;

    for (size_t i = 0; i < num_strings1; i++) {
        char *str1 = strings1[i];
        k = kh_put(str_set, string_set1, str1, &ret);
        if (ret < 0) {
            kh_destroy(str_set, string_set1);
            return 0.0;
        }
    }

    khash_t(str_set) *string_set2 = kh_init(str_set);
    if (string_set2 == NULL) {
        kh_destroy(str_set, string_set1);
        return 0.0;
    }
    kh_resize(str_set, string_set2, num_strings2);
    for (size_t i = 0; i < num_strings2; i++) {
        char *str2 = strings2[i];
        k = kh_put(str_set, string_set2, str2, &ret);
        if (ret < 0) {
            kh_destroy(str_set, string_set1);
            kh_destroy(str_set, string_set2);
            return 0.0;
        }
    }

    double sim = jaccard_similarity(string_set1, string_set2);
    kh_destroy(str_set, string_set1);
    kh_destroy(str_set, string_set2);
    return sim;
}
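Worked example (illustrative, not part of this commit): on the token sets {"main", "st"} and {"main", "street"}, the intersection is {"main"} and the union is {"main", "st", "street"}, so the similarity is 1/3.

    char *a[] = {"main", "st"};
    char *b[] = {"main", "street"};
    double sim = jaccard_similarity_string_arrays(2, a, 2, b);
    // sim == 1.0 / 3.0; the measure is set-based, so "st" and "street" count as
    // distinct tokens, which is why the fuzzy dupe code above reserves it for
    // ideographic scripts and uses soft-TFIDF elsewhere
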
12
src/jaccard.h
Normal file
@@ -0,0 +1,12 @@
#ifndef JACCARD_H
#define JACCARD_H

#include <stdio.h>
#include <stdlib.h>

#include "collections.h"

double jaccard_similarity(khash_t(str_set) *s1, khash_t(str_set) *s2);
double jaccard_similarity_string_arrays(size_t num_strings1, char **strings1, size_t num_strings2, char **strings2);

#endif

1202
src/libpostal.c
File diff suppressed because it is too large
197
src/libpostal.h
@@ -24,6 +24,67 @@ extern "C" {
|
||||
|
||||
#define LIBPOSTAL_MAX_LANGUAGE_LEN 4
|
||||
|
||||
// Doing these as #defines so we can duplicate the values exactly in Python
|
||||
|
||||
|
||||
typedef enum {
|
||||
LIBPOSTAL_TOKEN_TYPE_END = 0, // Null byte
|
||||
// Word types
|
||||
LIBPOSTAL_TOKEN_TYPE_WORD = 1, // Any letter-only word (includes all unicode letters)
|
||||
LIBPOSTAL_TOKEN_TYPE_ABBREVIATION = 2, // Loose abbreviations (roughly anything containing a "." as we don't care about sentences in addresses)
|
||||
LIBPOSTAL_TOKEN_TYPE_IDEOGRAPHIC_CHAR = 3, // For languages that don't separate on whitespace (e.g. Chinese, Japanese, Korean), separate by character
|
||||
LIBPOSTAL_TOKEN_TYPE_HANGUL_SYLLABLE = 4, // Hangul syllable sequences which contain more than one codepoint
|
||||
LIBPOSTAL_TOKEN_TYPE_ACRONYM = 5, // Specifically things like U.N. where we may delete internal periods
|
||||
|
||||
LIBPOSTAL_TOKEN_TYPE_PHRASE = 10, // Not part of the first stage tokenizer, but may be used after phrase parsing
|
||||
|
||||
// Special tokens
|
||||
LIBPOSTAL_TOKEN_TYPE_EMAIL = 20, // Make sure emails are tokenized altogether
|
||||
LIBPOSTAL_TOKEN_TYPE_URL = 21, // Make sure urls are tokenized altogether
|
||||
LIBPOSTAL_TOKEN_TYPE_US_PHONE = 22, // US phone number (with or without country code)
|
||||
LIBPOSTAL_TOKEN_TYPE_INTL_PHONE = 23, // A non-US phone number (must have country code)
|
||||
|
||||
// Numbers and numeric types
|
||||
LIBPOSTAL_TOKEN_TYPE_NUMERIC = 50, // Any sequence containing a digit
|
||||
LIBPOSTAL_TOKEN_TYPE_ORDINAL = 51, // 1st, 2nd, 1er, 1 etc.
|
||||
LIBPOSTAL_TOKEN_TYPE_ROMAN_NUMERAL = 52, // II, III, VI, etc.
|
||||
LIBPOSTAL_TOKEN_TYPE_IDEOGRAPHIC_NUMBER = 53, // All numeric ideographic characters, includes e.g. Han numbers and chars like "²"
|
||||
|
||||
// Punctuation types, may separate a phrase
|
||||
LIBPOSTAL_TOKEN_TYPE_PERIOD = 100,
|
||||
LIBPOSTAL_TOKEN_TYPE_EXCLAMATION = 101,
|
||||
LIBPOSTAL_TOKEN_TYPE_QUESTION_MARK = 102,
|
||||
LIBPOSTAL_TOKEN_TYPE_COMMA = 103,
|
||||
LIBPOSTAL_TOKEN_TYPE_COLON = 104,
|
||||
LIBPOSTAL_TOKEN_TYPE_SEMICOLON = 105,
|
||||
LIBPOSTAL_TOKEN_TYPE_PLUS = 106,
|
||||
LIBPOSTAL_TOKEN_TYPE_AMPERSAND = 107,
|
||||
LIBPOSTAL_TOKEN_TYPE_AT_SIGN = 108,
|
||||
LIBPOSTAL_TOKEN_TYPE_POUND = 109,
|
||||
LIBPOSTAL_TOKEN_TYPE_ELLIPSIS = 110,
|
||||
LIBPOSTAL_TOKEN_TYPE_DASH = 111,
|
||||
LIBPOSTAL_TOKEN_TYPE_BREAKING_DASH = 112,
|
||||
LIBPOSTAL_TOKEN_TYPE_HYPHEN = 113,
|
||||
LIBPOSTAL_TOKEN_TYPE_PUNCT_OPEN = 114,
|
||||
LIBPOSTAL_TOKEN_TYPE_PUNCT_CLOSE = 115,
|
||||
LIBPOSTAL_TOKEN_TYPE_DOUBLE_QUOTE = 119,
|
||||
LIBPOSTAL_TOKEN_TYPE_SINGLE_QUOTE = 120,
|
||||
LIBPOSTAL_TOKEN_TYPE_OPEN_QUOTE = 121,
|
||||
LIBPOSTAL_TOKEN_TYPE_CLOSE_QUOTE = 122,
|
||||
LIBPOSTAL_TOKEN_TYPE_SLASH = 124,
|
||||
LIBPOSTAL_TOKEN_TYPE_BACKSLASH = 125,
|
||||
LIBPOSTAL_TOKEN_TYPE_GREATER_THAN = 126,
|
||||
LIBPOSTAL_TOKEN_TYPE_LESS_THAN = 127,
|
||||
|
||||
// Non-letters and whitespace
|
||||
LIBPOSTAL_TOKEN_TYPE_OTHER = 200,
|
||||
LIBPOSTAL_TOKEN_TYPE_WHITESPACE = 300,
|
||||
LIBPOSTAL_TOKEN_TYPE_NEWLINE = 301,
|
||||
|
||||
LIBPOSTAL_TOKEN_TYPE_INVALID_CHAR = 500
|
||||
} libpostal_token_type_t;
|
||||
|
||||
|
||||
/*
|
||||
Address dictionaries
|
||||
*/
|
||||
@@ -77,6 +138,7 @@ typedef struct libpostal_normalize_options {
|
||||
LIBPOSTAL_EXPORT libpostal_normalize_options_t libpostal_get_default_options(void);
|
||||
|
||||
LIBPOSTAL_EXPORT char **libpostal_expand_address(char *input, libpostal_normalize_options_t options, size_t *n);
|
||||
LIBPOSTAL_EXPORT char **libpostal_expand_address_root(char *input, libpostal_normalize_options_t options, size_t *n);
|
||||
|
||||
LIBPOSTAL_EXPORT void libpostal_expansion_array_destroy(char **expansions, size_t n);
|
||||
|
||||
@@ -90,6 +152,8 @@ typedef struct libpostal_address_parser_response {
|
||||
char **labels;
|
||||
} libpostal_address_parser_response_t;
|
||||
|
||||
typedef libpostal_address_parser_response_t libpostal_parsed_address_components_t;
|
||||
|
||||
typedef struct libpostal_address_parser_options {
|
||||
char *language;
|
||||
char *country;
|
||||
@@ -103,6 +167,87 @@ LIBPOSTAL_EXPORT libpostal_address_parser_response_t *libpostal_parse_address(ch
|
||||
|
||||
LIBPOSTAL_EXPORT bool libpostal_parser_print_features(bool print_features);
|
||||
|
||||
|
||||
/*
|
||||
Deduping
|
||||
*/
|
||||
|
||||
|
||||
// Near-dupe hashing methods
|
||||
|
||||
typedef struct libpostal_near_dupe_hash_options {
|
||||
bool with_name;
|
||||
bool with_address;
|
||||
bool with_unit;
|
||||
bool with_city_or_equivalent;
|
||||
bool with_small_containing_boundaries;
|
||||
bool with_postal_code;
|
||||
bool with_latlon;
|
||||
double latitude;
|
||||
double longitude;
|
||||
uint32_t geohash_precision;
|
||||
bool name_and_address_keys;
|
||||
bool name_only_keys;
|
||||
bool address_only_keys;
|
||||
} libpostal_near_dupe_hash_options_t;
|
||||
|
||||
|
||||
LIBPOSTAL_EXPORT libpostal_near_dupe_hash_options_t libpostal_get_near_dupe_hash_default_options(void);
|
||||
LIBPOSTAL_EXPORT char **libpostal_near_dupe_hashes(size_t num_components, char **labels, char **values, libpostal_near_dupe_hash_options_t options, size_t *num_hashes);
|
||||
LIBPOSTAL_EXPORT char **libpostal_near_dupe_hashes_languages(size_t num_components, char **labels, char **values, libpostal_near_dupe_hash_options_t options, size_t num_languages, char **languages, size_t *num_hashes);
|
||||
|
||||
// Dupe language classification
|
||||
|
||||
LIBPOSTAL_EXPORT char **libpostal_place_languages(size_t num_components, char **labels, char **values, size_t *num_languages);
// Pairwise dupe methods

typedef enum {
    LIBPOSTAL_NULL_DUPLICATE_STATUS = -1,
    LIBPOSTAL_NON_DUPLICATE = 0,
    LIBPOSTAL_POSSIBLE_DUPLICATE_NEEDS_REVIEW = 3,
    LIBPOSTAL_LIKELY_DUPLICATE = 6,
    LIBPOSTAL_EXACT_DUPLICATE = 9,
} libpostal_duplicate_status_t;

typedef struct libpostal_duplicate_options {
    size_t num_languages;
    char **languages;
} libpostal_duplicate_options_t;

LIBPOSTAL_EXPORT libpostal_duplicate_options_t libpostal_get_default_duplicate_options(void);
LIBPOSTAL_EXPORT libpostal_duplicate_options_t libpostal_get_duplicate_options_with_languages(size_t num_languages, char **languages);

LIBPOSTAL_EXPORT libpostal_duplicate_status_t libpostal_is_name_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options);
LIBPOSTAL_EXPORT libpostal_duplicate_status_t libpostal_is_street_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options);
LIBPOSTAL_EXPORT libpostal_duplicate_status_t libpostal_is_house_number_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options);
LIBPOSTAL_EXPORT libpostal_duplicate_status_t libpostal_is_po_box_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options);
LIBPOSTAL_EXPORT libpostal_duplicate_status_t libpostal_is_unit_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options);
LIBPOSTAL_EXPORT libpostal_duplicate_status_t libpostal_is_floor_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options);
LIBPOSTAL_EXPORT libpostal_duplicate_status_t libpostal_is_postal_code_duplicate(char *value1, char *value2, libpostal_duplicate_options_t options);
LIBPOSTAL_EXPORT libpostal_duplicate_status_t libpostal_is_toponym_duplicate(size_t num_components1, char **labels1, char **values1, size_t num_components2, char **labels2, char **values2, libpostal_duplicate_options_t options);
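The pairwise methods grade a single component pair, and the statuses are ordered, so a threshold comparison works. A sketch, assuming libpostal is already set up:

libpostal_duplicate_options_t options = libpostal_get_default_duplicate_options();
libpostal_duplicate_status_t status = libpostal_is_street_duplicate("Franklin Ave", "Franklin Avenue", options);
if (status >= LIBPOSTAL_LIKELY_DUPLICATE) {
    printf("likely the same street\n");
}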
// Pairwise fuzzy dupe methods, return status & similarity

typedef struct libpostal_fuzzy_duplicate_options {
    size_t num_languages;
    char **languages;
    double needs_review_threshold;
    double likely_dupe_threshold;
} libpostal_fuzzy_duplicate_options_t;

typedef struct libpostal_fuzzy_duplicate_status {
    libpostal_duplicate_status_t status;
    double similarity;
} libpostal_fuzzy_duplicate_status_t;

LIBPOSTAL_EXPORT libpostal_fuzzy_duplicate_options_t libpostal_get_default_fuzzy_duplicate_options(void);
LIBPOSTAL_EXPORT libpostal_fuzzy_duplicate_options_t libpostal_get_default_fuzzy_duplicate_options_with_languages(size_t num_languages, char **languages);

LIBPOSTAL_EXPORT libpostal_fuzzy_duplicate_status_t libpostal_is_name_duplicate_fuzzy(size_t num_tokens1, char **tokens1, double *token_scores1, size_t num_tokens2, char **tokens2, double *token_scores2, libpostal_fuzzy_duplicate_options_t options);
LIBPOSTAL_EXPORT libpostal_fuzzy_duplicate_status_t libpostal_is_street_duplicate_fuzzy(size_t num_tokens1, char **tokens1, double *token_scores1, size_t num_tokens2, char **tokens2, double *token_scores2, libpostal_fuzzy_duplicate_options_t options);
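The fuzzy variants take pre-tokenized values with caller-supplied per-token weights (e.g. TF-IDF over a corpus; uniform weights are used here purely for illustration) and return both a status and a similarity score:

char *tokens1[] = {"franklin", "ave"};
char *tokens2[] = {"franklin", "av"};
double scores1[] = {0.5, 0.5};
double scores2[] = {0.5, 0.5};

libpostal_fuzzy_duplicate_options_t options = libpostal_get_default_fuzzy_duplicate_options();
libpostal_fuzzy_duplicate_status_t result = libpostal_is_street_duplicate_fuzzy(2, tokens1, scores1, 2, tokens2, scores2, options);
printf("status=%d similarity=%f\n", (int)result.status, result.similarity);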
// Setup/teardown methods

LIBPOSTAL_EXPORT bool libpostal_setup(void);
@@ -117,6 +262,58 @@ LIBPOSTAL_EXPORT bool libpostal_setup_language_classifier(void);
LIBPOSTAL_EXPORT bool libpostal_setup_language_classifier_datadir(char *datadir);
LIBPOSTAL_EXPORT void libpostal_teardown_language_classifier(void);

/* Tokenization and token normalization APIs */

typedef struct libpostal_token {
    size_t offset;
    size_t len;
    uint16_t type;
} libpostal_token_t;

LIBPOSTAL_EXPORT libpostal_token_t *libpostal_tokenize(char *input, bool whitespace, size_t *n);
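A sketch of the tokenizer API; passing false for the whitespace flag is assumed to skip whitespace tokens, and freeing the returned array with free() is assumed to be the caller's responsibility:

size_t num_tokens = 0;
libpostal_token_t *tokens = libpostal_tokenize("781 Franklin Ave", false, &num_tokens);
for (size_t i = 0; i < num_tokens; i++) {
    printf("offset=%zu len=%zu type=%u\n", tokens[i].offset, tokens[i].len, (unsigned int)tokens[i].type);
}
free(tokens);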
// Normalize string options
#define LIBPOSTAL_NORMALIZE_STRING_LATIN_ASCII 1 << 0
#define LIBPOSTAL_NORMALIZE_STRING_TRANSLITERATE 1 << 1
#define LIBPOSTAL_NORMALIZE_STRING_STRIP_ACCENTS 1 << 2
#define LIBPOSTAL_NORMALIZE_STRING_DECOMPOSE 1 << 3
#define LIBPOSTAL_NORMALIZE_STRING_LOWERCASE 1 << 4
#define LIBPOSTAL_NORMALIZE_STRING_TRIM 1 << 5
#define LIBPOSTAL_NORMALIZE_STRING_REPLACE_HYPHENS 1 << 6
#define LIBPOSTAL_NORMALIZE_STRING_COMPOSE 1 << 7
#define LIBPOSTAL_NORMALIZE_STRING_SIMPLE_LATIN_ASCII 1 << 8
#define LIBPOSTAL_NORMALIZE_STRING_REPLACE_NUMEX 1 << 9

// Normalize token options
#define LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_HYPHENS 1 << 0
#define LIBPOSTAL_NORMALIZE_TOKEN_DELETE_HYPHENS 1 << 1
#define LIBPOSTAL_NORMALIZE_TOKEN_DELETE_FINAL_PERIOD 1 << 2
#define LIBPOSTAL_NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS 1 << 3
#define LIBPOSTAL_NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES 1 << 4
#define LIBPOSTAL_NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE 1 << 5
#define LIBPOSTAL_NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC 1 << 6
#define LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_DIGITS 1 << 7
#define LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_NUMERIC_TOKEN_LETTERS 1 << 8
#define LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_NUMERIC_HYPHENS 1 << 9

#define LIBPOSTAL_NORMALIZE_DEFAULT_STRING_OPTIONS (LIBPOSTAL_NORMALIZE_STRING_LATIN_ASCII | LIBPOSTAL_NORMALIZE_STRING_COMPOSE | LIBPOSTAL_NORMALIZE_STRING_TRIM | LIBPOSTAL_NORMALIZE_STRING_REPLACE_HYPHENS | LIBPOSTAL_NORMALIZE_STRING_STRIP_ACCENTS | LIBPOSTAL_NORMALIZE_STRING_LOWERCASE)

#define LIBPOSTAL_NORMALIZE_DEFAULT_TOKEN_OPTIONS (LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_HYPHENS | LIBPOSTAL_NORMALIZE_TOKEN_DELETE_FINAL_PERIOD | LIBPOSTAL_NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS | LIBPOSTAL_NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES | LIBPOSTAL_NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE)

#define LIBPOSTAL_NORMALIZE_TOKEN_OPTIONS_DROP_PERIODS (LIBPOSTAL_NORMALIZE_TOKEN_DELETE_FINAL_PERIOD | LIBPOSTAL_NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS)

#define LIBPOSTAL_NORMALIZE_DEFAULT_TOKEN_OPTIONS_NUMERIC (LIBPOSTAL_NORMALIZE_DEFAULT_TOKEN_OPTIONS | LIBPOSTAL_NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC)

LIBPOSTAL_EXPORT char *libpostal_normalize_string(char *input, uint64_t options);

typedef struct libpostal_normalized_token {
    char *str;
    libpostal_token_t token;
} libpostal_normalized_token_t;

libpostal_normalized_token_t *libpostal_normalized_tokens(char *input, uint64_t string_options, uint64_t token_options, bool whitespace, size_t *n);
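A sketch combining the two normalization entry points with the default option sets defined above; freeing the returned string and array is assumed to be the caller's responsibility:

char *normalized = libpostal_normalize_string("Sixty-Four W 107th St", LIBPOSTAL_NORMALIZE_DEFAULT_STRING_OPTIONS);
printf("%s\n", normalized);
free(normalized);

size_t n = 0;
libpostal_normalized_token_t *norm_tokens = libpostal_normalized_tokens("St.-Laurent", LIBPOSTAL_NORMALIZE_DEFAULT_STRING_OPTIONS, LIBPOSTAL_NORMALIZE_DEFAULT_TOKEN_OPTIONS, false, &n);
for (size_t i = 0; i < n; i++) {
    printf("%s\n", norm_tokens[i].str);
    free(norm_tokens[i].str);
}
free(norm_tokens);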
#ifdef __cplusplus
}
#endif

970
src/near_dupe.c
Normal file
@@ -0,0 +1,970 @@
#include <stdarg.h>

#include "log/log.h"

#include "near_dupe.h"
#include "double_metaphone.h"
#include "expand.h"
#include "features.h"
#include "float_utils.h"
#include "place.h"
#include "scanner.h"
#include "string_utils.h"
#include "tokens.h"
#include "unicode_scripts.h"
#include "unicode_script_types.h"

#include "geohash/geohash.h"

#define MAX_GEOHASH_PRECISION 12
#define NAME_KEY_PREFIX "n"
#define ADDRESS_KEY_PREFIX "a"
#define UNIT_KEY_PREFIX "u"
#define PO_BOX_KEY_PREFIX "p"
#define HOUSE_NUMBER_KEY_PREFIX "h"
#define STREET_KEY_PREFIX "s"

#define GEOHASH_KEY_PREFIX "gh"
#define POSTCODE_KEY_PREFIX "pc"
#define CITY_KEY_PREFIX "ct"
#define CONTAINING_BOUNDARY_PREFIX "cb"

#define NAME_ADDRESS_UNIT_GEOHASH_KEY_PREFIX NAME_KEY_PREFIX ADDRESS_KEY_PREFIX UNIT_KEY_PREFIX GEOHASH_KEY_PREFIX
#define NAME_ADDRESS_UNIT_CITY_KEY_PREFIX NAME_KEY_PREFIX ADDRESS_KEY_PREFIX UNIT_KEY_PREFIX CITY_KEY_PREFIX
#define NAME_ADDRESS_UNIT_CONTAINING_KEY_PREFIX NAME_KEY_PREFIX ADDRESS_KEY_PREFIX UNIT_KEY_PREFIX CONTAINING_BOUNDARY_PREFIX
#define NAME_ADDRESS_UNIT_POSTCODE_KEY_PREFIX NAME_KEY_PREFIX ADDRESS_KEY_PREFIX UNIT_KEY_PREFIX POSTCODE_KEY_PREFIX

#define NAME_ADDRESS_GEOHASH_KEY_PREFIX NAME_KEY_PREFIX ADDRESS_KEY_PREFIX GEOHASH_KEY_PREFIX
#define NAME_ADDRESS_CITY_KEY_PREFIX NAME_KEY_PREFIX ADDRESS_KEY_PREFIX CITY_KEY_PREFIX
#define NAME_ADDRESS_CONTAINING_KEY_PREFIX NAME_KEY_PREFIX ADDRESS_KEY_PREFIX CONTAINING_BOUNDARY_PREFIX
#define NAME_ADDRESS_POSTCODE_KEY_PREFIX NAME_KEY_PREFIX ADDRESS_KEY_PREFIX POSTCODE_KEY_PREFIX

#define NAME_HOUSE_NUMBER_UNIT_GEOHASH_KEY_PREFIX NAME_KEY_PREFIX HOUSE_NUMBER_KEY_PREFIX UNIT_KEY_PREFIX GEOHASH_KEY_PREFIX
#define NAME_HOUSE_NUMBER_UNIT_CITY_KEY_PREFIX NAME_KEY_PREFIX HOUSE_NUMBER_KEY_PREFIX UNIT_KEY_PREFIX CITY_KEY_PREFIX
#define NAME_HOUSE_NUMBER_UNIT_CONTAINING_KEY_PREFIX NAME_KEY_PREFIX HOUSE_NUMBER_KEY_PREFIX UNIT_KEY_PREFIX CONTAINING_BOUNDARY_PREFIX
#define NAME_HOUSE_NUMBER_UNIT_POSTCODE_KEY_PREFIX NAME_KEY_PREFIX HOUSE_NUMBER_KEY_PREFIX UNIT_KEY_PREFIX POSTCODE_KEY_PREFIX

#define NAME_HOUSE_NUMBER_GEOHASH_KEY_PREFIX NAME_KEY_PREFIX HOUSE_NUMBER_KEY_PREFIX GEOHASH_KEY_PREFIX
#define NAME_HOUSE_NUMBER_CITY_KEY_PREFIX NAME_KEY_PREFIX HOUSE_NUMBER_KEY_PREFIX CITY_KEY_PREFIX
#define NAME_HOUSE_NUMBER_CONTAINING_KEY_PREFIX NAME_KEY_PREFIX HOUSE_NUMBER_KEY_PREFIX CONTAINING_BOUNDARY_PREFIX
#define NAME_HOUSE_NUMBER_POSTCODE_KEY_PREFIX NAME_KEY_PREFIX HOUSE_NUMBER_KEY_PREFIX POSTCODE_KEY_PREFIX

#define NAME_STREET_UNIT_GEOHASH_KEY_PREFIX NAME_KEY_PREFIX STREET_KEY_PREFIX UNIT_KEY_PREFIX GEOHASH_KEY_PREFIX
#define NAME_STREET_UNIT_CITY_KEY_PREFIX NAME_KEY_PREFIX STREET_KEY_PREFIX UNIT_KEY_PREFIX CITY_KEY_PREFIX
#define NAME_STREET_UNIT_CONTAINING_KEY_PREFIX NAME_KEY_PREFIX STREET_KEY_PREFIX UNIT_KEY_PREFIX CONTAINING_BOUNDARY_PREFIX
#define NAME_STREET_UNIT_POSTCODE_KEY_PREFIX NAME_KEY_PREFIX STREET_KEY_PREFIX UNIT_KEY_PREFIX POSTCODE_KEY_PREFIX

#define NAME_STREET_GEOHASH_KEY_PREFIX NAME_KEY_PREFIX STREET_KEY_PREFIX GEOHASH_KEY_PREFIX
#define NAME_STREET_CITY_KEY_PREFIX NAME_KEY_PREFIX STREET_KEY_PREFIX CITY_KEY_PREFIX
#define NAME_STREET_CONTAINING_KEY_PREFIX NAME_KEY_PREFIX STREET_KEY_PREFIX CONTAINING_BOUNDARY_PREFIX
#define NAME_STREET_POSTCODE_KEY_PREFIX NAME_KEY_PREFIX STREET_KEY_PREFIX POSTCODE_KEY_PREFIX

#define NAME_PO_BOX_GEOHASH_KEY_PREFIX NAME_KEY_PREFIX PO_BOX_KEY_PREFIX GEOHASH_KEY_PREFIX
#define NAME_PO_BOX_CITY_KEY_PREFIX NAME_KEY_PREFIX PO_BOX_KEY_PREFIX CITY_KEY_PREFIX
#define NAME_PO_BOX_CONTAINING_KEY_PREFIX NAME_KEY_PREFIX PO_BOX_KEY_PREFIX CONTAINING_BOUNDARY_PREFIX
#define NAME_PO_BOX_POSTCODE_KEY_PREFIX NAME_KEY_PREFIX PO_BOX_KEY_PREFIX POSTCODE_KEY_PREFIX

#define NAME_UNIT_GEOHASH_KEY_PREFIX NAME_KEY_PREFIX UNIT_KEY_PREFIX GEOHASH_KEY_PREFIX
#define NAME_UNIT_CITY_KEY_PREFIX NAME_KEY_PREFIX UNIT_KEY_PREFIX CITY_KEY_PREFIX
#define NAME_UNIT_CONTAINING_KEY_PREFIX NAME_KEY_PREFIX UNIT_KEY_PREFIX CONTAINING_BOUNDARY_PREFIX
#define NAME_UNIT_POSTCODE_KEY_PREFIX NAME_KEY_PREFIX UNIT_KEY_PREFIX POSTCODE_KEY_PREFIX

#define NAME_GEOHASH_KEY_PREFIX NAME_KEY_PREFIX GEOHASH_KEY_PREFIX
#define NAME_CITY_KEY_PREFIX NAME_KEY_PREFIX CITY_KEY_PREFIX
#define NAME_CONTAINING_KEY_PREFIX NAME_KEY_PREFIX CONTAINING_BOUNDARY_PREFIX
#define NAME_POSTCODE_KEY_PREFIX NAME_KEY_PREFIX POSTCODE_KEY_PREFIX

#define ADDRESS_UNIT_GEOHASH_KEY_PREFIX ADDRESS_KEY_PREFIX UNIT_KEY_PREFIX GEOHASH_KEY_PREFIX
#define ADDRESS_UNIT_CITY_KEY_PREFIX ADDRESS_KEY_PREFIX UNIT_KEY_PREFIX CITY_KEY_PREFIX
#define ADDRESS_UNIT_CONTAINING_KEY_PREFIX ADDRESS_KEY_PREFIX UNIT_KEY_PREFIX CONTAINING_BOUNDARY_PREFIX
#define ADDRESS_UNIT_POSTCODE_KEY_PREFIX ADDRESS_KEY_PREFIX UNIT_KEY_PREFIX POSTCODE_KEY_PREFIX

#define ADDRESS_GEOHASH_KEY_PREFIX ADDRESS_KEY_PREFIX GEOHASH_KEY_PREFIX
#define ADDRESS_CITY_KEY_PREFIX ADDRESS_KEY_PREFIX CITY_KEY_PREFIX
#define ADDRESS_CONTAINING_KEY_PREFIX ADDRESS_KEY_PREFIX CONTAINING_BOUNDARY_PREFIX
#define ADDRESS_POSTCODE_KEY_PREFIX ADDRESS_KEY_PREFIX POSTCODE_KEY_PREFIX

#define HOUSE_NUMBER_UNIT_GEOHASH_KEY_PREFIX HOUSE_NUMBER_KEY_PREFIX UNIT_KEY_PREFIX GEOHASH_KEY_PREFIX
#define HOUSE_NUMBER_UNIT_CITY_KEY_PREFIX HOUSE_NUMBER_KEY_PREFIX UNIT_KEY_PREFIX CITY_KEY_PREFIX
#define HOUSE_NUMBER_UNIT_CONTAINING_KEY_PREFIX HOUSE_NUMBER_KEY_PREFIX UNIT_KEY_PREFIX CONTAINING_BOUNDARY_PREFIX
#define HOUSE_NUMBER_UNIT_POSTCODE_KEY_PREFIX HOUSE_NUMBER_KEY_PREFIX UNIT_KEY_PREFIX POSTCODE_KEY_PREFIX

#define HOUSE_NUMBER_GEOHASH_KEY_PREFIX HOUSE_NUMBER_KEY_PREFIX GEOHASH_KEY_PREFIX
#define HOUSE_NUMBER_CITY_KEY_PREFIX HOUSE_NUMBER_KEY_PREFIX CITY_KEY_PREFIX
#define HOUSE_NUMBER_CONTAINING_KEY_PREFIX HOUSE_NUMBER_KEY_PREFIX CONTAINING_BOUNDARY_PREFIX
#define HOUSE_NUMBER_POSTCODE_KEY_PREFIX HOUSE_NUMBER_KEY_PREFIX POSTCODE_KEY_PREFIX

#define STREET_GEOHASH_KEY_PREFIX STREET_KEY_PREFIX GEOHASH_KEY_PREFIX
#define STREET_CITY_KEY_PREFIX STREET_KEY_PREFIX CITY_KEY_PREFIX
#define STREET_CONTAINING_KEY_PREFIX STREET_KEY_PREFIX CONTAINING_BOUNDARY_PREFIX
#define STREET_POSTCODE_KEY_PREFIX STREET_KEY_PREFIX POSTCODE_KEY_PREFIX

#define STREET_UNIT_GEOHASH_KEY_PREFIX STREET_KEY_PREFIX UNIT_KEY_PREFIX GEOHASH_KEY_PREFIX
#define STREET_UNIT_CITY_KEY_PREFIX STREET_KEY_PREFIX UNIT_KEY_PREFIX CITY_KEY_PREFIX
#define STREET_UNIT_CONTAINING_KEY_PREFIX STREET_KEY_PREFIX UNIT_KEY_PREFIX CONTAINING_BOUNDARY_PREFIX
#define STREET_UNIT_POSTCODE_KEY_PREFIX STREET_KEY_PREFIX UNIT_KEY_PREFIX POSTCODE_KEY_PREFIX

#define PO_BOX_GEOHASH_KEY_PREFIX PO_BOX_KEY_PREFIX GEOHASH_KEY_PREFIX
#define PO_BOX_CITY_KEY_PREFIX PO_BOX_KEY_PREFIX CITY_KEY_PREFIX
#define PO_BOX_CONTAINING_KEY_PREFIX PO_BOX_KEY_PREFIX CONTAINING_BOUNDARY_PREFIX
#define PO_BOX_POSTCODE_KEY_PREFIX PO_BOX_KEY_PREFIX POSTCODE_KEY_PREFIX
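/* Note: C concatenates adjacent string literals, so a compound prefix like
   NAME_ADDRESS_GEOHASH_KEY_PREFIX reduces to "nagh" at compile time; that
   short tag is what appears at the start of each emitted hash key. */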
cstring_array *expanded_component_combined(char *input, libpostal_normalize_options_t options, size_t *n) {
    size_t num_expansions = 0;
    cstring_array *expansions = expand_address(input, options, &num_expansions);

    size_t num_root_expansions = 0;
    cstring_array *root_expansions = expand_address_root(input, options, &num_root_expansions);

    if (num_root_expansions == 0) {
        cstring_array_destroy(root_expansions);
        *n = num_expansions;
        return expansions;
    } else if (num_expansions == 0) {
        cstring_array_destroy(expansions);
        *n = num_root_expansions;
        return root_expansions;
    } else {
        khash_t(str_set) *unique_strings = kh_init(str_set);
        char *expansion;
        khiter_t k;
        int ret;

        cstring_array *all_expansions = cstring_array_new();

        for (size_t i = 0; i < num_expansions; i++) {
            expansion = cstring_array_get_string(expansions, i);
            k = kh_get(str_set, unique_strings, expansion);

            if (k == kh_end(unique_strings)) {
                cstring_array_add_string(all_expansions, expansion);
                k = kh_put(str_set, unique_strings, expansion, &ret);
                if (ret < 0) {
                    break;
                }
            }
        }

        for (size_t i = 0; i < num_root_expansions; i++) {
            expansion = cstring_array_get_string(root_expansions, i);
            k = kh_get(str_set, unique_strings, expansion);

            if (k == kh_end(unique_strings)) {
                cstring_array_add_string(all_expansions, expansion);
                k = kh_put(str_set, unique_strings, expansion, &ret);
                if (ret < 0) {
                    break;
                }
            }
        }

        *n = cstring_array_num_strings(all_expansions);

        kh_destroy(str_set, unique_strings);
        cstring_array_destroy(root_expansions);
        cstring_array_destroy(expansions);

        return all_expansions;
    }
}
static inline cstring_array *expanded_component_root_with_fallback(char *input, libpostal_normalize_options_t options, size_t *n) {
    cstring_array *root_expansions = expand_address_root(input, options, n);
    if (*n > 0) {
        return root_expansions;
    } else {
        cstring_array_destroy(root_expansions);
        *n = 0;
        return expand_address(input, options, n);
    }
}
static cstring_array *geohash_and_neighbors(double latitude, double longitude, size_t geohash_precision) {
    if (geohash_precision == 0) return NULL;

    if (geohash_precision > MAX_GEOHASH_PRECISION) geohash_precision = MAX_GEOHASH_PRECISION;
    size_t geohash_len = geohash_precision + 1;

    char geohash[geohash_len];
    if (geohash_encode(latitude, longitude, geohash, geohash_len) != GEOHASH_OK) {
        return NULL;
    }

    size_t neighbors_size = geohash_len * 8;
    char neighbors[neighbors_size];

    int num_strings = 0;

    if (geohash_neighbors(geohash, neighbors, neighbors_size, &num_strings) == GEOHASH_OK && num_strings == 8) {
        cstring_array *strings = cstring_array_new_size(9 * geohash_len);
        cstring_array_add_string(strings, geohash);

        for (int i = 0; i < num_strings; i++) {
            char *neighbor = neighbors + geohash_len * i;
            cstring_array_add_string(strings, neighbor);
        }
        return strings;
    }

    return NULL;
}
#define MAX_NAME_TOKENS 50

cstring_array *name_word_hashes(char *name, libpostal_normalize_options_t normalize_options) {
    normalize_options.address_components = LIBPOSTAL_ADDRESS_NAME | LIBPOSTAL_ADDRESS_ANY;
    size_t num_expansions = 0;
    cstring_array *name_expansions = expanded_component_root_with_fallback(name, normalize_options, &num_expansions);
    if (num_expansions == 0) {
        cstring_array_destroy(name_expansions);
        return NULL;
    }

    size_t len = strlen(name);

    char_array *token_string_array = char_array_new_size(len);
    cstring_array *strings = cstring_array_new_size(len);
    token_array *token_array = token_array_new();

    char_array *combined_words_no_whitespace = char_array_new();

    bool keep_whitespace = false;

    khash_t(str_set) *unique_strings = kh_init(str_set);
    khiter_t k;
    int ret = 0;

    for (size_t i = 0; i < num_expansions; i++) {
        char *expansion = cstring_array_get_string(name_expansions, i);
        log_debug("expansion = %s\n", expansion);
        tokenize_add_tokens(token_array, expansion, strlen(expansion), keep_whitespace);
        size_t num_tokens = token_array->n;
        token_t *tokens = token_array->a;
        token_t prev_token;
        char *token_str;
        for (size_t j = 0; j < num_tokens; j++) {
            token_t token = tokens[j];
            bool ideogram = is_ideographic(token.type);

            string_script_t token_script = get_string_script(expansion + token.offset, token.len);
            bool is_latin = token_script.len == token.len && token_script.script == SCRIPT_LATIN;

            char_array_clear(token_string_array);
            // For ideograms, since the "words" are characters, we use shingles of two characters
            if (ideogram && j > 0 && is_ideographic(prev_token.type)) {
                log_debug("cat ideogram\n");
                char_array_cat_len(token_string_array, expansion + prev_token.offset, prev_token.len);
            }

            // For Latin script, add double metaphone of the words
            if (is_latin && !is_numeric_token(token.type) && !ideogram && !is_punctuation(token.type)) {
                char_array_clear(token_string_array);
                char_array_cat_len(token_string_array, expansion + token.offset, token.len);
                token_str = char_array_get_string(token_string_array);

                log_debug("token_str = %s\n", token_str);

                double_metaphone_codes_t *dm_codes = double_metaphone(token_str);
                if (dm_codes == NULL) {
                    prev_token = token;
                    continue;
                }
                char *dm_primary = dm_codes->primary;
                char *dm_secondary = dm_codes->secondary;

                if (!string_equals(dm_primary, "")) {
                    k = kh_get(str_set, unique_strings, dm_primary);

                    if (k == kh_end(unique_strings) && kh_size(unique_strings) <= MAX_NAME_TOKENS) {
                        log_debug("adding dm_primary = %s\n", dm_primary);
                        cstring_array_add_string(strings, dm_primary);
                        k = kh_put(str_set, unique_strings, strdup(dm_primary), &ret);
                        if (ret < 0) {
                            break;
                        }
                    }

                    if (!string_equals(dm_secondary, dm_primary)) {
                        k = kh_get(str_set, unique_strings, dm_secondary);

                        if (k == kh_end(unique_strings) && kh_size(unique_strings) <= MAX_NAME_TOKENS) {
                            log_debug("adding dm_secondary = %s\n", dm_secondary);
                            cstring_array_add_string(strings, dm_secondary);
                            k = kh_put(str_set, unique_strings, strdup(dm_secondary), &ret);
                            if (ret < 0) {
                                break;
                            }
                        }
                    }
                }
                double_metaphone_codes_destroy(dm_codes);
            // For non-Latin words (Arabic, Cyrillic, etc.) just add the word
            // For ideograms, we do two-character shingles, so only add the first character if the string has one token
            } else if (!ideogram || j > 0 || num_tokens == 1) {
                char_array_cat_len(token_string_array, expansion + token.offset, token.len);
                token_str = char_array_get_string(token_string_array);
                log_debug("token_str = %s\n", token_str);
                k = kh_get(str_set, unique_strings, token_str);

                if (k == kh_end(unique_strings)) {
                    cstring_array_add_string(strings, token_str);
                    k = kh_put(str_set, unique_strings, strdup(token_str), &ret);
                    if (ret < 0) {
                        break;
                    }
                }
            }

            prev_token = token;
        }

        token_array_clear(token_array);
    }

    char_array_destroy(token_string_array);
    token_array_destroy(token_array);
    char_array_destroy(combined_words_no_whitespace);

    cstring_array_destroy(name_expansions);

    const char *key;

    kh_foreach_key(unique_strings, key, {
        free((char *)key);
    });
    kh_destroy(str_set, unique_strings);

    return strings;
}
static inline void add_string_arrays_to_tree(string_tree_t *tree, size_t n, va_list args) {
    for (size_t i = 0; i < n; i++) {
        cstring_array *a = va_arg(args, cstring_array *);
        size_t num_strings = cstring_array_num_strings(a);
        if (num_strings == 0) continue;
        for (size_t j = 0; j < num_strings; j++) {
            char *str = cstring_array_get_string(a, j);
            string_tree_add_string(tree, str);
        }
        string_tree_finalize_token(tree);
    }
    // The caller owns the va_list and is responsible for calling va_end
}

static inline void add_hashes_from_tree(cstring_array *near_dupe_hashes, char *prefix, string_tree_t *tree) {
    string_tree_iterator_t *iter = string_tree_iterator_new(tree);
    if (iter->num_tokens > 0) {
        log_debug("iter->num_tokens = %zu\n", iter->num_tokens);

        for (; !string_tree_iterator_done(iter); string_tree_iterator_next(iter)) {
            cstring_array_start_token(near_dupe_hashes);
            cstring_array_append_string(near_dupe_hashes, prefix);

            char *str;
            string_tree_iterator_foreach_token(iter, str, {
                cstring_array_append_string(near_dupe_hashes, "|");
                cstring_array_append_string(near_dupe_hashes, str);
                //log_debug("str=%s\n", str);
            });

            cstring_array_terminate(near_dupe_hashes);
        }
    }

    string_tree_iterator_destroy(iter);
}

static inline void add_string_hash_permutations(cstring_array *near_dupe_hashes, char *prefix, string_tree_t *tree, size_t n, ...) {
    string_tree_clear(tree);

    log_debug("prefix=%s\n", prefix);

    va_list args;
    va_start(args, n);
    add_string_arrays_to_tree(tree, n, args);
    va_end(args);

    log_debug("string_tree_num_strings(tree)=%zu\n", string_tree_num_strings(tree));

    add_hashes_from_tree(near_dupe_hashes, prefix, tree);
}
cstring_array *near_dupe_hashes_languages(size_t num_components, char **labels, char **values, libpostal_near_dupe_hash_options_t options, size_t num_languages, char **languages) {
    if (!options.with_latlon && !options.with_city_or_equivalent && !options.with_postal_code) return NULL;

    place_t *place = place_from_components(num_components, labels, values);
    log_debug("created place\n");
    if (place == NULL) return NULL;

    bool have_valid_geo = options.with_latlon;

    if (!have_valid_geo && options.with_postal_code && place->postal_code != NULL) {
        have_valid_geo = true;
    }

    if (!have_valid_geo && options.with_city_or_equivalent && (place->city != NULL || place->city_district != NULL || place->suburb != NULL || place->island != NULL)) {
        have_valid_geo = true;
    }

    if (!have_valid_geo && options.with_small_containing_boundaries && (place->state_district != NULL)) {
        have_valid_geo = true;
    }

    if (!have_valid_geo) {
        log_debug("no valid geo\n");
        place_destroy(place);
        return NULL;
    }

    libpostal_normalize_options_t normalize_options = libpostal_get_default_options();

    language_classifier_response_t *lang_response = NULL;

    if (num_languages == 0) {
        lang_response = place_languages(num_components, labels, values);

        if (lang_response != NULL) {
            log_debug("got %zu place languages\n", lang_response->num_languages);
            normalize_options.num_languages = lang_response->num_languages;
            normalize_options.languages = lang_response->languages;
        }
    } else {
        normalize_options.num_languages = num_languages;
        normalize_options.languages = languages;
    }

    string_tree_t *tree = string_tree_new();

    cstring_array *name_expansions = NULL;
    size_t num_name_expansions = 0;
    if (place->name != NULL && options.with_name) {
        log_debug("Doing name expansions for %s\n", place->name);
        name_expansions = name_word_hashes(place->name, normalize_options);
        if (name_expansions != NULL) {
            num_name_expansions = cstring_array_num_strings(name_expansions);
            log_debug("Got %zu name expansions\n", num_name_expansions);
        }
    }

    cstring_array *street_expansions = NULL;
    size_t num_street_expansions = 0;
    if (place->street != NULL) {
        log_debug("Doing street expansions for %s\n", place->street);
        normalize_options.address_components = LIBPOSTAL_ADDRESS_STREET | LIBPOSTAL_ADDRESS_ANY;
        street_expansions = expanded_component_combined(place->street, normalize_options, &num_street_expansions);
        log_debug("Got %zu street expansions\n", num_street_expansions);
    }

    cstring_array *house_number_expansions = NULL;
    size_t num_house_number_expansions = 0;
    if (place->house_number != NULL) {
        log_debug("Doing house number expansions for %s\n", place->house_number);
        normalize_options.address_components = LIBPOSTAL_ADDRESS_HOUSE_NUMBER | LIBPOSTAL_ADDRESS_ANY;
        house_number_expansions = expand_address_root(place->house_number, normalize_options, &num_house_number_expansions);
        log_debug("Got %zu house number expansions\n", num_house_number_expansions);
    }

    cstring_array *unit_expansions = NULL;
    size_t num_unit_expansions = 0;
    if (place->unit != NULL && options.with_unit) {
        log_debug("Doing unit expansions for %s\n", place->unit);
        normalize_options.address_components = LIBPOSTAL_ADDRESS_UNIT | LIBPOSTAL_ADDRESS_ANY;
        unit_expansions = expand_address_root(place->unit, normalize_options, &num_unit_expansions);
        log_debug("Got %zu unit expansions\n", num_unit_expansions);
    }

    cstring_array *building_expansions = NULL;
    size_t num_building_expansions = 0;
    if (place->building != NULL && options.with_unit) {
        normalize_options.address_components = LIBPOSTAL_ADDRESS_UNIT | LIBPOSTAL_ADDRESS_ANY;
        building_expansions = expand_address_root(place->building, normalize_options, &num_building_expansions);
    }

    cstring_array *level_expansions = NULL;
    size_t num_level_expansions = 0;
    if (place->level != NULL && options.with_unit) {
        normalize_options.address_components = LIBPOSTAL_ADDRESS_LEVEL | LIBPOSTAL_ADDRESS_ANY;
        level_expansions = expand_address_root(place->level, normalize_options, &num_level_expansions);
    }

    cstring_array *po_box_expansions = NULL;
    size_t num_po_box_expansions = 0;
    if (place->po_box != NULL) {
        normalize_options.address_components = LIBPOSTAL_ADDRESS_PO_BOX | LIBPOSTAL_ADDRESS_ANY;
        po_box_expansions = expand_address_root(place->po_box, normalize_options, &num_po_box_expansions);
    }

    cstring_array *place_expansions = NULL;
    cstring_array *containing_expansions = NULL;

    if (options.with_city_or_equivalent) {
        normalize_options.address_components = LIBPOSTAL_ADDRESS_TOPONYM | LIBPOSTAL_ADDRESS_ANY;

        if (place->city != NULL) {
            size_t num_city_expansions = 0;
            cstring_array *city_expansions = expand_address_root(place->city, normalize_options, &num_city_expansions);
            if (place_expansions == NULL) {
                place_expansions = city_expansions;
            } else if (city_expansions != NULL && num_city_expansions > 0) {
                cstring_array_extend(place_expansions, city_expansions);
                cstring_array_destroy(city_expansions);
            }
        }

        if (place->city_district != NULL) {
            size_t num_city_district_expansions = 0;
            cstring_array *city_district_expansions = expand_address_root(place->city_district, normalize_options, &num_city_district_expansions);
            if (place_expansions == NULL) {
                place_expansions = city_district_expansions;
            } else if (city_district_expansions != NULL && num_city_district_expansions > 0) {
                cstring_array_extend(place_expansions, city_district_expansions);
                cstring_array_destroy(city_district_expansions);
            }
        }

        if (place->suburb != NULL) {
            size_t num_suburb_expansions = 0;
            cstring_array *suburb_expansions = expand_address_root(place->suburb, normalize_options, &num_suburb_expansions);
            if (place_expansions == NULL) {
                place_expansions = suburb_expansions;
            } else if (suburb_expansions != NULL && num_suburb_expansions > 0) {
                cstring_array_extend(place_expansions, suburb_expansions);
                cstring_array_destroy(suburb_expansions);
            }
        }

        if (place->island != NULL) {
            size_t num_island_expansions = 0;
            cstring_array *island_expansions = expand_address_root(place->island, normalize_options, &num_island_expansions);
            if (place_expansions == NULL) {
                place_expansions = island_expansions;
            } else if (island_expansions != NULL && num_island_expansions > 0) {
                cstring_array_extend(place_expansions, island_expansions);
                cstring_array_destroy(island_expansions);
            }
        }

        if (place->state_district != NULL && options.with_small_containing_boundaries) {
            size_t num_state_district_expansions = 0;
            cstring_array *state_district_expansions = expand_address_root(place->state_district, normalize_options, &num_state_district_expansions);
            if (containing_expansions == NULL) {
                containing_expansions = state_district_expansions;
            } else if (state_district_expansions != NULL && num_state_district_expansions > 0) {
                cstring_array_extend(containing_expansions, state_district_expansions);
                cstring_array_destroy(state_district_expansions);
            }
        }
    }
    cstring_array *postal_code_expansions = NULL;
    size_t num_postal_code_expansions = 0;
    if (options.with_postal_code && place->postal_code != NULL) {
        normalize_options.address_components = LIBPOSTAL_ADDRESS_POSTAL_CODE | LIBPOSTAL_ADDRESS_ANY;
        postal_code_expansions = expand_address_root(place->postal_code, normalize_options, &num_postal_code_expansions);
    }

    cstring_array *geohash_expansions = NULL;
    if (options.with_latlon && !(double_equals(options.latitude, 0.0) && double_equals(options.longitude, 0.0))) {
        geohash_expansions = geohash_and_neighbors(options.latitude, options.longitude, options.geohash_precision);
    }

    size_t num_geohash_expansions = geohash_expansions != NULL ? cstring_array_num_strings(geohash_expansions) : 0;
    if (num_geohash_expansions == 0 && num_postal_code_expansions == 0 && place_expansions == NULL && containing_expansions == NULL) {
        return NULL;
    }

    num_name_expansions = name_expansions != NULL ? cstring_array_num_strings(name_expansions) : 0;
    num_street_expansions = street_expansions != NULL ? cstring_array_num_strings(street_expansions) : 0;
    num_house_number_expansions = house_number_expansions != NULL ? cstring_array_num_strings(house_number_expansions) : 0;
    num_po_box_expansions = po_box_expansions != NULL ? cstring_array_num_strings(po_box_expansions) : 0;
    num_unit_expansions = unit_expansions != NULL ? cstring_array_num_strings(unit_expansions) : 0;
    num_building_expansions = building_expansions != NULL ? cstring_array_num_strings(building_expansions) : 0;
    num_level_expansions = level_expansions != NULL ? cstring_array_num_strings(level_expansions) : 0;

    bool have_unit = num_unit_expansions > 0 || num_building_expansions > 0 || num_level_expansions > 0;
    cstring_array *unit_or_equivalent_expansions = NULL;
    if (num_unit_expansions > 0) {
        unit_or_equivalent_expansions = unit_expansions;
    } else if (num_building_expansions > 0) {
        unit_or_equivalent_expansions = building_expansions;
    } else if (num_level_expansions > 0) {
        unit_or_equivalent_expansions = level_expansions;
    }

    cstring_array *near_dupe_hashes = cstring_array_new();
    if (num_name_expansions > 0) {
        if (num_street_expansions > 0 && num_house_number_expansions > 0 && options.name_and_address_keys) {
            // Have street, house number, and unit
            if (have_unit) {
                if (geohash_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_ADDRESS_UNIT_GEOHASH_KEY_PREFIX, tree, 5, name_expansions, street_expansions, house_number_expansions, unit_or_equivalent_expansions, geohash_expansions);
                }

                if (place_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_ADDRESS_UNIT_CITY_KEY_PREFIX, tree, 5, name_expansions, street_expansions, house_number_expansions, unit_or_equivalent_expansions, place_expansions);
                }

                if (containing_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_ADDRESS_UNIT_CONTAINING_KEY_PREFIX, tree, 5, name_expansions, street_expansions, house_number_expansions, unit_or_equivalent_expansions, containing_expansions);
                }

                if (postal_code_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_ADDRESS_UNIT_POSTCODE_KEY_PREFIX, tree, 5, name_expansions, street_expansions, house_number_expansions, unit_or_equivalent_expansions, postal_code_expansions);
                }
            // Have street and house number, no unit
            } else {
                if (geohash_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_ADDRESS_GEOHASH_KEY_PREFIX, tree, 4, name_expansions, street_expansions, house_number_expansions, geohash_expansions);
                }

                if (place_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_ADDRESS_CITY_KEY_PREFIX, tree, 4, name_expansions, street_expansions, house_number_expansions, place_expansions);
                }

                if (containing_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_ADDRESS_CONTAINING_KEY_PREFIX, tree, 4, name_expansions, street_expansions, house_number_expansions, containing_expansions);
                }

                if (postal_code_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_ADDRESS_POSTCODE_KEY_PREFIX, tree, 4, name_expansions, street_expansions, house_number_expansions, postal_code_expansions);
                }
            }
        // Japan, other places with no street names
        } else if (num_house_number_expansions > 0 && options.name_and_address_keys) {
            // House number and unit
            if (have_unit) {
                if (geohash_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_HOUSE_NUMBER_UNIT_GEOHASH_KEY_PREFIX, tree, 4, name_expansions, house_number_expansions, unit_or_equivalent_expansions, geohash_expansions);
                }

                if (place_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_HOUSE_NUMBER_UNIT_CITY_KEY_PREFIX, tree, 4, name_expansions, house_number_expansions, unit_or_equivalent_expansions, place_expansions);
                }

                if (containing_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_HOUSE_NUMBER_UNIT_CONTAINING_KEY_PREFIX, tree, 4, name_expansions, house_number_expansions, unit_or_equivalent_expansions, containing_expansions);
                }

                if (postal_code_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_HOUSE_NUMBER_UNIT_POSTCODE_KEY_PREFIX, tree, 4, name_expansions, house_number_expansions, unit_or_equivalent_expansions, postal_code_expansions);
                }
            // House number, no unit
            } else {
                if (geohash_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_HOUSE_NUMBER_GEOHASH_KEY_PREFIX, tree, 3, name_expansions, house_number_expansions, geohash_expansions);
                }

                if (place_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_HOUSE_NUMBER_CITY_KEY_PREFIX, tree, 3, name_expansions, house_number_expansions, place_expansions);
                }

                if (containing_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_HOUSE_NUMBER_CONTAINING_KEY_PREFIX, tree, 3, name_expansions, house_number_expansions, containing_expansions);
                }

                if (postal_code_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_HOUSE_NUMBER_POSTCODE_KEY_PREFIX, tree, 3, name_expansions, house_number_expansions, postal_code_expansions);
                }
            }
        // Addresses in India, UK, Ireland, many university addresses, etc. may have house name + street with no house numbers
        } else if (num_street_expansions > 0 && options.name_and_address_keys) {
            // Have street and unit, no house number
            if (have_unit) {
                if (geohash_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_STREET_UNIT_GEOHASH_KEY_PREFIX, tree, 4, name_expansions, street_expansions, unit_or_equivalent_expansions, geohash_expansions);
                }

                if (place_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_STREET_UNIT_CITY_KEY_PREFIX, tree, 4, name_expansions, street_expansions, unit_or_equivalent_expansions, place_expansions);
                }

                if (containing_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_STREET_UNIT_CONTAINING_KEY_PREFIX, tree, 4, name_expansions, street_expansions, unit_or_equivalent_expansions, containing_expansions);
                }

                if (postal_code_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_STREET_UNIT_POSTCODE_KEY_PREFIX, tree, 4, name_expansions, street_expansions, unit_or_equivalent_expansions, postal_code_expansions);
                }
            // Have street only, no house number or unit
            } else {
                if (geohash_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_STREET_GEOHASH_KEY_PREFIX, tree, 3, name_expansions, street_expansions, geohash_expansions);
                }

                if (place_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_STREET_CITY_KEY_PREFIX, tree, 3, name_expansions, street_expansions, place_expansions);
                }

                if (containing_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_STREET_CONTAINING_KEY_PREFIX, tree, 3, name_expansions, street_expansions, containing_expansions);
                }

                if (postal_code_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_STREET_POSTCODE_KEY_PREFIX, tree, 3, name_expansions, street_expansions, postal_code_expansions);
                }
            }
        // PO Box only addresses, mailing addresses
        } else if (num_po_box_expansions > 0 && options.name_and_address_keys) {
            if (geohash_expansions != NULL) {
                add_string_hash_permutations(near_dupe_hashes, NAME_PO_BOX_GEOHASH_KEY_PREFIX, tree, 3, name_expansions, po_box_expansions, geohash_expansions);
            }
            if (place_expansions != NULL) {
                add_string_hash_permutations(near_dupe_hashes, NAME_PO_BOX_CITY_KEY_PREFIX, tree, 3, name_expansions, po_box_expansions, place_expansions);
            }

            if (containing_expansions != NULL) {
                add_string_hash_permutations(near_dupe_hashes, NAME_PO_BOX_CONTAINING_KEY_PREFIX, tree, 3, name_expansions, po_box_expansions, containing_expansions);
            }

            if (postal_code_expansions != NULL) {
                add_string_hash_permutations(near_dupe_hashes, NAME_PO_BOX_POSTCODE_KEY_PREFIX, tree, 3, name_expansions, po_box_expansions, postal_code_expansions);
            }
        // Only name
        } else if (options.name_only_keys) {
            // Have name and unit, some university addresses
            if (have_unit) {
                if (geohash_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_UNIT_GEOHASH_KEY_PREFIX, tree, 3, name_expansions, unit_or_equivalent_expansions, geohash_expansions);
                }

                if (place_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_UNIT_CITY_KEY_PREFIX, tree, 3, name_expansions, unit_or_equivalent_expansions, place_expansions);
                }

                if (containing_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_UNIT_CONTAINING_KEY_PREFIX, tree, 3, name_expansions, unit_or_equivalent_expansions, containing_expansions);
                }

                if (postal_code_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_UNIT_POSTCODE_KEY_PREFIX, tree, 3, name_expansions, unit_or_equivalent_expansions, postal_code_expansions);
                }
            // Have name and geo only
            } else {
                if (geohash_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_GEOHASH_KEY_PREFIX, tree, 2, name_expansions, geohash_expansions);
                }

                if (place_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_CITY_KEY_PREFIX, tree, 2, name_expansions, place_expansions);
                }

                if (containing_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_CONTAINING_KEY_PREFIX, tree, 2, name_expansions, containing_expansions);
                }

                if (postal_code_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, NAME_POSTCODE_KEY_PREFIX, tree, 2, name_expansions, postal_code_expansions);
                }
            }
        }
    }
    if (options.address_only_keys) {
        if (num_street_expansions > 0 && num_house_number_expansions > 0) {
            // Have street, house number, and unit
            if (have_unit) {
                if (geohash_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, ADDRESS_UNIT_GEOHASH_KEY_PREFIX, tree, 4, street_expansions, house_number_expansions, unit_or_equivalent_expansions, geohash_expansions);
                }

                if (place_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, ADDRESS_UNIT_CITY_KEY_PREFIX, tree, 4, street_expansions, house_number_expansions, unit_or_equivalent_expansions, place_expansions);
                }

                if (containing_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, ADDRESS_UNIT_CONTAINING_KEY_PREFIX, tree, 4, street_expansions, house_number_expansions, unit_or_equivalent_expansions, containing_expansions);
                }

                if (postal_code_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, ADDRESS_UNIT_POSTCODE_KEY_PREFIX, tree, 4, street_expansions, house_number_expansions, unit_or_equivalent_expansions, postal_code_expansions);
                }
            // Have street and house number, no unit
            } else {
                if (geohash_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, ADDRESS_GEOHASH_KEY_PREFIX, tree, 3, street_expansions, house_number_expansions, geohash_expansions);
                }

                if (place_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, ADDRESS_CITY_KEY_PREFIX, tree, 3, street_expansions, house_number_expansions, place_expansions);
                }

                if (containing_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, ADDRESS_CONTAINING_KEY_PREFIX, tree, 3, street_expansions, house_number_expansions, containing_expansions);
                }

                if (postal_code_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, ADDRESS_POSTCODE_KEY_PREFIX, tree, 3, street_expansions, house_number_expansions, postal_code_expansions);
                }
            }
        // Japan, other places with no street names
        } else if (num_house_number_expansions > 0) {
            // House number and unit
            if (have_unit) {
                if (geohash_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, HOUSE_NUMBER_UNIT_GEOHASH_KEY_PREFIX, tree, 3, house_number_expansions, unit_or_equivalent_expansions, geohash_expansions);
                }

                if (place_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, HOUSE_NUMBER_UNIT_CITY_KEY_PREFIX, tree, 3, house_number_expansions, unit_or_equivalent_expansions, place_expansions);
                }

                if (containing_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, HOUSE_NUMBER_UNIT_CONTAINING_KEY_PREFIX, tree, 3, house_number_expansions, unit_or_equivalent_expansions, containing_expansions);
                }

                if (postal_code_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, HOUSE_NUMBER_UNIT_POSTCODE_KEY_PREFIX, tree, 3, house_number_expansions, unit_or_equivalent_expansions, postal_code_expansions);
                }
            // House number, no unit
            } else {
                if (geohash_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, HOUSE_NUMBER_GEOHASH_KEY_PREFIX, tree, 2, house_number_expansions, geohash_expansions);
                }

                if (place_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, HOUSE_NUMBER_CITY_KEY_PREFIX, tree, 2, house_number_expansions, place_expansions);
                }

                if (containing_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, HOUSE_NUMBER_CONTAINING_KEY_PREFIX, tree, 2, house_number_expansions, containing_expansions);
                }

                if (postal_code_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, HOUSE_NUMBER_POSTCODE_KEY_PREFIX, tree, 2, house_number_expansions, postal_code_expansions);
                }
            }
        // Addresses in India, UK, Ireland, many university addresses, etc. may have house name + street with no house numbers
        } else if (num_street_expansions > 0) {
            // Have street and unit, no house number
            if (have_unit) {
                if (geohash_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, STREET_UNIT_GEOHASH_KEY_PREFIX, tree, 3, street_expansions, unit_or_equivalent_expansions, geohash_expansions);
                }

                if (place_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, STREET_UNIT_CITY_KEY_PREFIX, tree, 3, street_expansions, unit_or_equivalent_expansions, place_expansions);
                }

                if (containing_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, STREET_UNIT_CONTAINING_KEY_PREFIX, tree, 3, street_expansions, unit_or_equivalent_expansions, containing_expansions);
                }

                if (postal_code_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, STREET_UNIT_POSTCODE_KEY_PREFIX, tree, 3, street_expansions, unit_or_equivalent_expansions, postal_code_expansions);
                }
            // Have street only, no house number or unit
            } else {
                if (geohash_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, STREET_GEOHASH_KEY_PREFIX, tree, 2, street_expansions, geohash_expansions);
                }

                if (place_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, STREET_CITY_KEY_PREFIX, tree, 2, street_expansions, place_expansions);
                }

                if (containing_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, STREET_CONTAINING_KEY_PREFIX, tree, 2, street_expansions, containing_expansions);
                }

                if (postal_code_expansions != NULL) {
                    add_string_hash_permutations(near_dupe_hashes, STREET_POSTCODE_KEY_PREFIX, tree, 2, street_expansions, postal_code_expansions);
                }
            }
        // PO Box only addresses, mailing addresses
        } else if (num_po_box_expansions > 0) {
            if (geohash_expansions != NULL) {
                add_string_hash_permutations(near_dupe_hashes, PO_BOX_GEOHASH_KEY_PREFIX, tree, 2, po_box_expansions, geohash_expansions);
            }

            if (place_expansions != NULL) {
                add_string_hash_permutations(near_dupe_hashes, PO_BOX_CITY_KEY_PREFIX, tree, 2, po_box_expansions, place_expansions);
            }

            if (containing_expansions != NULL) {
                add_string_hash_permutations(near_dupe_hashes, PO_BOX_CONTAINING_KEY_PREFIX, tree, 2, po_box_expansions, containing_expansions);
            }

            if (postal_code_expansions != NULL) {
                add_string_hash_permutations(near_dupe_hashes, PO_BOX_POSTCODE_KEY_PREFIX, tree, 2, po_box_expansions, postal_code_expansions);
            }
        }
    }
    if (place != NULL) {
        place_destroy(place);
    }

    if (tree != NULL) {
        string_tree_destroy(tree);
    }

    if (name_expansions != NULL) {
        cstring_array_destroy(name_expansions);
    }

    if (street_expansions != NULL) {
        cstring_array_destroy(street_expansions);
    }

    if (house_number_expansions != NULL) {
        cstring_array_destroy(house_number_expansions);
    }

    if (unit_expansions != NULL) {
        cstring_array_destroy(unit_expansions);
    }

    if (building_expansions != NULL) {
        cstring_array_destroy(building_expansions);
    }

    if (level_expansions != NULL) {
        cstring_array_destroy(level_expansions);
    }

    if (po_box_expansions != NULL) {
        cstring_array_destroy(po_box_expansions);
    }

    if (place_expansions != NULL) {
        cstring_array_destroy(place_expansions);
    }

    if (containing_expansions != NULL) {
        cstring_array_destroy(containing_expansions);
    }

    if (postal_code_expansions != NULL) {
        cstring_array_destroy(postal_code_expansions);
    }

    if (geohash_expansions != NULL) {
        cstring_array_destroy(geohash_expansions);
    }

    if (lang_response != NULL) {
        language_classifier_response_destroy(lang_response);
    }

    return near_dupe_hashes;
}

inline cstring_array *near_dupe_hashes(size_t num_components, char **labels, char **values, libpostal_near_dupe_hash_options_t options) {
    return near_dupe_hashes_languages(num_components, labels, values, options, 0, NULL);
}
14
src/near_dupe.h
Normal file
@@ -0,0 +1,14 @@

#ifndef NEAR_DUPE_H
#define NEAR_DUPE_H

#include <stdlib.h>
#include <stdio.h>

#include "libpostal.h"
#include "string_utils.h"

cstring_array *near_dupe_hashes(size_t num_components, char **labels, char **values, libpostal_near_dupe_hash_options_t options);
cstring_array *near_dupe_hashes_languages(size_t num_components, char **labels, char **values, libpostal_near_dupe_hash_options_t options, size_t num_languages, char **languages);

#endif

124
src/near_dupe_test.c
Normal file
@@ -0,0 +1,124 @@
#include <stdio.h>
#include <stdlib.h>

#include "libpostal.h"
#include "string_utils.h"

int main(int argc, char **argv) {
    if (argc < 3) {
        printf("Usage: ./test_near_dupe label value [...]\n");
        exit(EXIT_FAILURE);
    }

    if (!libpostal_setup() || !libpostal_setup_language_classifier()) {
        exit(EXIT_FAILURE);
    }

    libpostal_near_dupe_hash_options_t options = libpostal_get_near_dupe_hash_default_options();

    cstring_array *labels_array = cstring_array_new();
    cstring_array *values_array = cstring_array_new();
    cstring_array *languages_array = NULL;

    bool label = true;
    bool next_is_latitude = false;
    bool next_is_longitude = false;
    bool next_is_geohash_precision = false;
    bool have_latitude = false;
    bool have_longitude = false;
    bool next_is_language = false;
    double longitude = 0.0;
    double latitude = 0.0;

    for (size_t i = 1; i < argc; i++) {
        char *arg = argv[i];

        if (string_equals(arg, "--with-unit")) {
            options.with_unit = true;
        } else if (string_equals(arg, "--latitude")) {
            next_is_latitude = true;
        } else if (string_equals(arg, "--longitude")) {
            next_is_longitude = true;
        } else if (string_equals(arg, "--geohash-precision")) {
            next_is_geohash_precision = true;
        } else if (string_equals(arg, "--name-only-keys")) {
            options.name_only_keys = true;
        } else if (string_equals(arg, "--address-only-keys")) {
            options.address_only_keys = true;
        } else if (string_equals(arg, "--language")) {
            next_is_language = true;
        } else if (next_is_latitude) {
            sscanf(arg, "%lf", &latitude);
            next_is_latitude = false;
            have_latitude = true;
        } else if (next_is_longitude) {
            sscanf(arg, "%lf", &longitude);
            next_is_longitude = false;
            have_longitude = true;
        } else if (next_is_geohash_precision) {
            size_t geohash_precision = 0;
            sscanf(arg, "%zu", &geohash_precision);
            options.geohash_precision = geohash_precision;
            next_is_geohash_precision = false;
        } else if (next_is_language) {
            if (languages_array == NULL) {
                languages_array = cstring_array_new();
            }
            cstring_array_add_string(languages_array, arg);
            // reset the flag so subsequent args are parsed as labels/values again
            next_is_language = false;
        } else if (label) {
            cstring_array_add_string(labels_array, arg);
            label = false;
        } else {
            cstring_array_add_string(values_array, arg);
            label = true;
        }
    }

    if (have_latitude && have_longitude) {
        options.with_latlon = true;
        options.latitude = latitude;
        options.longitude = longitude;
    }

    size_t num_languages = 0;
    char **languages = NULL;
    if (languages_array != NULL) {
        num_languages = cstring_array_num_strings(languages_array);
        languages = cstring_array_to_strings(languages_array);
    }

    size_t num_components = cstring_array_num_strings(labels_array);
    if (num_components != cstring_array_num_strings(values_array)) {
        cstring_array_destroy(labels_array);
        cstring_array_destroy(values_array);
        printf("Must have same number of labels and values\n");
        exit(EXIT_FAILURE);
    }

    char **labels = cstring_array_to_strings(labels_array);
    char **values = cstring_array_to_strings(values_array);

    size_t num_near_dupe_hashes = 0;
    char **near_dupe_hashes = libpostal_near_dupe_hashes_languages(num_components, labels, values, options, num_languages, languages, &num_near_dupe_hashes);
    if (near_dupe_hashes != NULL) {
        for (size_t i = 0; i < num_near_dupe_hashes; i++) {
            char *near_dupe_hash = near_dupe_hashes[i];
            printf("%s\n", near_dupe_hash);
        }

        libpostal_expansion_array_destroy(near_dupe_hashes, num_near_dupe_hashes);
    }

    libpostal_expansion_array_destroy(labels, num_components);
    libpostal_expansion_array_destroy(values, num_components);

    if (languages != NULL) {
        libpostal_expansion_array_destroy(languages, num_languages);
    }

    libpostal_teardown();
    libpostal_teardown_language_classifier();
}
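For context, a minimal sketch of calling the public near-dupe hashing API directly, mirroring the calls the test harness above makes. The component labels, values, and coordinates are hypothetical; the actual hash strings depend on libpostal's data files.

#include <stdio.h>
#include "libpostal.h"

int main(void) {
    if (!libpostal_setup() || !libpostal_setup_language_classifier()) return 1;

    // Hypothetical parsed address; labels follow the address parser's names
    char *labels[] = {"house", "house_number", "road", "city"};
    char *values[] = {"whole foods", "270", "greenwich st", "new york"};
    char *languages[] = {"en"};

    libpostal_near_dupe_hash_options_t options = libpostal_get_near_dupe_hash_default_options();
    options.with_latlon = true;
    options.latitude = 40.7151;    // hypothetical coordinates near the address
    options.longitude = -74.0111;

    size_t num_hashes = 0;
    char **hashes = libpostal_near_dupe_hashes_languages(4, labels, values, options, 1, languages, &num_hashes);
    if (hashes != NULL) {
        for (size_t i = 0; i < num_hashes; i++) {
            printf("%s\n", hashes[i]);  // print each near-dupe key
        }
        libpostal_expansion_array_destroy(hashes, num_hashes);
    }

    libpostal_teardown();
    libpostal_teardown_language_classifier();
    return 0;
}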
@@ -401,9 +401,12 @@ void add_normalized_token(char_array *array, char *str, token_t token, uint64_t
    char *append_if_not_numeric = NULL;

    int32_t ch;
    int32_t next_ch;
    ssize_t char_len;
    ssize_t next_char_len;

    bool last_was_letter = false;
    bool last_was_number = false;
    bool append_char = true;

    while (idx < len) {
@@ -417,13 +420,21 @@ void add_normalized_token(char_array *array, char *str, token_t token, uint64_t
        bool is_letter = utf8_is_letter(cat);
        bool is_number = utf8_is_number(cat);

        next_char_len = utf8proc_iterate(ptr + char_len, len, &next_ch);
        int next_cat = utf8proc_category(next_ch);
        bool next_is_number = utf8_is_number(next_cat);
        bool next_is_letter = utf8_is_letter(next_cat);

        bool is_full_stop = ch == FULL_STOP_CODEPOINT;

        if (is_hyphen && last_was_letter && options & NORMALIZE_TOKEN_REPLACE_HYPHENS) {
        bool is_hyphen_between_letter_and_number = is_hyphen && ((next_is_number && last_was_letter) || (next_is_letter && last_was_number));

        if (is_hyphen && options & NORMALIZE_TOKEN_REPLACE_HYPHENS && (!(last_was_number && next_is_number) || options & NORMALIZE_TOKEN_REPLACE_NUMERIC_HYPHENS)) {
            char_array_append(array, " ");
            append_char = false;
        } else if (is_hyphen && options & NORMALIZE_TOKEN_DELETE_HYPHENS) {
            append_char = false;
            append_char = !is_hyphen_between_letter_and_number;
        }

        if ((is_hyphen || is_full_stop) && token.type == NUMERIC && options & NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC && last_was_letter) {
@@ -444,7 +455,7 @@ void add_normalized_token(char_array *array, char *str, token_t token, uint64_t
            append_char = false;
        }

        if (options & NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC && token.type == NUMERIC && last_was_letter && is_number && !alpha_numeric_split) {
        if (options & NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC && token.type == NUMERIC && ((last_was_letter && is_number) || (last_was_number && is_letter)) && !alpha_numeric_split) {
            char_array_append(array, " ");
            alpha_numeric_split = true;
        }
@@ -482,7 +493,7 @@ void add_normalized_token(char_array *array, char *str, token_t token, uint64_t
        append_char = true;

        last_was_letter = is_letter;
        last_was_number = is_number;
    }
}
@@ -30,34 +30,38 @@ As well as normalizations for individual string tokens:

#include "constants.h"
#include "klib/khash.h"
#include "libpostal.h"
#include "string_utils.h"
#include "utf8proc/utf8proc.h"
#include "unicode_scripts.h"
#include "numex.h"
#include "scanner.h"
#include "transliterate.h"
#include "trie.h"
#include "tokens.h"
#include "vector.h"

#define NORMALIZE_STRING_LATIN_ASCII 1 << 0
#define NORMALIZE_STRING_TRANSLITERATE 1 << 1
#define NORMALIZE_STRING_STRIP_ACCENTS 1 << 2
#define NORMALIZE_STRING_DECOMPOSE 1 << 3
#define NORMALIZE_STRING_LOWERCASE 1 << 4
#define NORMALIZE_STRING_TRIM 1 << 5
#define NORMALIZE_STRING_REPLACE_HYPHENS 1 << 6
#define NORMALIZE_STRING_COMPOSE 1 << 7
#define NORMALIZE_STRING_SIMPLE_LATIN_ASCII 1 << 8
#define NORMALIZE_STRING_REPLACE_NUMEX 1 << 9
#define NORMALIZE_STRING_LATIN_ASCII LIBPOSTAL_NORMALIZE_STRING_LATIN_ASCII
#define NORMALIZE_STRING_TRANSLITERATE LIBPOSTAL_NORMALIZE_STRING_TRANSLITERATE
#define NORMALIZE_STRING_STRIP_ACCENTS LIBPOSTAL_NORMALIZE_STRING_STRIP_ACCENTS
#define NORMALIZE_STRING_DECOMPOSE LIBPOSTAL_NORMALIZE_STRING_DECOMPOSE
#define NORMALIZE_STRING_LOWERCASE LIBPOSTAL_NORMALIZE_STRING_LOWERCASE
#define NORMALIZE_STRING_TRIM LIBPOSTAL_NORMALIZE_STRING_TRIM
#define NORMALIZE_STRING_REPLACE_HYPHENS LIBPOSTAL_NORMALIZE_STRING_REPLACE_HYPHENS
#define NORMALIZE_STRING_COMPOSE LIBPOSTAL_NORMALIZE_STRING_COMPOSE
#define NORMALIZE_STRING_SIMPLE_LATIN_ASCII LIBPOSTAL_NORMALIZE_STRING_SIMPLE_LATIN_ASCII
#define NORMALIZE_STRING_REPLACE_NUMEX LIBPOSTAL_NORMALIZE_STRING_REPLACE_NUMEX

#define NORMALIZE_TOKEN_REPLACE_HYPHENS 1 << 0
#define NORMALIZE_TOKEN_DELETE_HYPHENS 1 << 1
#define NORMALIZE_TOKEN_DELETE_FINAL_PERIOD 1 << 2
#define NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS 1 << 3
#define NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES 1 << 4
#define NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE 1 << 5
#define NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC 1 << 6
#define NORMALIZE_TOKEN_REPLACE_DIGITS 1 << 7
#define NORMALIZE_TOKEN_REPLACE_HYPHENS LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_HYPHENS
#define NORMALIZE_TOKEN_DELETE_HYPHENS LIBPOSTAL_NORMALIZE_TOKEN_DELETE_HYPHENS
#define NORMALIZE_TOKEN_DELETE_FINAL_PERIOD LIBPOSTAL_NORMALIZE_TOKEN_DELETE_FINAL_PERIOD
#define NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS LIBPOSTAL_NORMALIZE_TOKEN_DELETE_ACRONYM_PERIODS
#define NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES LIBPOSTAL_NORMALIZE_TOKEN_DROP_ENGLISH_POSSESSIVES
#define NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE LIBPOSTAL_NORMALIZE_TOKEN_DELETE_OTHER_APOSTROPHE
#define NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC LIBPOSTAL_NORMALIZE_TOKEN_SPLIT_ALPHA_FROM_NUMERIC
#define NORMALIZE_TOKEN_REPLACE_DIGITS LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_DIGITS
#define NORMALIZE_TOKEN_REPLACE_NUMERIC_TOKEN_LETTERS LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_NUMERIC_TOKEN_LETTERS
#define NORMALIZE_TOKEN_REPLACE_NUMERIC_HYPHENS LIBPOSTAL_NORMALIZE_TOKEN_REPLACE_NUMERIC_HYPHENS

// Replace digits with capital D e.g. 10013 => DDDDD, intended for use with lowercased strings
#define DIGIT_CHAR "D"
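As a rough illustration of what the DIGIT_CHAR replacement above is for, here is a standalone ASCII-only sketch. The real implementation works on UTF-8 codepoints and Unicode numeric categories, not just 0-9.

#include <ctype.h>
#include <stdio.h>

// Mask ASCII digits with 'D', e.g. "10013" => "DDDDD" (lowercased input assumed)
static void mask_digits(char *s) {
    for (; *s != '\0'; s++) {
        if (isdigit((unsigned char)*s)) *s = 'D';
    }
}

int main(void) {
    char postcode[] = "ny 10013";
    mask_digits(postcode);
    printf("%s\n", postcode);  // prints "ny DDDDD"
    return 0;
}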
120
src/numex.c
@@ -439,7 +439,7 @@ bool numex_table_read(FILE *f) {

    log_debug("read num_languages = %" PRIu64 "\n", num_languages);

    int i = 0;
    size_t i = 0;

    numex_language_t *language;

@@ -541,7 +541,7 @@ bool numex_table_write(FILE *f) {

    numex_rule_t rule;

    int i = 0;
    size_t i = 0;

    for (i = 0; i < num_rules; i++) {
        rule = numex_table->rules->a[i];
@@ -848,14 +848,21 @@ numex_result_array *convert_numeric_expressions(char *str, char *lang) {
            log_debug("Last token was RIGHT_CONTEXT_ADD, value=%" PRId64 "\n", result.value);
        } else if (prev_rule.rule_type != NUMEX_NULL && rule.rule_type != NUMEX_STOPWORD && (!whole_tokens_only || complete_token)) {
            log_debug("Had previous token with no context, finishing previous rule before returning\n");

            result.len = prev_result_len;
            number_finished = true;
            complete_token = false;
            advance_index = false;
            state = start_state;
            prev_rule_was_number = true;
            rule = prev_rule = NUMEX_NULL_RULE;
            prev_result_len = 0;
        } else if (prev_rule.rule_type != NUMEX_NULL && rule.rule_type != NUMEX_STOPWORD && whole_tokens_only && !complete_token) {
            log_debug("whole_tokens_only = %d, complete_token = %d\n", whole_tokens_only, complete_token);
            rule = NUMEX_NULL_RULE;
            last_was_separator = false;
            prev_rule_was_number = false;
            state.state = NUMEX_SEARCH_STATE_SKIP_TOKEN;
            continue;
        } else if (rule.left_context_type == NUMEX_LEFT_CONTEXT_CONCAT_ONLY_IF_NUMBER && !prev_rule_was_number) {
            log_debug("LEFT_CONTEXT_CONCAT_ONLY_IF_NUMBER, no context\n");
            prev_rule = rule;
@@ -885,7 +892,6 @@ numex_result_array *convert_numeric_expressions(char *str, char *lang) {
            continue;
        }

        prev_rule_was_number = prev_rule_was_number || prev_rule.rule_type != NUMEX_NULL;

        if (rule.rule_type != NUMEX_STOPWORD) {
@@ -903,6 +909,7 @@ numex_result_array *convert_numeric_expressions(char *str, char *lang) {
            if (rule.right_context_type == NUMEX_RIGHT_CONTEXT_NONE && !whole_tokens_only) {
                number_finished = true;
            }

            log_debug("rule is ordinal\n");
        }

@@ -941,6 +948,7 @@ numex_result_array *convert_numeric_expressions(char *str, char *lang) {
            log_debug("Adding phrase, value=%" PRId64 "\n", result.value);
            result = NULL_NUMEX_RESULT;
            number_finished = false;
            rule = prev_rule = NUMEX_NULL_RULE;
        }

        prev_state = state;
@@ -1060,6 +1068,7 @@ size_t possible_ordinal_digit_len(char *str, size_t len) {
    int32_t ch;

    size_t digit_len = 0;
    bool seen_first_digit = false;

    while (idx < len) {
        ssize_t char_len = utf8proc_iterate(ptr, len, &ch);
@@ -1071,10 +1080,14 @@ size_t possible_ordinal_digit_len(char *str, size_t len) {
        // 0-9 only for this
        is_digit = ch >= 48 && ch <= 57;

        if ((idx == 0 && !is_digit) || (idx > 0 && is_digit && !last_was_digit)) {
        if ((seen_first_digit && is_digit && !last_was_digit)) {
            return 0;
        }

        if (is_digit && !seen_first_digit) {
            seen_first_digit = true;
        }

        if (is_digit) {
            digit_len += char_len;
        }
@@ -1124,23 +1137,115 @@ size_t ordinal_suffix_len(char *str, size_t len, char *lang) {
    return 0;
}

static inline bool is_roman_numeral_char(char c) {
    return (c == 'i' ||
            c == 'v' ||
            c == 'x' ||
            c == 'l' ||
            c == 'c' ||
            c == 'd' ||
            c == 'm' ||
            c == 'I' ||
            c == 'V' ||
            c == 'X' ||
            c == 'L' ||
            c == 'C' ||
            c == 'D' ||
            c == 'M');
}

static inline bool is_likely_single_roman_numeral_char(char c) {
    return (c == 'i' ||
            c == 'v' ||
            c == 'x' ||
            c == 'I' ||
            c == 'V' ||
            c == 'X');
}

bool is_valid_roman_numeral(char *str, size_t len) {
    char *copy = strndup(str, len);
    if (copy == NULL) return false;

    numex_result_array *results = convert_numeric_expressions(copy, LATIN_LANGUAGE_CODE);
    if (results == NULL) {
        free(copy);
        return false;
    }

    bool ret = results->n == 1 && results->a[0].len == len;
    numex_result_array_destroy(results);
    free(copy);
    return ret;
}

bool is_likely_roman_numeral_len(char *str, size_t len) {
    bool seen_roman = false;
    for (size_t i = 0; i < len; i++) {
        char c = *(str + i);
        if (c == 0) break;
        if ((len <= 2 && is_likely_single_roman_numeral_char(c)) || (len > 2 && is_roman_numeral_char(c))) {
            seen_roman = true;
        } else {
            return false;
        }
    }

    return seen_roman && is_valid_roman_numeral(str, len);
}

inline bool is_likely_roman_numeral(char *str) {
    return is_likely_roman_numeral_len(str, strlen(str));
}
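A usage sketch for the Roman numeral helpers above, assuming the numex module is set up so convert_numeric_expressions can consult the Latin ("la") rules:

#include <stdio.h>
#include "numex.h"

void roman_numeral_demo(void) {
    // "xiv" is all Roman numeral characters and should parse as a single numeral (14)
    printf("%d\n", is_likely_roman_numeral("xiv"));  // expected: 1
    // "in" fails fast: 'n' is not a Roman numeral character
    printf("%d\n", is_likely_roman_numeral("in"));   // expected: 0
    // strings of length <= 2 are held to the stricter i/v/x check, so "cd" is rejected early
    printf("%d\n", is_likely_roman_numeral("cd"));   // expected: 0
}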

char *replace_numeric_expressions(char *str, char *lang) {
    numex_result_array *results = convert_numeric_expressions(str, lang);
    if (results == NULL) return NULL;

    bool is_latin = string_equals(lang, LATIN_LANGUAGE_CODE);

    size_t len = strlen(str);

    char_array *replacement = char_array_new_size(len);
    size_t start = 0;
    size_t end = 0;

    for (int i = 0; i < results->n; i++) {
        numex_result_t result = results->a[i];
    bool have_valid_numex = false;
    numex_result_t result = NULL_NUMEX_RESULT;

    for (size_t i = 0; i < results->n; i++) {
        result = results->a[i];

        if (result.len == 0) {
            continue;
        }

        if (is_latin && result.len <= 2 && !is_likely_roman_numeral_len(str + result.start, result.len)) {
            continue;
        }
        have_valid_numex = true;
        break;
    }

    if (!have_valid_numex) {
        numex_result_array_destroy(results);
        return NULL;
    }

    for (size_t i = 0; i < results->n; i++) {
        result = results->a[i];

        if (result.len == 0) {
            continue;
        }

        if (is_latin && result.len <= 2 && !is_likely_roman_numeral_len(str + result.start, result.len)) {
            continue;
        }

        end = result.start;

        log_debug("lang=%s, start = %zu, len = %zu, value=%" PRId64 "\n", lang, result.start, result.len, result.value);
@@ -1170,3 +1275,4 @@ char *replace_numeric_expressions(char *str, char *lang) {

    return char_array_to_string(replacement);
}
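A sketch of calling replace_numeric_expressions directly (again assuming the numex tables are loaded; the exact replacement string depends on the language's numex rules):

#include <stdio.h>
#include <stdlib.h>
#include "numex.h"

void numex_demo(void) {
    // English ordinal words should be rewritten as digits, e.g. something like "20th st"
    char *replaced = replace_numeric_expressions("twentieth st", "en");
    if (replaced != NULL) {
        printf("%s\n", replaced);
        free(replaced);  // the caller owns the returned string
    }
}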
@@ -152,6 +152,9 @@ numex_result_array *convert_numeric_expressions(char *str, char *lang);
size_t ordinal_suffix_len(char *s, size_t len, char *lang);
size_t possible_ordinal_digit_len(char *str, size_t len);

bool is_likely_roman_numeral(char *str);
bool is_likely_roman_numeral_len(char *str, size_t len);

bool numex_table_write(FILE *file);
bool numex_table_save(char *filename);
181
src/place.c
Normal file
@@ -0,0 +1,181 @@
#include "place.h"
#include "address_parser.h"

static inline bool is_address_text_component(char *label) {
    return (string_equals(label, ADDRESS_PARSER_LABEL_HOUSE) ||
            string_equals(label, ADDRESS_PARSER_LABEL_ROAD) ||
            string_equals(label, ADDRESS_PARSER_LABEL_METRO_STATION) ||
            string_equals(label, ADDRESS_PARSER_LABEL_SUBURB) ||
            string_equals(label, ADDRESS_PARSER_LABEL_CITY_DISTRICT) ||
            string_equals(label, ADDRESS_PARSER_LABEL_CITY) ||
            string_equals(label, ADDRESS_PARSER_LABEL_STATE_DISTRICT) ||
            string_equals(label, ADDRESS_PARSER_LABEL_ISLAND) ||
            string_equals(label, ADDRESS_PARSER_LABEL_STATE) ||
            string_equals(label, ADDRESS_PARSER_LABEL_COUNTRY_REGION) ||
            string_equals(label, ADDRESS_PARSER_LABEL_COUNTRY) ||
            string_equals(label, ADDRESS_PARSER_LABEL_WORLD_REGION)
    );
}

language_classifier_response_t *place_languages(size_t num_components, char **labels, char **values) {
    if (num_components == 0 || values == NULL || labels == NULL) return NULL;

    language_classifier_response_t *lang_response = NULL;

    char *label;
    char *value;

    size_t total_size = 0;
    for (size_t i = 0; i < num_components; i++) {
        value = values[i];
        label = labels[i];
        if (is_address_text_component(label)) {
            total_size += strlen(value);
            // extra char for spaces
            if (i < num_components - 1) {
                total_size++;
            }
        }
    }

    char_array *combined = char_array_new_size(total_size);
    if (combined == NULL) {
        return NULL;
    }

    for (size_t i = 0; i < num_components; i++) {
        value = values[i];
        label = labels[i];
        if (is_address_text_component(label)) {
            char_array_cat(combined, value);
            if (i < num_components - 1) {
                char_array_cat(combined, " ");
            }
        }
    }

    char *combined_input = char_array_get_string(combined);

    lang_response = classify_languages(combined_input);

    char_array_destroy(combined);
    return lang_response;
}
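A hypothetical call to place_languages follows. The response struct's field names (num_languages, languages, probs) and the destroy helper are assumptions based on libpostal's language classifier API, not shown in this diff:

#include <stdio.h>
#include "place.h"

void language_demo(void) {
    char *labels[] = {"road", "city"};
    char *values[] = {"rua augusta", "lisboa"};

    language_classifier_response_t *response = place_languages(2, labels, values);
    if (response != NULL) {
        for (size_t i = 0; i < response->num_languages; i++) {
            // expected to rank Portuguese highly for this input
            printf("%s (%f)\n", response->languages[i], response->probs[i]);
        }
        language_classifier_response_destroy(response);
    }
}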

place_t *place_new(void) {
    place_t *place = calloc(1, sizeof(place_t));
    return place;
}

void place_destroy(place_t *place) {
    if (place == NULL) return;
    free(place);
}

place_t *place_from_components(size_t num_components, char **labels, char **values) {
    if (num_components == 0 || labels == NULL || values == NULL) {
        return NULL;
    }

    place_t *place = place_new();
    if (place == NULL) return NULL;

    for (size_t i = 0; i < num_components; i++) {
        char *value = values[i];
        char *label = labels[i];
        if (string_equals(label, ADDRESS_PARSER_LABEL_ROAD)) {
            if (place->street == NULL) {
                place->street = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_HOUSE)) {
            if (place->name == NULL) {
                place->name = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_HOUSE_NUMBER)) {
            if (place->house_number == NULL) {
                place->house_number = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_POSTAL_CODE)) {
            if (place->postal_code == NULL) {
                place->postal_code = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_CITY)) {
            if (place->city == NULL) {
                place->city = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_STATE)) {
            if (place->state == NULL) {
                place->state = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_COUNTRY)) {
            if (place->country == NULL) {
                place->country = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_SUBURB)) {
            if (place->suburb == NULL) {
                place->suburb = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_CITY_DISTRICT)) {
            if (place->city_district == NULL) {
                place->city_district = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_STATE_DISTRICT)) {
            if (place->state_district == NULL) {
                place->state_district = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_COUNTRY_REGION)) {
            if (place->country_region == NULL) {
                place->country_region = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_ISLAND)) {
            if (place->island == NULL) {
                place->island = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_WORLD_REGION)) {
            if (place->world_region == NULL) {
                place->world_region = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_UNIT)) {
            if (place->unit == NULL) {
                place->unit = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_TELEPHONE)) {
            if (place->telephone == NULL) {
                place->telephone = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_WEBSITE)) {
            if (place->website == NULL) {
                place->website = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_LEVEL)) {
            if (place->level == NULL) {
                place->level = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_PO_BOX)) {
            if (place->po_box == NULL) {
                place->po_box = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_BUILDING)) {
            if (place->building == NULL) {
                place->building = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_STAIRCASE)) {
            if (place->staircase == NULL) {
                place->staircase = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_ENTRANCE)) {
            if (place->entrance == NULL) {
                place->entrance = value;
            }
        } else if (string_equals(label, ADDRESS_PARSER_LABEL_METRO_STATION)) {
            if (place->metro_station == NULL) {
                place->metro_station = value;
            }
        }
    }

    return place;
}
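Finally, a short sketch of place_from_components. Note from the code above that the struct borrows the caller's value pointers (and only the first occurrence of each label wins), while place_destroy frees only the struct itself. The label strings and values here are hypothetical examples:

#include <stdio.h>
#include "place.h"

void place_demo(void) {
    char *labels[] = {"house", "house_number", "road", "city"};
    char *values[] = {"barboncino", "781", "franklin ave", "brooklyn"};

    place_t *place = place_from_components(4, labels, values);
    if (place != NULL) {
        printf("name=%s street=%s\n", place->name, place->street);
        place_destroy(place);  // values are borrowed, so they are not freed here
    }
}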
Some files were not shown because too many files have changed in this diff