指纹 Token 过滤器
对 Token 流进行排序并删除重复的 Token,然后将流连接成单个输出 Token。
例如,此过滤器将 [ the, fox, was, very, very, quick ]
Token 流更改为如下所示:
- 按字母顺序将 Token 排序为
[ fox, quick, the, very, very, was ]
- 删除
very
Token 的重复实例。 - 将 Token 流连接到单个输出 Token:
[ fox quick the very was ]
此过滤器生成的输出 Token 可用于对正文进行指纹识别和聚类,如 OpenRefine 项目中所述。
此过滤器使用 Lucene 的 FingerprintFilter。
示例
以下 analyze API 请求使用 fingerprint
过滤器为文本 zebra jumps over resting resting dog
创建单个输出 Token。
# 使用 whitespace 分词器和 fingerprint 过滤器分析示例文本。
resp = client.indices.analyze(
    tokenizer="whitespace",
    filter=[
        "fingerprint"
    ],
    text="zebra jumps over resting resting dog",
)
print(resp)
# 使用 whitespace 分词器和 fingerprint 过滤器分析示例文本。
response = client.indices.analyze(
  body: {
    tokenizer: 'whitespace',
    filter: [
      'fingerprint'
    ],
    text: 'zebra jumps over resting resting dog'
  }
)
puts response
// 使用 whitespace 分词器和 fingerprint 过滤器分析示例文本。
const response = await client.indices.analyze({
  tokenizer: "whitespace",
  filter: ["fingerprint"],
  text: "zebra jumps over resting resting dog",
});
console.log(response);
GET _analyze
{
  "tokenizer": "whitespace",
  "filter": [ "fingerprint" ],
  "text": "zebra jumps over resting resting dog"
}
过滤器生成以下 Token:
[ dog jumps over resting zebra ]
添加到分析器
以下 创建索引 API 请求使用 fingerprint
过滤器来配置新的自定义分析器。
# 创建索引,其自定义分析器使用 fingerprint 过滤器。
resp = client.indices.create(
    index="fingerprint_example",
    settings={
        "analysis": {
            "analyzer": {
                "whitespace_fingerprint": {
                    "tokenizer": "whitespace",
                    "filter": [
                        "fingerprint"
                    ]
                }
            }
        }
    },
)
print(resp)
# 创建索引,其自定义分析器使用 fingerprint 过滤器。
response = client.indices.create(
  index: 'fingerprint_example',
  body: {
    settings: {
      analysis: {
        analyzer: {
          whitespace_fingerprint: {
            tokenizer: 'whitespace',
            filter: [
              'fingerprint'
            ]
          }
        }
      }
    }
  }
)
puts response
// 创建索引,其自定义分析器使用 fingerprint 过滤器。
const response = await client.indices.create({
  index: "fingerprint_example",
  settings: {
    analysis: {
      analyzer: {
        whitespace_fingerprint: {
          tokenizer: "whitespace",
          filter: ["fingerprint"],
        },
      },
    },
  },
});
console.log(response);
PUT fingerprint_example
{
  "settings": {
    "analysis": {
      "analyzer": {
        "whitespace_fingerprint": {
          "tokenizer": "whitespace",
          "filter": [ "fingerprint" ]
        }
      }
    }
  }
}
可配置参数
自定义
要自定义 fingerprint
过滤器,请复制它以作为新的自定义 Token 过滤器的基础。您可以使用其可配置参数修改过滤器。
例如,以下请求创建一个自定义的 fingerprint
过滤器,该过滤器使用 +
来连接 Token 流。该过滤器还将输出 Token 限制为 100
个或更少的字符。
# 创建索引,定义一个自定义 fingerprint 过滤器:
# 以 "+" 连接 Token,并将输出限制为最多 100 个字符。
resp = client.indices.create(
    index="custom_fingerprint_example",
    settings={
        "analysis": {
            "analyzer": {
                "whitespace_": {
                    "tokenizer": "whitespace",
                    "filter": [
                        "fingerprint_plus_concat"
                    ]
                }
            },
            "filter": {
                "fingerprint_plus_concat": {
                    "type": "fingerprint",
                    "max_output_size": 100,
                    "separator": "+"
                }
            }
        }
    },
)
print(resp)
# 创建索引,定义一个自定义 fingerprint 过滤器:
# 以 '+' 连接 Token,并将输出限制为最多 100 个字符。
response = client.indices.create(
  index: 'custom_fingerprint_example',
  body: {
    settings: {
      analysis: {
        analyzer: {
          whitespace_: {
            tokenizer: 'whitespace',
            filter: [
              'fingerprint_plus_concat'
            ]
          }
        },
        filter: {
          fingerprint_plus_concat: {
            type: 'fingerprint',
            max_output_size: 100,
            separator: '+'
          }
        }
      }
    }
  }
)
puts response
// 创建索引,定义一个自定义 fingerprint 过滤器:
// 以 "+" 连接 Token,并将输出限制为最多 100 个字符。
const response = await client.indices.create({
  index: "custom_fingerprint_example",
  settings: {
    analysis: {
      analyzer: {
        whitespace_: {
          tokenizer: "whitespace",
          filter: ["fingerprint_plus_concat"],
        },
      },
      filter: {
        fingerprint_plus_concat: {
          type: "fingerprint",
          max_output_size: 100,
          separator: "+",
        },
      },
    },
  },
});
console.log(response);
PUT custom_fingerprint_example
{
  "settings": {
    "analysis": {
      "analyzer": {
        "whitespace_": {
          "tokenizer": "whitespace",
          "filter": [ "fingerprint_plus_concat" ]
        }
      },
      "filter": {
        "fingerprint_plus_concat": {
          "type": "fingerprint",
          "max_output_size": 100,
          "separator": "+"
        }
      }
    }
  }
}