UAX URL email tokenizer

The uax_url_email tokenizer is like the standard tokenizer, except that it recognises URLs and email addresses as single tokens.

Example output

Python:
resp = client.indices.analyze(
    tokenizer="uax_url_email",
    text="Email me at [email protected]",
)
print(resp)

Ruby:

response = client.indices.analyze(
  body: {
    tokenizer: 'uax_url_email',
    text: 'Email me at john.smith@global-international.com'
  }
)
puts response

JavaScript:

const response = await client.indices.analyze({
  tokenizer: "uax_url_email",
  text: "Email me at john.smith@global-international.com",
});
console.log(response);

Console:

POST _analyze
{
  "tokenizer": "uax_url_email",
  "text": "Email me at john.smith@global-international.com"
}

The above sentence would produce the following terms:

[ Email, me, at, john.smith@global-international.com ]

while the standard tokenizer would produce:

[ Email, me, at, john.smith, global, international.com ]
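
For comparison, the same sentence can be run back through the standard tokenizer via the _analyze API. A minimal sketch using the Python client from the examples above (reading the tokens list that _analyze returns):

# Sketch: analyze the same sentence with the `standard` tokenizer to
# see the email address split apart (assumes the same `client` as above).
resp = client.indices.analyze(
    tokenizer="standard",
    text="Email me at john.smith@global-international.com",
)
# Prints: ['Email', 'me', 'at', 'john.smith', 'global', 'international.com']
print([token["token"] for token in resp["tokens"]])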

Configuration

The uax_url_email tokenizer accepts the following parameters:

max_token_length

The maximum token length. If a token is seen that exceeds this length then it is split at max_token_length intervals. Defaults to 255.

Example configuration

In this example, we configure the uax_url_email tokenizer to have a max_token_length of 5 (for demonstration purposes):

Python:

resp = client.indices.create(
    index="my-index-000001",
    settings={
        "analysis": {
            "analyzer": {
                "my_analyzer": {
                    "tokenizer": "my_tokenizer"
                }
            },
            "tokenizer": {
                "my_tokenizer": {
                    "type": "uax_url_email",
                    "max_token_length": 5
                }
            }
        }
    },
)
print(resp)

resp1 = client.indices.analyze(
    index="my-index-000001",
    analyzer="my_analyzer",
    text="[email protected]",
)
print(resp1)

Ruby:

response = client.indices.create(
  index: 'my-index-000001',
  body: {
    settings: {
      analysis: {
        analyzer: {
          my_analyzer: {
            tokenizer: 'my_tokenizer'
          }
        },
        tokenizer: {
          my_tokenizer: {
            type: 'uax_url_email',
            max_token_length: 5
          }
        }
      }
    }
  }
)
puts response

response = client.indices.analyze(
  index: 'my-index-000001',
  body: {
    analyzer: 'my_analyzer',
    text: 'john.smith@global-international.com'
  }
)
puts response

JavaScript:

const response = await client.indices.create({
  index: "my-index-000001",
  settings: {
    analysis: {
      analyzer: {
        my_analyzer: {
          tokenizer: "my_tokenizer",
        },
      },
      tokenizer: {
        my_tokenizer: {
          type: "uax_url_email",
          max_token_length: 5,
        },
      },
    },
  },
});
console.log(response);

const response1 = await client.indices.analyze({
  index: "my-index-000001",
  analyzer: "my_analyzer",
  text: "[email protected]",
});
console.log(response1);

Console:

PUT my-index-000001
{
  "settings": {
    "analysis": {
      "analyzer": {
        "my_analyzer": {
          "tokenizer": "my_tokenizer"
        }
      },
      "tokenizer": {
        "my_tokenizer": {
          "type": "uax_url_email",
          "max_token_length": 5
        }
      }
    }
  }
}

POST my-index-000001/_analyze
{
  "analyzer": "my_analyzer",
  "text": "[email protected]"
}

The example above produces the following terms:

[ john, smith, globa, l, inter, natio, nal.c, om ]
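
As a side note, max_token_length can also be tried without creating an index, since the _analyze API accepts an inline tokenizer definition. A minimal Python sketch, assuming the same client as above and mirroring the my_tokenizer definition from the example configuration:

# Sketch: pass an inline tokenizer definition to _analyze instead of
# referencing a tokenizer registered in index settings.
resp = client.indices.analyze(
    tokenizer={
        "type": "uax_url_email",
        "max_token_length": 5,
    },
    text="john.smith@global-international.com",
)
# Prints: ['john', 'smith', 'globa', 'l', 'inter', 'natio', 'nal.c', 'om']
print([token["token"] for token in resp["tokens"]])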