1 Elasticsearch的文档增删查改(CRUD)
## 新增文档(即便类型和索引不存在,也能增加)
PUT lqz/_doc/1
{
"name":"顾老二",
"age":30,
"from": "gu",
"desc": "皮肤黑、武器长、性格直",
"tags": ["黑", "长", "直"]
}
## 更新(两种都可以)
POST lqz/_doc/1/_update
{
"doc": {
"desc": "皮肤很黄,武器很长,性格很直",
"tags": ["很黄","很长", "很直"]
}
}
POST lqz/_update/1
{
"doc": {
"desc": "确实很黄,武器很长,性格很直",
"tags": ["很黄黄","很长", "很直"]
}
}
## 删除
DELETE lqz/_doc/4
### 简单查询
GET lqz/_doc/1
## 复杂查询很多操作---》es的操作,最重要的就是查询
# es使用场景---》搜索功能用----》原来数据在mysql中----》同步到es中---》以后只要mysql新增记录,都同步到es中---》搜索的时候,去es中搜索---》返回给前端,完成搜索功能
2 Elasticsearch之查询的两种方式
# 方式一:查询字符串(用的少)
GET lqz/_doc/_search?q=from:gu
GET lqz/_doc/_search?q=age:30
# 方式二:结构化查询
GET lqz/_doc/_search
{
"query": {
"match": {
"from": "gu"
}
}
}
GET lqz/_search
{
"query": {
"match": {
"age": 30
}
}
}
3 term与match查询
# 结构化查询
# match 和 term的区别 面试会问?
# match:会对搜索的关键词进行分词,按分词去搜索
# term:不会对搜索的关键字进行分词,而直接搜索,精准匹配
### term不分词,精准搜索----》武器很长---》搜不到结果
-存数据的时候,用了分词 【确实 很 黄,武器 很 长,性格 很 直】
### match分词 武器 长 ---》有结果
### 用的最多的都是分词的搜法---》分词的粒度 ----》分词器决定的(存的分词,搜的分词)
{
"query": {
"match": {
"age": 30
}
}
}
# match_all 查询所有
GET lqz/_search
{
"query": {
"match_all": {
}
}
}
## match_phrase 短语查询
GET t1/_doc/_search
{
"query": {
"match_phrase": {
"title": {
"query": "中国",
"slop": 4
}
}
}
}
# slop 把中国分词后,每个词之间有0--4个文字都能搜出来
## match_phrase_prefix 最左前缀查询
GET t3/_doc/_search
{
"query": {
"match_phrase_prefix": {
"desc": "bea"
}
}
}
## 多条件查询-->不能在match中加多个条件
GET lqz/_doc/_search
{
"query": {
"bool": {
"must": [
{
"match": {
"from": "gu"
}
},
{
"match": {
"age": "30"
}
}
]
}
}
}
4 Elasticsearch之排序查询
GET lqz/_doc/_search
{
"query": {
"match": {
"from": "gu"
}
},
"sort": [
{
"age": {
"order": "desc"
}
}
]
}
## desc 降序 asc 升序
## 不是什么数据类型都能排序
# 只支持数字和时间类型
5 Elasticsearch之分页查询
# "from": 2, 从第几条开始
# "size": 1 取几条
GET lqz/_doc/_search
{
"query": {
"match_all": {}
},
"from": 2,
"size": 1
}
6 Elasticsearch之布尔查询
# must(and) 与的条件
GET lqz/_doc/_search
{
"query": {
"bool": {
"must": [
{
"match": {
"from": "gu"
}
}
]
}
}
}
# should(or) 或者条件
GET lqz/_doc/_search
{
"query": {
"bool": {
"should": [
{
"match": {
"from": "gu"
}
},
{
"match": {
"age": 18
}
}
]
}
}
}
# must_not(not) 取反
GET lqz/_doc/_search
{
"query": {
"bool": {
"must_not": [
{
"match": {
"from": "gu"
}
},
{
"match": {
"age": 18
}
}
]
}
}
}
# filter
GET lqz/_doc/_search
{
"query": {
"bool": {
"must": [
{
"match": {
"from": "gu"
}
}
],
"filter": {
"range": {
"age": {
"gt": 25
}
}
}
}
}
}
# gt gte lt lte 用法
7 Elasticsearch之查询结果过滤
GET lqz/_doc/_search
{
"query": {
"match_all": {
}
},
"_source": ["name", "age"]
}
8 Elasticsearch之高亮查询
### 默认高亮样式
GET lqz/_doc/_search
{
"query": {
"match": {
"name": "石头"
}
},
"highlight": {
"fields": {
"name": {}
}
}
}
### 自定义高亮样式
GET lqz/_doc/_search
{
"query": {
"match": {
"desc": "貌美"
}
},
"highlight": {
"pre_tags": "<b class='key' style='color:red'>",
"post_tags": "</b>",
"fields": {
"desc": {}
}
}
}
9 Elasticsearch之聚合函数
# avg max min sum
select max(age) as my_max
GET lqz/_doc/_search
{
"query": {
"match_all": {
}
},
"aggs": {
"my_max": {
"max": {
"field": "age"
}
}
},
"_source": ["name", "age"]
}
GET lqz/_doc/_search
{
"query": {
"match": {
"from": "gu"
}
},
"aggs": {
"my_max": {
"max": {
"field": "age"
}
}
},
"size": 0
}
GET lqz/_doc/_search
{
"size": 0,
"query": {
"match_all": {}
},
"aggs": {
"age_group": {
"range": {
"field": "age",
"ranges": [
{
"from": 0,
"to": 26
},
{
"from": 26,
"to": 31
}
]
}
}
}
}
10 python 操作es
# elasticsearch:官方提供的,类似于原生操作,类似pymysql
https://github.com/elastic/elasticsearch-py
from elasticsearch import Elasticsearch
# 纯用requests模块,也可以实现
# import requests
# res=requests.put("http://localhost:9200/lqz5")
# print(res)
# 服务端 7.0.5版本 7的版本都行
# pip3 install elasticsearch==7.0.5
client=Elasticsearch("http://localhost:9200")
# 创建索引(Index)
# result = client.indices.create(index='user',ignore=400)
# print(result)
# 删除索引
# result = client.indices.delete(index='lqz3')
# print(result)
# 插入数据
'''
PUT news/_doc/1
{
"userid":"1",
"username":lqz,
"password":"123",
}
'''
# 把mysql的数据,同步到es中--》pymsql打开查询---》直接存到es中
# data = {'userid': '1', 'username': 'lqz','password':'123'}
# result = client.create(index='news', doc_type='_doc', id=1, body=data)
# print(result)
# 更新数据
'''
不用doc包裹会报错
ActionRequestValidationException[Validation Failed: 1: script or doc is missing
'''
# data ={'doc':{'userid': '1', 'username': 'lqz','password':'123ee','test':'test'}}
# result = client.update(index='news', doc_type='_doc', body=data, id=1)
# print(result)
# 删除数据
# result = client.delete(index='news', doc_type='_doc', id=1)
# print(result)
# 查询
# 查找所有文档
# query = {'query': {'match_all': {}}}
# 查找名字叫做jack的所有文档
# query = {'query': {'term': {'title': '国'}}}
query = {'query': {'match': {'title': '中国'}}}
# 查找年龄大于11的所有文档
# query = {'query': {'range': {'age': {'gt': 11}}}}
allDoc = client.search(index='t1',body=query)
print(allDoc)
# elasticsearch-dsl:高级库,类似于orm,像django的orm一样
from datetime import datetime
from elasticsearch_dsl import Document, Date, Nested, Boolean, analyzer, InnerDoc, Completion, Keyword, Text, Integer
from elasticsearch_dsl.connections import connections
connections.create_connection(hosts=["localhost"])
class Article(Document):
title = Text(analyzer='ik_max_word', search_analyzer="ik_max_word", fields={'title': Keyword()})
author = Keyword()
class Index:
name = 'myindex'
# def save(self, **kwargs):
# return super(Article, self).save(**kwargs)
if __name__ == '__main__':
# Article.init() # 创建映射
# 保存数据
# article = Article()
# article.title = "python测试开发岗位"
# article.author = "lqz"
# article.save() # 数据就保存了
# 查询数据
# s=Article.search()
# s = s.filter('match', title="开发")
# results = s.execute()
# print(results[0].title)
# 删除数据
# s = Article.search()
# s = s.filter('match', title="开发").delete()
# print(s)
# 修改数据
s = Article().search()
s = s.filter('match', title="测试")
results = s.execute()
print(results[0])
results[0].title="测试开发"
results[0].save()
ik分词器:中文分词器---》压缩包---》解压后放到es的plugins目录---》重启即可---》支持ik分词--》符合国人习惯
ik有两种分词方式:
-ik_smart:分的词少
-ik_max_word:分的词会多
11 搭建集群es
es使用两种不同的方式来发现对方:
广播--->在同一个网络中,只要开启多个es实例(用的少)
单播--->指定跟谁谁谁组成集群
# 4 台机器的集群
###机器一配置
cluster.name: my_es1
node.name: node1
network.host: 127.0.0.1
http.port: 9200
transport.tcp.port: 9300
discovery.zen.ping.unicast.hosts: ["127.0.0.1:9300", "127.0.0.1:9302", "127.0.0.1:9303", "127.0.0.1:9304"]
cluster.initial_master_nodes: ["127.0.0.1:9300"]
http.cors.enabled: true
http.cors.allow-origin: "*"
### 机器2配置
cluster.name: my_es1
node.name: node2
network.host: 127.0.0.1
http.port: 9202
transport.tcp.port: 9302
node.master: true
node.data: true
discovery.zen.ping.unicast.hosts: ["127.0.0.1:9300", "127.0.0.1:9302", "127.0.0.1:9303", "127.0.0.1:9304"]
http.cors.enabled: true
http.cors.allow-origin: "*"
###机器3配置
cluster.name: my_es1
node.name: node3
network.host: 127.0.0.1
http.port: 9203
transport.tcp.port: 9303
discovery.zen.ping.unicast.hosts: ["127.0.0.1:9300", "127.0.0.1:9302", "127.0.0.1:9303", "127.0.0.1:9304"]
http.cors.enabled: true
http.cors.allow-origin: "*"
### 机器4配置
cluster.name: my_es1
node.name: node4
network.host: 127.0.0.1
http.port: 9204
transport.tcp.port: 9304
node.master: true
node.data: true
discovery.zen.ping.unicast.hosts: ["127.0.0.1:9300", "127.0.0.1:9302", "127.0.0.1:9303", "127.0.0.1:9304"]
http.cors.enabled: true
http.cors.allow-origin: "*"
# 脑裂问题
discovery.zen.minimum_master_nodes: 3 # 集群节点个数除以2+1