Windows: download the zip package.

Linux: download the tar package.

Download page: https://www.elastic.co/downloads/elasticsearch

After extracting, run bin/elasticsearch (or bin\elasticsearch.bat on Windows).

To check that it started successfully, visit http://localhost:9200.
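For example, a quick check from Python (a minimal sketch, assuming the default port 9200 and no authentication; the requests package is just one option, any HTTP client works):

import requests

resp = requests.get('http://localhost:9200')
print(resp.json())  # a running node returns its name, cluster name and version info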

On Linux, Elasticsearch cannot be run as root.

Running it as a regular user may fail with:

java.nio.file.AccessDeniedException

Cause: the current user does not have permissions on the installation directory.

Fix: chown <linux-username> <elasticsearch-install-dir> -R

For example: chown elasticsearch /data/wwwroot/elasticsearch-6.2.4 -R

PS: other Java software that reports an AccessDeniedException can be fixed the same way: grant the running user the appropriate permissions on the relevant directory.

Code example

The code below implements a community (residential compound) search feature similar to Lianjia's.

It reads community names and addresses from files, writes them into ES, and then matches the corresponding communities by the code of the city they belong to plus a search keyword.

The code has three main parts:

1. Create the index

2. Write the data to ES in bulk

3. Search the data

Note:

The code targets an older ES version (2.x); newer versions use different field data types when creating the index mapping (see the sketch below).
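For reference, a rough sketch of the same mapping for a newer Elasticsearch (7.x and later, where mapping types were removed and the string type was replaced by text/keyword). This is only an illustration and is not used by the code below:

# Sketch only: a roughly equivalent mapping for Elasticsearch 7.x+ (assumption, not part of the original code)
_index_mappings_7x = {
  "mappings": {
    "properties": {
      "city_code": {"type": "keyword"},
      "name": {"type": "text"},
      "address": {"type": "text"}
    }
  }
}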

#coding:utf8
from __future__ import unicode_literals
import os
import time
import config
from datetime import datetime
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk

class ElasticSearch():
  def __init__(self, index_name,index_type,ip ="127.0.0.1"):
    '''
    :param index_name: index name
    :param index_type: index (mapping) type
    '''
    self.index_name =index_name
    self.index_type = index_type
    # Without username/password:
    #self.es = Elasticsearch([ip])
    # With username/password:
    self.es = Elasticsearch([ip],http_auth=('elastic', 'password'),port=9200)
  def create_index(self,index_name="ftech360",index_type="community"):
    '''
    Create the index (by default, index "ftech360" with mapping type "community") and its field mapping.
    :return:
    '''
    # Define the field mapping
    _index_mappings = {
      "mappings": {
        self.index_type: {
          "properties": {
            "city_code": {
              "type": "string",
              # "index": "not_analyzed"
            },
            "name": {
              "type": "string",
              # "index": "not_analyzed"
            },
            "address": {
              "type": "string",
              # "index": "not_analyzed"
            }
          }
        }

      }
    }
    if self.es.indices.exists(index=self.index_name) is True:
      self.es.indices.delete(index=self.index_name)
    res = self.es.indices.create(index=self.index_name, body=_index_mappings)
    print res

  def build_data_dict(self):
    name_dict = {}
    with open(os.path.join(config.datamining_dir,'data_output','house_community.dat')) as f:
      for line in f:
        line_list = line.decode('utf-8').split('\t')
        community_code = line_list[6]
        name = line_list[7]
        city_code = line_list[0]
        name_dict[community_code] = (name,city_code)

    address_dict = {}
    with open(os.path.join(config.datamining_dir,'data_output','house_community_detail.dat')) as f:
      for line in f:
        line_list = line.decode('utf-8').split('\t')
        community_code = line_list[6]
        address = line_list[10]
        address_dict[community_code] = address

    return name_dict,address_dict

  def bulk_index_data(self,name_dict,address_dict):
    '''
    Store the data in ES in batches with the bulk helper.
    :return:
    '''
    list_data = []
    for community_code, data in name_dict.items():
      tmp = {}
      tmp['code'] = community_code
      tmp['name'] = data[0]
      tmp['city_code'] = data[1]
      
      if community_code in address_dict:
        tmp['address'] = address_dict[community_code]
      else:
        tmp['address'] = ''

      list_data.append(tmp)
    ACTIONS = []
    for line in list_data:
      action = {
        "_index": self.index_name,
        "_type": self.index_type,
        "_id": line['code'], #_id 小区code
        "_source": {
          "city_code": line['city_code'],
          "name": line['name'],
          "address": line['address']
          }
      }
      ACTIONS.append(action)
      # Bulk-index all actions in a single request
    success, _ = bulk(self.es, ACTIONS, index=self.index_name, raise_on_error=True)
    # Single-document writes (much slower than bulk):
    #self.es.index(index=self.index_name,doc_type="doc_type_test",body = action)

    print('Performed %d actions' % success)

  def delete_index_data(self,id):
    '''
    Delete a single document from the index by id.
    :param id:
    :return:
    '''
    res = self.es.delete(index=self.index_name, doc_type=self.index_type, id=id)
    print res

  def get_data_id(self,id):
    res = self.es.get(index=self.index_name, doc_type=self.index_type,id=id)
    # Print the retrieved document
    print res['_source']['city_code'], res['_id'], res['_source']['name'], res['_source']['address']

  def get_data_by_body(self, name, city_code):
    # doc = {'query': {'match_all': {}}}
    doc = {
      "query": {
        "bool":{
          "filter":{
            "term":{
            "city_code": city_code
            }
          },
          "must":{
            "multi_match": {
              "query": name,
              "type":"phrase_prefix",
              "fields": ['name^3', 'address'],
              "slop":1,
              
              }

          }
        }
      }
    }
    _searched = self.es.search(index=self.index_name, doc_type=self.index_type, body=doc)
    data = _searched['hits']['hits']
    return data
     

if __name__=='__main__':
  # Write the data into ES
  obj = ElasticSearch("ftech360","community")
  obj.create_index()
  name_dict, address_dict = obj.build_data_dict()
  obj.bulk_index_data(name_dict,address_dict)

  # Read data back from ES
  obj2 = ElasticSearch("ftech360","community")
  obj2.get_data_by_body(u'保利','510100')
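
The return value of get_data_by_body is not used in the __main__ block above. As a usage sketch (reusing obj2 from the listing; the field names follow the mapping defined earlier), the returned hits could be printed like this:

for hit in obj2.get_data_by_body(u'保利', '510100'):
  source = hit['_source']
  print('%s %s %s' % (source['city_code'], source['name'], source['address']))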

That covers all of the material; thanks for reading and for your support.
