当前位置: 移动技术网 > IT编程>脚本编程>Python > python hbase读取数据发送kafka的方法

python hbase读取数据发送kafka的方法

2019年01月07日  | 移动技术网IT编程  | 我要评论

fm9230,陈可书博客,魔法学园异闻录

本例子实现从hbase获取数据,并发送kafka。

使用方法如下(完整示例代码):

#!/usr/bin/env python
#coding=utf-8
 
import sys
import time
import json
 
sys.path.append('/usr/local/lib/python3.5/site-packages')
from thrift import thrift
from thrift.transport import tsocket
from thrift.transport import ttransport
from thrift.protocol import tbinaryprotocol
from hbase1 import hbase #调用hbase thrif1
from hbase1.ttypes import *
from kafka import kafkaconsumer
from kafka import kafkaproducer
from kafka.errors import kafkaerror
import unittest
 
class hbaseopreator:
    """Wrapper around an HBase Thrift1 connection for filtered table scans.

    NOTE(review): the referenced names (ttransport.tbufferedtransport,
    tsocket.tsocket, hbase.client, tscan, scanneropenwithscan, ...) follow this
    file's import block verbatim; the upstream Thrift bindings normally use
    CamelCase (TTransport.TBufferedTransport, Hbase.Client, TScan,
    scannerOpenWithScan) -- confirm against the installed bindings.
    """

    def __init__(self, host, port, table='test'):
        """Open a buffered Thrift transport to the HBase gateway at host:port."""
        self.tablename = table
        self.transport = ttransport.tbufferedtransport(tsocket.tsocket(host, port))
        self.protocol = tbinaryprotocol.tbinaryprotocol(self.transport)
        self.client = hbase.client(self.protocol)
        self.transport.open()

    def __del__(self):
        # Best-effort cleanup; the transport may already be torn down at
        # interpreter shutdown.
        self.transport.close()

    def scantablefilter(self, table, *args):
        """Scan `table` for rows whose info:name is 'lilei' or 'lily'.

        Returns:
            UTF-8 encoded JSON bytes: a list of dicts with keys
            'key', 'name', 'age' and 'phone' (one dict per matching row).
        """
        rows = []
        scanner_id = None
        try:
            tablename = table
            scan = tscan()
            # Server-side filter: keep rows matching either name.
            scan.filterstring = "singlecolumnvaluefilter('info','name',=,'binary:lilei') or singlecolumnvaluefilter('info','name',=,'binary:lily')"
            # BUG FIX: the scan-attributes argument must be Python's None --
            # the original passed the undefined name `none` (NameError).
            scanner_id = self.client.scanneropenwithscan(tablename, scan, None)
            result = self.client.scannerget(scanner_id)
            while result:
                for r in result:
                    # BUG FIX: build a fresh dict per row. The original reused
                    # one dict created outside the loop, so every entry of the
                    # returned list aliased the last row scanned.
                    rows.append({
                        'key': r.row,
                        'name': r.columns.get('info:name').value,
                        'age': r.columns.get('info:age').value,
                        'phone': r.columns.get('info:phone').value,
                    })
                result = self.client.scannerget(scanner_id)
            return json.dumps(rows).encode(encoding="utf-8")
        finally:
            # BUG FIX: release the server-side scanner (the original leaked it);
            # best-effort so a close failure does not mask a scan error.
            if scanner_id is not None:
                try:
                    self.client.scannerclose(scanner_id)
                except Exception:
                    pass
            print("scan finish")
 
def sendkfafkaproduct(data):
    """Send each element of `data` to the local Kafka topic 'test'.

    Args:
        data: iterable of message values acceptable to the producer
              (bytes, per the producer's default serializer -- TODO confirm).

    BUG FIX: the original ended with `while true:` -- `true` is an undefined
    name (NameError at runtime), and even spelled `True` the loop would have
    re-sent the entire payload forever. That tail is removed. The producer is
    now flushed and closed so buffered messages are actually delivered.
    """
    producer = kafkaproducer(bootstrap_servers=['localhost:9092'])
    try:
        for d in data:
            producer.send('test', key=b'lxs', value=d)
            time.sleep(5)  # throttle between sends; presumably demo pacing -- TODO confirm
            print(d)
    finally:
        producer.flush()
        producer.close()
 
if __name__ == '__main__':
    # Demo driver: scan the student table through the Thrift gateway and
    # print the JSON payload. Kafka publishing is left disabled.
    operator = hbaseopreator('10.27.1.138', 9090)
    payload = operator.scantablefilter('ns_lbi:test_hbase_student')
    print(payload)
    # sendkfafkaproduct(payload)
 

以上这篇python hbase读取数据发送kafka的方法就是小编分享给大家的全部内容了,希望能给大家一个参考,也希望大家多多支持移动技术网。

如对本文有疑问,请在下面进行留言讨论,广大热心网友会与你互动!! 点击进行留言回复

相关文章:

验证码:
移动技术网