
Example: downloading torrents scraped from web pages with Python's urllib2



Scraping torrent ("seed") files with the urllib2 and re modules.

Approach

1. Log in to the forum programmatically (only needed for boards that require a login; see the sketch after this list)

2. Open the target board

3. Walk the threads (fetch a given board page, then collect the URLs of all threads on that page)

4. Visit each thread URL in turn and extract the torrent download address from the page source (via a regular expression or a third-party HTML parser)

5. Fetch the torrent page and download the torrent file
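
Step 1 is not implemented in the main listing below. If the board does require a login, a cookielib-backed opener can carry the session cookie across all later requests. Here is a minimal sketch, assuming a plain form-based login; the login URL and the "username"/"password" field names are hypothetical and must be read from the target forum's own login form.

The code is as follows:

import urllib
import urllib2
import cookielib

cj = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
urllib2.install_opener(opener)  # every later urlopen reuses the session cookie

logindata = urllib.urlencode({'username': 'xxx', 'password': 'xxx'})
urllib2.urlopen("http://xxx.yyy.zzz/login.php", logindata)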

The code is as follows:

import urllib
import urllib2
import cookielib  # only needed if the board requires a login session
import re
import os

# site is the forum's base address; fid picks the board (both anonymized here)
site = "http://xxx.yyy.zzz/"
source = "thread0806.php?fid=x&search=&page="

btsave = "./clyzwm/"
if os.path.isdir(btsave):
 print btsave + " exists"
else:
 os.mkdir(btsave)

logfile = "./clyzwm/down.log"
errorfile = "./clyzwm/error.log"
sucfile = "./clyzwm/success.log"

headers = {'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36',
           'Referer' : 'http://xxx.yyy.zzz/'}

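# step 5: given a seed page url, derive the download form target and post it
# to retrieve the .torrent payload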
def btdown(url, dirpath):
 logger(logfile, "download file : " + url)
 try:
  #pagecode = urllib2.urlopen(url).read()
  #print pagecode
  btstep1 = re.findall('http://[\w]+\.[\w]+\.[\w]{0,4}/[\w]{2,6}\.php\?[\w]{2,6}=([\w]+)', url, re.I)
  #print btstep1
  if len(btstep1)>0:
   ref = btstep1[0]
   downsite = ""
   downdata = {}
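   # a long ref means the seed site hosts its own download.php form;
   # short refs go through the shared downhh.com gateway instead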
   if len(ref)>20:
    downsite = re.findall('http://www.[\w]+\.[\w]+/', url)[0]
    downsite = downsite + "download.php"
    reff = re.findall('input\stype=\"hidden\"\sname=\"reff\"\svalue=\"([\w=]+)\"', urllib2.urlopen(url).read(), re.I)[0]
    downdata = {'ref': ref, 'reff':reff, 'submit':'download'}
   else:
    downsite = "http://www.downhh.com/download.php"
    downdata = {'ref': ref, 'rulesubmit':'download'}
   #print "bt site - " +  downsite + "\n downdata:"
   #print downdata
   downdata = urllib.urlencode(downdata)
   downreq = urllib2.Request(downsite, downdata)
   downreq.add_header('User-Agent', 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36')
   downpost = urllib2.urlopen(downreq)
   stream = downpost.read(-1)
   if (len(stream) > 1000):
    downpost.close()
    name = btstep1[0]+ ".torrent"
    fw = open(dirpath + name, 'wb')  # binary mode: the torrent payload is not text
    fw.write(stream)
    fw.close()
    logger(sucfile, url+"\n")
   else:
    logger(errorfile, url+"\n")
 except urllib2.URLError, e:
  print e.reason

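# echo msg to stdout and append it to the given log file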
def logger(logfile, msg):
 print msg
 fw = open(logfile, 'a')
 fw.write(msg)
 fw.close()

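# steps 2-4: walk the board page by page, collect thread urls from the page
# html, then scan each thread for the seed-page link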
for i in range(1, 1000):
 logger(logfile, "\n\n\n@ page " + str(i) + " ...")
 part = site + source + str(i)

 content = urllib2.urlopen(part).read()
 content = content.decode('gbk').encode('utf8')  # the board pages are gbk-encoded
 #print content

 pages = re.findall('<a\s+href=\"(htm_data/[\d]+/[\d]+/[\d]+\.html).*?<\/a>', content, re.I)
 #print pages

 for page in pages:
  page = site + page
  #logger(logfile, "\n# visiting " + page + " ...")
  pagecode = urllib2.urlopen(page).read()
  #print pagecode
  zzjump = re.findall('http://www.viidii.info/\?http://[\w]+/[\w]+\?[\w]{2,6}=[\w]+' ,pagecode)  
  #zzjump = re.findall('http://www.viidii.info/\?http://[\w/\?=]*', pagecode)
  if len(zzjump) > 0:
   zzjump = zzjump[0]
   #print "- jump page - " + zzjump
   pagecode = urllib2.urlopen(page).read()
   zzpage = re.findall('http://[\w]+\.[\w]+\.[\w]+/link[\w]?\.php\?[\w]{2,6}=[\w]+' ,pagecode)
   if len(zzpage) > 0:
    zzpage = zzpage[0]
    logger(logfile, "\n- zhongzi page -" + zzpage)
    btdown(zzpage, btsave)
   else:
    logger(logfile, "\n. not found .")
  else:
   logger(logfile, "\n... not found ...")

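Note that everything above targets Python 2; urllib2 no longer exists in Python 3, where the same calls live in urllib.request and urllib.parse. Below is a minimal Python 3 sketch of the form post at the heart of btdown, reusing the same (hypothetical) downsite endpoint and downdata fields from the listing.

The code is as follows:

import urllib.parse
import urllib.request

def btdown_py3(downsite, downdata, savepath):
    data = urllib.parse.urlencode(downdata).encode('ascii')
    req = urllib.request.Request(downsite, data,
                                 headers={'User-Agent': 'Mozilla/5.0'})
    stream = urllib.request.urlopen(req).read()
    if len(stream) > 1000:  # same crude sanity check as the original
        with open(savepath, 'wb') as fw:
            fw.write(stream)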