51Testing软件测试论坛

 找回密码
 (注-册)加入51Testing

QQ登录

只需一步,快速开始

微信登录,快人一步

手机号码,快捷登录

查看: 1366|回复: 2
打印 上一主题 下一主题

使用casperjs获取javascript渲染生成的html内容

[复制链接]

该用户从未签到

跳转到指定楼层
1#
发表于 2019-2-21 17:05:56 | 只看该作者 回帖奖励 |倒序浏览 |阅读模式

文章摘要:其实这里casperjs与python没有直接关系,主要依赖casperjs调用phantomjs webkit获取html文件内容。长期以来,爬虫抓取 客户端javascript渲染生成的html页面 都极为 困难, Java里面有 HtmlUnit , 而Python里,我们可以使用独立的跨平台的 Ca

  1. //USAGE: E:\toolkit\n1k0-casperjs-e3a77d0\bin>python casperjs site.js --url=http://spys.ru/free-proxy-list/IE/ --outputfile='temp.html'

  2. var fs = require('fs');
  3. var casper = require('casper').create({
  4. pageSettings: {
  5. loadImages: false,      
  6. loadPlugins: false,      
  7. userAgent: 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36 LBBROWSER'
  8. },
  9. logLevel: "debug",//日志等级
  10. verbose: true    // 记录日志到控制台
  11. });
  12. var url = casper.cli.raw.get('url');
  13. var outputfile = casper.cli.raw.get('outputfile');
  14. //请求页面
  15. casper.start(url, function () {
  16. fs.write(outputfile, this.getHTML(), 'w');
  17. });

  18. casper.run();
复制代码

sperJS。

创建site.js(接口文件,输入:url,输出:html file)

  1. import json
  2. import sys
  3. #import requests
  4. #import requests.utils, pickle
  5. from bs4 import BeautifulSoup
  6. import os.path,os
  7. import threading
  8. #from multiprocessing import Process, Manager
  9. from datetime import datetime
  10. import traceback
  11. import logging
  12. import re,random
  13. import subprocess
  14. import shutil
  15. import platform
  16. output_file =  os.path.join(os.path.dirname(os.path.realpath(__file__)),'proxy.txt')
  17. global_log  = 'http_proxy' + datetime.now().strftime('%Y-%m-%d') + '.log'
  18. if not os.path.exists(os.path.join(os.path.dirname(os.path.realpath(__file__)),'logs')):
  19.   os.mkdir(os.path.join(os.path.dirname(os.path.realpath(__file__)),'logs'))
  20. global_log = os.path.join(os.path.dirname(os.path.realpath(__file__)),'logs',global_log)
  21. logging.basicConfig(level=logging.DEBUG,format='[%(asctime)s] [%(levelname)s] [%(module)s] [%(funcName)s] [%(lineno)d] %(message)s',filename=global_log,filemode='a')
  22. log = logging.getLogger(__name__)
  23. #manager = Manager()
  24. #PROXY_LIST = manager.list()
  25. mutex = threading.Lock()
  26. PROXY_LIST = []
  27. def isWindows():
  28.   if "Windows" in str(platform.uname()):
  29.   return True
  30.   else:
  31.   return False
  32. def getTagsByAttrs(tagName,pageContent,attrName,attrRegValue):
  33.   soup = BeautifulSoup(pageContent)                                                                                                                                                                                          
  34.   return soup.find_all(tagName, { attrName : re.compile(attrRegValue) })
  35. def getTagsByAttrsExt(tagName,filename,attrName,attrRegValue):
  36.   if os.path.isfile(filename):
  37.   f = open(filename,'r')          
  38.   soup = BeautifulSoup(f)
  39.   f.close()
  40.   return soup.find_all(tagName, { attrName : re.compile(attrRegValue) })
  41.   else:
  42.   return None
  43. class Site1Thread(threading.Thread):
  44.   def __init__(self,outputFilePath):
  45.     threading.Thread.__init__(self)
  46.   self.outputFilePath = outputFilePath
  47.   self.fileName = str(random.randint(100,1000)) + ".html"
  48.   self.setName('Site1Thread')
  49.   def run(self):
  50.   site1_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),'site.js')
  51.   site2_file = os.path.join(self.outputFilePath,'site.js')
  52.   if not os.path.isfile(site2_file) and os.path.isfile(site1_file):
  53.     shutil.copy(site1_file,site2_file)
  54.   #proc = subprocess.Popen(["bash","-c", "cd %s && ./casperjs site.js --url=http://spys.ru/free-proxy-list/IE/ --outputfile=%s" % (self.outputFilePath,self.fileName) ],stdout=subprocess.PIPE)
  55.   if isWindows():
  56.     proc = subprocess.Popen(["cmd","/c", "%s/casperjs site.js --url=http://spys.ru/free-proxy-list/IE/ --outputfile=%s" % (self.outputFilePath,self.fileName) ],stdout=subprocess.PIPE)
  57.   else:
  58.     proc = subprocess.Popen(["bash","-c", "cd %s && ./casperjs site.js --url=http://spys.ru/free-proxy-list/IE/ --outputfile=%s" % (self.outputFilePath,self.fileName) ],stdout=subprocess.PIPE)
  59.   out=proc.communicate()[0]
  60.   htmlFileName = ''
  61.   #因为输出路径在windows不确定,所以这里加了所有可能的路径判断
  62.   if os.path.isfile(self.fileName):
  63.     htmlFileName = self.fileName
  64.   elif os.path.isfile(os.path.join(self.outputFilePath,self.fileName)):
  65.     htmlFileName = os.path.join(self.outputFilePath,self.fileName)
  66.   elif os.path.isfile(os.path.join(os.path.dirname(os.path.realpath(__file__)),self.fileName)):
  67.     htmlFileName = os.path.join(os.path.dirname(os.path.realpath(__file__)),self.fileName)       
  68.   if (not os.path.isfile(htmlFileName)):
  69.     print 'Failed to get html content from http://spys.ru/free-proxy-list/IE/'
  70.     print out
  71.     sys.exit(3)       
  72.   mutex.acquire()
  73.   PROXYList= getTagsByAttrsExt('font',htmlFileName,'class','spy14[p=28, null, left][color=rgb(61, 70, 77)][font=&quot][size=16px]
  74. [/size][/font][/color][/p]
  75. )
  76.   for proxy in PROXYList:
  77.     tdContent = proxy.renderContents()
  78.     lineElems = re.split('[<>]',tdContent)
  79.     if re.compile(r'\d+').search(lineElems[-1]) and re.compile('(\d+\.\d+\.\d+)').search(lineElems[0]):
  80.     print lineElems[0],lineElems[-1]
  81.     PROXY_LIST.append("%s:%s" % (lineElems[0],lineElems[-1]))
  82.   mutex.release()
  83.   try:
  84.     if os.path.isfile(htmlFileName):
  85.     os.remove(htmlFileName)
  86.   except:
  87.     pass
  88. if __name__ == '__main__':
  89.   try:
  90.   if(len(sys.argv)) < 2:
  91.     print "Usage:%s [casperjs path]" % (sys.argv[0])
  92.     sys.exit(1)       
  93.   if not os.path.exists(sys.argv[1]):
  94.     print "casperjs path: %s does not exist!" % (sys.argv[1])
  95.     sys.exit(2)       
  96.   if os.path.isfile(output_file):
  97.     f = open(output_file)
  98.     lines = f.readlines()
  99.     f.close
  100.     for line in lines:
  101.     PROXY_LIST.append(line.strip())
  102.   thread1 = Site1Thread(sys.argv[1])
  103.   thread1.start()
  104.   thread1.join()
  105.   f = open(output_file,'w')
  106.   for proxy in set(PROXY_LIST):
  107.     f.write(proxy+"\n")
  108.   f.close()
  109.   print "Done!"
  110.   except SystemExit:
  111.   pass
  112.   except:
  113.     errMsg = traceback.format_exc()
  114.     print errMsg
  115.     log.error(errMsg)
复制代码



分享到:  QQ好友和群QQ好友和群 QQ空间QQ空间 腾讯微博腾讯微博 腾讯朋友腾讯朋友
收藏收藏
回复

使用道具 举报

本版积分规则

关闭

站长推荐上一条 /1 下一条

小黑屋|手机版|Archiver|51Testing软件测试网 ( 沪ICP备05003035号 关于我们

GMT+8, 2024-11-27 08:57 , Processed in 0.167996 second(s), 23 queries .

Powered by Discuz! X3.2

© 2001-2024 Comsenz Inc.

快速回复 返回顶部 返回列表