import time

from selenium import selenium
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor


class MySpider(CrawlSpider):
    name = 'cnbeta'
    # Note: only links whose domain is listed in allowed_domains are followed.
    allowed_domains = ['cnbeta.com']
    start_urls = ['http://www.jb51.net']

    rules = (
        # Extract links matching '/articles/.*\.htm', parse each one with
        # parse_page, and keep following links from the visited pages.
        Rule(SgmlLinkExtractor(allow=(r'/articles/.*\.htm',)),
             callback='parse_page', follow=True),
    )
    def __init__(self):
        CrawlSpider.__init__(self)
        self.verificationErrors = []
        # Connect to a Selenium RC server already running on localhost:4444
        # and drive a Firefox session through it.
        self.selenium = selenium("localhost", 4444, "*firefox", "http://www.jb51.net")
        self.selenium.start()

    def __del__(self):
        # Shut the browser session down when the spider goes away.
        # (The original also called CrawlSpider.__del__, which does not exist
        # and would raise AttributeError, so that call is omitted.)
        self.selenium.stop()
        print(self.verificationErrors)
    def parse_page(self, response):
        self.log('Hi, this is an item page! %s' % response.url)
        # The downloaded response body is not parsed here; instead the URL is
        # re-opened in the Selenium-controlled browser so that JavaScript-
        # rendered content becomes available, then the page is given a moment
        # to settle.
        sel = self.selenium
        sel.open(response.url)
        sel.wait_for_page_to_load("30000")
        time.sleep(2.5)
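The listing stops once the page has finished loading in the browser; the original also imported WebproxyItem and built a Scrapy Selector without using either. Below is a minimal sketch of how parse_page could continue inside MySpider and turn the JavaScript-rendered source into an item. It assumes the webproxy project defines a WebproxyItem with url and title fields; both field names are assumptions, since the item class is not shown.

# Module-level imports the sketch relies on:
from scrapy.selector import Selector
from webproxy.items import WebproxyItem

    # A possible full version of parse_page (hypothetical continuation):
    def parse_page(self, response):
        self.log('Hi, this is an item page! %s' % response.url)
        sel = self.selenium
        sel.open(response.url)
        sel.wait_for_page_to_load("30000")
        time.sleep(2.5)
        # Selenium RC holds the rendered DOM; wrap it in a Scrapy Selector so
        # the usual XPath extraction works on the JavaScript-built page.
        rendered = Selector(text=sel.get_html_source())
        item = WebproxyItem()
        item['url'] = response.url                                  # assumed field
        item['title'] = rendered.xpath('//title/text()').extract()  # assumed field
        yield item

For any of this to run, a Selenium RC server must already be listening on localhost:4444 (typically started with java -jar on the selenium-server standalone jar) before launching the crawl with scrapy crawl cnbeta.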