# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class CrspiderSpider(CrawlSpider):
    """Crawl news articles under the /xwzx/ news sections of china-railway.com.cn.

    Follows section index and pagination pages, and hands every article
    detail page (t<date>_<id>.html) to ``parse_item``.
    """

    name = 'crSpider'
    allowed_domains = ['china-railway.com.cn']
    start_urls = ['http://www.china-railway.com.cn/xwzx/ywsl/']

    # Scrapy applies only the FIRST rule whose extractor matches a link, so:
    #   1. the article rule (the only one with a callback) must come first;
    #   2. the section/pagination regexes are anchored with `$` and have their
    #      dots escaped, so they can no longer also match article URLs such as
    #      .../xwzx/ywsl/202003/t20200304_101019.html.
    # The original unanchored r'.../xwzx/[a-zA-Z]+/' matched article URLs too,
    # shadowing the callback rule — pages were crawled but parse_item never ran.
    rules = (
        # Article detail pages -> parse_item
        Rule(
            LinkExtractor(allow=r'http://www\.china-railway\.com\.cn/xwzx/.+t\d{8}_\d{6}\.html$'),
            callback='parse_item',
        ),
        # Section landing pages -> just follow
        Rule(
            LinkExtractor(allow=r'http://www\.china-railway\.com\.cn/xwzx/[a-zA-Z]+/$'),
            follow=True,
        ),
        # Section pagination pages -> just follow
        Rule(
            LinkExtractor(allow=r'http://www\.china-railway\.com\.cn/xwzx/[a-zA-Z]+/index_\d+\.html$'),
            follow=True,
        ),
    )

    def parse_item(self, response):
        """Log the article URL and print its <h1> node (the article title)."""
        self.logger.info('Hi, this is an item page! %s', response.url)
        print('-' * 40, '进入回调', '-' * 40, )
        newsName = response.xpath('//h1').get()
        print(newsName)
2020-03-13 12:38:25 [scrapy.core.engine] INFO: Spider opened
2020-03-13 12:38:25 [scrapy.extensions.logstats] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
2020-03-13 12:38:25 [scrapy.extensions.telnet] INFO: Telnet console listening on 127.0.0.1:6024
2020-03-13 12:38:25 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://www.china-railway.com.cn/xwzx/ywsl/> (referer: None)
2020-03-13 12:38:26 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://www.china-railway.com.cn/xwzx/ywsl/> (referer: http://www.china-railway.com.cn/xwzx/ywsl/)
2020-03-13 12:38:26 [scrapy.dupefilters] DEBUG: Filtered duplicate request: <GET http://www.china-railway.com.cn/xwzx/ywsl/> - no more duplicates will be shown (see DUPEFILTER_DEBUG to show all duplicates)
2020-03-13 12:38:27 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://www.china-railway.com.cn/xwzx/ywsl/202003/t20200304_101019.html> (referer: http://www.china-railway.com.cn/xwzx/ywsl/)
2020-03-13 12:38:28 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://www.china-railway.com.cn/xwzx/ywsl/202003/t20200305_101067.html> (referer: http://www.china-railway.com.cn/xwzx/ywsl/)
2020-03-13 12:38:29 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://www.china-railway.com.cn/xwzx/ywsl/202003/t20200305_101100.html> (referer: http://www.china-railway.com.cn/xwzx/ywsl/)
2020-03-13 12:38:31 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://www.china-railway.com.cn/xwzx/ywsl/202003/t20200306_101120.html> (referer: http://www.china-railway.com.cn/xwzx/ywsl/)
2020-03-13 12:38:31 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://www.china-railway.com.cn/xwzx/ywsl/202003/t20200307_101174.html> (referer: http://www.china-railway.com.cn/xwzx/ywsl/)
2020-03-13 12:38:32 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://www.china-railway.com.cn/xwzx/ywsl/202003/t20200310_101326.html> (referer: http://www.china-railway.com.cn/xwzx/ywsl/)
2020-03-13 12:38:34 [scrapy.core.engine] DEBUG: Crawled (200) <GET http://www.china-railway.com.cn/xwzx/ywsl/202003/t20200311_101362.html> (referer: http://www.china-railway.com.cn/xwzx/ywsl/)
1
kasper4649 2020-03-13 13:24:35 +08:00 via Android
第三个 rule 后面也加个逗号?
|
2
gsz2015 OP @kasper4649 加不加逗号都试过了😂,难道是 Scrapy 2.0 的问题吗
|
3
IanPeverell 2020-03-13 16:12:40 +08:00
你把单引号去掉试试,你传的应该是函数不是字符串
|
4
IanPeverell 2020-03-13 16:26:53 +08:00
@IanPeverell 哦,字符串也可以(捂脸逃)
|
5
gsz2015 OP @IanPeverell 刚刚解决了,是正则的问题,第一个正则也能匹配到第三个正则的 url,所以一直没有调用到 callback 😂
|