-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathstreg.py
More file actions
64 lines (49 loc) · 1.76 KB
/
streg.py
File metadata and controls
64 lines (49 loc) · 1.76 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
import re
# Demo of common str methods followed by basic `re` usage.
s = "hello World!"
print(s.capitalize())   # first char upper-cased, rest lower-cased
print(s.lower())
print(s.upper())
print(s.count('o', 5))  # occurrences of 'o' starting at index 5
print(s.find('o', 5))   # index of first 'o' at/after index 5, or -1

# str.format with both positional and keyword placeholders.
s1 = '{0} and {1} are best {word1} {word2} in the world!'
print(s1.format('Python', 'Java', word1='programming', word2='language'))
s1_array = ['Python', 'and', 'Java', 'are', 'best', 'programming', 'language', 'in', 'the', 'world!']
print(' '.join(s1_array))

# Whitespace trimming and splitting.
s2 = ' ssss AAAA ssss '
print(s2.strip())                 # trim both ends
print(s2.lstrip().lstrip('s'))    # trim left whitespace, then leading 's' run
print(s2.rstrip().lstrip('s'))    # trim right whitespace, then leading 's' run
print(s2.replace(' ', '_'))
print(s2.strip().split(sep=' ', maxsplit=-1))

# re tests — always use raw strings for regex patterns so that
# backslash escapes like \d are passed through to the regex engine
# (a plain '...\d...' literal is an invalid escape, a SyntaxWarning
# in Python 3.12+). Inside a character position, '\-' and '-' match
# the same literal hyphen, so the output is unchanged.
print(re.match(r'^\d{3}-\d{3,8}$', '010-123456'))
if re.match(r'^\d{3}\\\d{3,8}$', r'010\123456'):
    print('OK')
else:
    print('failed')
print(re.split(r'\s+', 'a b c d'))
print(re.split(r'[\s\,]+', 'a, b, c d'))
print(re.split(r'[\s\,\;]+', 'a, b;; c, d'))
# -*- coding: utf-8 -*-
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule, CrawlSpider
from weibo.items import WeiboItem
class ExampleSpider(CrawlSpider):
    """Crawl JLU campus-news listing pages and yield one item per entry.

    Starts from one listing page and follows pagination links matching
    ``xykx/<digits>.htm``, parsing each listing page with ``parse_item``.
    """
    name = 'jlu'
    allowed_domains = ['jlu.edu.cn']
    start_urls = ['https://news.jlu.edu.cn/jdxw/xykx/66.htm']
    rules = [
        # xykx/66.htm — follow every paginated listing page and parse it.
        Rule(LinkExtractor(allow=r'xykx/\d+\.htm*'), callback='parse_item', follow=True,)
    ]

    def parse_item(self, response):
        """Yield a WeiboItem (name/title/info) for each <li> in the news list.

        Uses ``extract_first()`` instead of ``extract()[0]`` so a malformed
        entry with a missing <span> or <a> is skipped rather than raising
        IndexError and aborting the whole page.
        """
        content_list = response.xpath('//div[@class = "list-news-left fl"]/ul/li')
        for content in content_list:
            name = content.xpath('./span/text()').extract_first()    # presumably the publish date — confirm against the page
            title = content.xpath('./a/text()').extract_first()
            info = content.xpath('./a/@href').extract_first()        # link to the article
            # Skip incomplete entries instead of crashing mid-page.
            if name is None or title is None or info is None:
                continue
            item = WeiboItem()
            item['name'] = name
            item['title'] = title
            item['info'] = info
            yield item