使用 requests + lxml(XPath)爬取豆瓣讨论精选

lxml XPath 的基本操作


import requests
import re
from lxml import etree


# 取页面HTML
def get_one_page():
url = "https://www.douban.com/group/explore"
headers = {
"User-Agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)"
}
response = requests.get(url, headers=headers)
if response.status_code == 200:
text = response.content.decode('utf-8')
return text
return None


# 解析页面
def parse_with_xpath(html):
etree_html = etree.HTML(html)
# print(etree_html)
# 匹配所有文章标题
# channel_result = etree_html.xpath('//div[@class="channel-item"]')
# for channel in channel_result:
# title = channel.xpath('./div[@class="bd"]/h3/a/text()')[0]
# print(title)

# title_result = etree_html.xpath('//div[@class="channel-item"]/div[@class="bd"]/h3/a/text()')
# print(title_result)

# 匹配所有节点 //*
# result = etree_html.xpath('//*')
# print(result)
# print(len(result))

# 匹配所有子节点 //a 文本获取:text()
# result = etree_html.xpath('//a/text()')
# print(result)

# 查找元素子节点 /
# result = etree_html.xpath('//div/p/text()')
# print(result)

# 查找元素所有子孙节点 //
# result = etree_html.xpath('//div[@class="channel-item"]')
# print(len(result))
# result = etree_html.xpath('//div[@class="channel-item"] | //span[@class="pubtime"]/../span/a/text()')
# print(result)

# 父节点 ..
# result = etree_html.xpath('//span[@class="pubtime"]/../span/a/text()')
# print(result)

# 属性匹配 [@class="xxx"]
# 文本匹配 text() 获取所有文本//text()
# result = etree_html.xpath('//div[@class="article"]//text()')
# print(result)

# 属性获取 @href
# result = etree_html.xpath('//div[@class="article"]/div/div/@class')[0]
# result = etree_html.xpath('//div[@class="bd"]/h3/a/@href')
# print(result)

# 属性多值匹配 contains(@class 'xx')
# result = etree_html.xpath('//div[contains(@class, "grid-16-8")]//div[@class="likes"]/text()[1]')
# print(result)

# 多属性匹配 or, and, mod, //book | //cd, + - * div = != < > <= >=
# result = etree_html.xpath('//span[@class="pubtime" and contains(text(), "12-29")]/text()')
# print(result)

# 按序选择 [1] [last()] [poistion() < 3] [last() -2]
# 节点轴
# result = etree_html.xpath('//div/child::div[@class="likes"]/following-sibling::*')
# print(result)
# print(len(result))

# //li/ancestor::* 所有祖先节点
# //li/ancestor::div div这个祖先节点
# //li/attribute::* attribute轴,获取li节点所有属性值
# //li/child::a[@href="link1.html"] child轴,获取直接子节点
# //li/descendant::span 获取所有span类型的子孙节点
# //li/following::* 选取文档中当前节点的结束标记之后的所有节点
# //li/following-sibling::* 选取当前节点之后的所用同级节点

# result = etree_html.xpath('//div[@class="channel-item"][1]/following-sibling::*//span[@class="pubtime"]/text()')
# print(result)
# print(len(result))

# result = etree_html.xpath('//div[contains(@class, "channel-group-rec")]//div[@class="title"]/following::*[1]/text()')
# print(result)


def main():
html = get_one_page()
# print(html)
parse_with_xpath(html)


if __name__ == '__main__':
main()

使用 requests + lxml(XPath)爬取豆瓣的讨论精选并将其保存到 json 文件

import json

import requests

from lxml import etree


# 获取一页数据
def get_page(url):
headers = {
"User-Agent": "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)"
}
response = requests.get(url, headers=headers)
if response.status_code == 200:
text = response.content.decode('utf-8')
return text
return None


# 获取全部数据
def get_all_page():
all_list = []
for i in range(306):
page = i*30
url = "https://www.douban.com/group/explore?start=%d" % page
html = get_page(url)
# print(html)
# 解析页面
result_list = parse_with_xpath(html)
all_list.append(result_list)
return all_list


def parse_with_xpath(html):
parse = []
etree_html = etree.HTML(html)
items = etree_html.xpath('//div[@class="channel-item"]')
for item in items:
# 标题
title = item.xpath('./div[@class="bd"]/h3/a/text()')[0]
# 简介
p = item.xpath('./div[@class="bd"]/div/p/text()')[0]
# 来源
source = item.xpath('./div[@class="bd"]/div/span[@class="from"]/a/text()')[0]
# 发布时间
pubtime = item.xpath('./div[@class="bd"]/div/span[@class="pubtime"]/text()')[0]
# 多少人喜欢
like = item.xpath('./div[@class="likes"]/text()')[0]
# print(like)
# 详细页
href = item.xpath('./div[@class="bd"]/h3/a/@href')[0]
# print(href)
# 保存在数据库中
page_list = {}
page_list['title'] = title
page_list['href'] = href
page_list['content'] = p
page_list['source'] = source
page_list['time'] = pubtime
page_list['like'] = like
parse.append(page_list)
return parse


#保存json数据
def save_json(result_list):
result_json = json.dumps(result_list, ensure_ascii=False)
with open('douban.json', 'w', encoding='utf-8') as f:
f.write(result_json)


def main():
result = get_all_page()
print(len(result))
save_json(result)


if __name__ == '__main__':
main()