#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import threading
from queue import Queue, Empty
from lxml import etree
import requests
import json
import time

class ThreadCrawl(threading.Thread):
    def __init__(self, threadName, pageQueue, dataQueue):
        super(ThreadCrawl, self).__init__()
        self.threadName = threadName
        self.pageQueue = pageQueue
        self.dataQueue = dataQueue
        self.headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)"}

    def run(self):
        print("Starting " + self.threadName)
        while not CRAWL_EXIT:
            try:
                # The optional block argument defaults to True.
                # 1. If the queue is empty and block is True, get() does not
                #    return; it blocks until new data arrives in the queue.
                # 2. If the queue is empty and block is False, get() raises
                #    a queue.Empty exception immediately.
                page = self.pageQueue.get(False)
                url = "https://www.qiushibaike.com/text/page/" + str(page) + "/"
                content = requests.get(url, headers=self.headers).text
                time.sleep(1)
                self.dataQueue.put(content)
            except Empty:
                pass
        print("Exiting " + self.threadName)
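
# A minimal standalone sketch (illustration only, not part of the crawler)
# showing the non-blocking queue behavior described in the comments above:
# get(False) returns an item when one is available and raises queue.Empty
# when the queue has nothing left.
def _demo_nonblocking_get():
    q = Queue()
    q.put("page-1")
    print(q.get(False))   # prints "page-1"
    try:
        q.get(False)      # queue is now empty
    except Empty:
        print("queue.Empty raised as expected")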

class ThreadParse(threading.Thread):
    def __init__(self, threadName, dataQueue, filename, lock):
        super(ThreadParse, self).__init__()
        self.threadName = threadName
        self.dataQueue = dataQueue
        # Despite the name, this is a shared open file handle, not a path
        self.filename = filename
        self.lock = lock

    def run(self):
        print("Starting " + self.threadName)
        while not PARSE_EXIT:
            try:
                html = self.dataQueue.get(False)
                self.parse(html)
            except Empty:
                pass
        print("Exiting " + self.threadName)

    def parse(self, html):
        # Parse the raw page source into an HTML DOM
        html = etree.HTML(html)
        # Each post lives in a div whose id contains "qiushi_tag"
        # (selectors match the qiushibaike.com layout at the time of writing)
        node_list = html.xpath('//div[contains(@id, "qiushi_tag")]')
        for site in node_list:
            imgUrl = site.xpath('./div/a/img/@src')[0]
            username = site.xpath('./div/a/h2')[0].text.strip()
            rank = site.xpath('./div/div')[0].text
            content = site.xpath('.//div[@class="content"]/span')[0].text.strip()
            vote = site.xpath('.//span[@class="stats-vote"]/i')[0].text
            comments = site.xpath('.//span[@class="stats-comments"]//i')[0].text
            items = {
                "imgUrl": imgUrl,
                "username": username,
                "rank": rank,
                "content": content,
                "vote": vote,
                "comments": comments
            }
            # The file handle is shared by all parser threads, so writes
            # must be serialized with the lock
            with self.lock:
                self.filename.write(json.dumps(items, ensure_ascii=False) + "\n")
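
# A small self-contained sketch of how the XPath selectors above behave,
# run against a hand-written fragment; this markup is hypothetical and only
# mimics the structure the selectors expect, not the real qiushibaike page.
def _demo_xpath():
    fragment = """
    <div id="qiushi_tag_1">
      <div><a href="#"><img src="http://example.com/a.png"/><h2> user1 </h2></a></div>
      <div class="content"><span> some text </span></div>
    </div>
    """
    dom = etree.HTML(fragment)
    node = dom.xpath('//div[contains(@id, "qiushi_tag")]')[0]
    print(node.xpath('./div/a/img/@src')[0])                            # http://example.com/a.png
    print(node.xpath('./div/a/h2')[0].text.strip())                     # user1
    print(node.xpath('.//div[@class="content"]/span')[0].text.strip())  # some text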

# Exit flag for the crawl threads
CRAWL_EXIT = False
# Exit flag for the parse threads
PARSE_EXIT = False

def main():
    # Queue of page numbers to crawl (pages 1-20)
    pageQueue = Queue(20)
    for i in range(1, 21):
        pageQueue.put(i)

    # Queue of crawl results (raw HTML of each page); calling Queue()
    # with no size argument makes it unbounded
    dataQueue = Queue()

    # Open the output file once; writes are serialized with a lock
    filename = open("qiushi.json", "a", encoding="utf-8")
    lock = threading.Lock()

    # Names of the three crawl threads
    crawlList = ["Crawl thread 1", "Crawl thread 2", "Crawl thread 3"]
    # List holding the three crawl threads
    threadcrawl = []
    for threadName in crawlList:
        thread = ThreadCrawl(threadName, pageQueue, dataQueue)
        thread.start()
        threadcrawl.append(thread)

    # Names of the three parse threads
    parseList = ["Parse thread 1", "Parse thread 2", "Parse thread 3"]
    # List holding the three parse threads
    threadparse = []
    for threadName in parseList:
        thread = ThreadParse(threadName, dataQueue, filename, lock)
        thread.start()
        threadparse.append(thread)

    # Wait until pageQueue is empty, i.e. every page number has been
    # handed out to the crawl threads
    while not pageQueue.empty():
        time.sleep(0.1)  # brief sleep instead of a busy spin
    # pageQueue is empty, so let the crawl threads exit their loops
    global CRAWL_EXIT
    CRAWL_EXIT = True
    print("pageQueue is empty")
    for thread in threadcrawl:
        thread.join()
        print(thread.threadName + " joined")

    # Likewise, wait until every fetched page has been parsed
    while not dataQueue.empty():
        time.sleep(0.1)
    global PARSE_EXIT
    PARSE_EXIT = True
    for thread in threadparse:
        thread.join()
        print(thread.threadName + " joined")
    with lock:
        filename.close()
    print("Done. Thanks for using!")
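
# An alternative coordination sketch (illustration only, not the original
# design): Queue.task_done()/Queue.join() let a thread block until every
# queued item has been processed, which avoids the empty()-polling loops
# used in main() above.
def _demo_task_done():
    q = Queue()
    for i in range(3):
        q.put(i)

    def worker():
        while True:
            try:
                item = q.get(False)
            except Empty:
                break
            print("handled", item)
            q.task_done()  # mark this item as fully processed

    t = threading.Thread(target=worker)
    t.start()
    q.join()   # returns only once task_done() was called for every item
    t.join()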

if __name__ == "__main__":
    main()
