admin 发表于 2022-12-8 15:52:44

Xpath爬取当当网

import requests
import time
from lxml import etree
import json


def json_data_save(url):
    """Fetch one Dangdang search-result page, extract book data via XPath,
    and save it to 'dsj_2002班_20200126057.json'.

    The saved object is a single dict of parallel lists:
    name / athor / publish / publish_time / price.

    :param url: full Dangdang search URL to scrape.
    """
    headers = {
      'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'
    }

    # Dangdang serves GBK-encoded pages, so decode explicitly.
    html = requests.get(url, headers=headers, timeout=2).content.decode('gbk')
    data = etree.HTML(html)
    lists = data.xpath('//ul[@class="bigimg"]/li')

    names = []
    athors = []
    publishs = []
    publish_times = []
    prices = []

    for lis in lists:
        names.append(lis.xpath('./a/@title'))

        # Linked titles in the author line: authors first, publisher last.
        author_links = lis.xpath('./p[@class="search_book_author"]/span/a/@title')
        athors.append(author_links)
        # Guard the [-1]: some items have no linked publisher at all.
        publishs.append(author_links[-1] if author_links else '')

        span_texts = lis.xpath('./p[@class="search_book_author"]/span/text()')
        # The second-to-last text node looks like ' /2020-01-01'; keep the date part.
        publish_times.append(
            span_texts[-2].split('/')[-1] if len(span_texts) >= 2 else ''
        )

        # BUG FIX: xpath() returns a LIST of text nodes; the original called
        # .split('¥') on the list itself, which raises AttributeError.
        # Take the first node's text, then strip the currency sign.
        price_nodes = lis.xpath('./p[@class="price"]/span[@class="search_now_price"]/text()')
        prices.append(price_nodes[0].split('¥')[-1] if price_nodes else '')

    dicts = {
        'name': names,
        'athor': athors,
        'publish': publishs,
        'publish_time': publish_times,
        'price': prices,
    }

    # 保存数据为json格式
    # BUG FIX: mode 'w' instead of 'a' — appending a second top-level JSON
    # object on a later run would make the file unparseable by json.load()
    # in open_json(). The 'with' block also closes the file itself; the
    # original 'finally: f.close()' would raise NameError if open() failed.
    try:
        with open('dsj_2002班_20200126057.json', 'w', encoding="utf-8") as f:
            f.write(json.dumps(dicts, ensure_ascii=False))
    except IOError as e:
        print(str(e))




def open_json(path):
    """Load the JSON file at *path*, print it, and return the parsed object.

    On any failure (missing file, malformed JSON, ...) the error is printed
    and None is returned instead of raising.

    :param path: path of the JSON file to read.
    :return: the parsed JSON value, or None on error.
    """
    # BUG FIX: the original's 'finally: f.close()' raised NameError whenever
    # open() itself failed (f was never bound); the 'with' statement already
    # guarantees the file is closed, so no explicit close is needed.
    try:
        with open(path, 'r', encoding='utf-8') as f:
            js_data = json.load(f)
        print('json数据:', js_data)
        return js_data
    except Exception as e:
        print(str(e))
        return None


if __name__ == '__main__':
    # Scrape the search results, give the write a moment, then read back.
    search_url = 'http://search.dangdang.com/?key=python爬虫&act=input'
    json_data_save(search_url)
    time.sleep(2)
    open_json('dsj_2002班_20200126057.json')
页: [1]
查看完整版本: Xpath爬取当当网