# Maoyan top-100 movie board scraping demo (爬取猫眼小demo)

import json
import time
import requests
from requests.exceptions import RequestException
from lxml import etree


def get_one_page(url):
    """Fetch one page of the Maoyan board and return its HTML text.

    Returns None when the request fails or the server responds with a
    non-200 status code, so callers must check before parsing.
    """
    headers = {
        # Pretend to be a desktop browser; the site rejects default UA strings.
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
    }
    try:
        # timeout keeps the script from hanging forever on a stalled connection
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        # Any connection/timeout error is treated as "page unavailable".
        return None
def parse_one_page(page_sourse):
    """Parse one board page and return a list of movie-info dicts.

    Each dict has the keys: index, image, title, actor, time, score.
    Returns an empty list when page_sourse is falsy (e.g. the fetch
    failed and get_one_page returned None).
    """
    if not page_sourse:
        return []

    def _first(node, path, default=''):
        # xpath() returns a list; take the first hit or a default so one
        # missing element does not raise IndexError for the whole page.
        hits = node.xpath(path)
        return hits[0] if hits else default

    html = etree.HTML(page_sourse)
    movies = []
    for movie in html.xpath('//dl[@class="board-wrapper"]//dd'):
        info = {
            'index': _first(movie, './i/text()'),
            'image': _first(movie, './a/img[2]/@data-src'),
            'title': _first(movie, './/p[@class="name"]/a/text()'),
            'actor': _first(movie, './/p[@class="star"]/text()').strip(),
            'time': _first(movie, './/p[@class="releasetime"]/text()'),
            # the score is split across two <i> tags (integer + fraction part)
            'score': ''.join(movie.xpath('.//p[@class="score"]/i/text()')),
        }
        movies.append(info)
        print(info)
    return movies

def write_to_json(movies):
    """Append each movie dict to result.txt, one JSON object per line.

    The file is opened with an explicit UTF-8 encoding because
    ensure_ascii=False emits raw Chinese characters; relying on the
    platform default encoding (e.g. GBK on Windows) would raise
    UnicodeEncodeError.
    """
    with open('result.txt', 'a', encoding='utf-8') as f:
        for movie in movies:
            f.write(json.dumps(movie, ensure_ascii=False) + '\n')

def main(offset):
    """Scrape one board page at the given offset and persist the results.

    Silently skips the page when the HTTP fetch fails (get_one_page
    returns None) instead of handing None to the parser.
    """
    url = 'http://maoyan.com/board/4?offset=' + str(offset)
    page_sourse = get_one_page(url)
    if page_sourse is None:
        # Fetch failed; skip this page rather than crash downstream.
        return
    movies = parse_one_page(page_sourse)
    write_to_json(movies)

if __name__ == '__main__':
    # The board is paginated in steps of 10; crawl the first 5 pages.
    for offset in range(0, 50, 10):
        main(offset=offset)
        time.sleep(2)  # be polite: pause between consecutive requests