package com.canrd.webmagic.processor;

import com.canrd.webmagic.common.utils.DateUtil;
import com.canrd.webmagic.common.utils.KeywordUtil;
import com.canrd.webmagic.processor.config.Agent;
import com.canrd.webmagic.processor.download.SeleniumDownloader;
import com.canrd.webmagic.processor.pipeline.ArticlePipeline;
import lombok.extern.slf4j.Slf4j;
import org.apache.logging.log4j.core.util.UuidUtil;
import org.springframework.stereotype.Component;
import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.processor.PageProcessor;
import us.codecraft.webmagic.selector.Selectable;
import us.codecraft.webmagic.selector.XpathSelector;

import javax.annotation.Resource;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.List;
import java.util.Locale;

/**
 * @author: xms
 * @description: Processor for science.org journal search and article-list pages:
 *               extracts the title, publish date and link of each result card and
 *               launches a dedicated spider per qualifying article.
 * @date: 2024/4/1 14:19
 * @version: 1.0
 */
@Slf4j
@Component
public class Science4JournalSearchPageProcessor implements PageProcessor {

    @Resource
    private Science4JournalArticlePageProcessor science4JournalArticlePageProcessor;

    @Resource
    private SeleniumDownloader seleniumDownloader;

    @Resource
    private ArticlePipeline articlePipeline;

    /**
     * Site configuration for the crawl: retry count, sleep time between requests,
     * and a randomized User-Agent.
     */
    private Site site = Site.me().setRetryTimes(3).setSleepTime(5).setUserAgent(Agent.getRandom());
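    // Note: setSleepTime is in milliseconds, so 5 is an extremely short delay
    // between requests; a larger value such as 5000 (5 s) may have been intended,
    // but the original setting is kept unchanged above.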

    /**
     * Core extraction entry point: dispatches each fetched page to the matching
     * handler based on its URL.
     *
     * @param page the fetched page
     */
    @Override
    public void process(Page page) {
        if (page.getUrl().get().contains("doSearch")) {
            doSearch(page);
        } else {
            doArticleList(page);
        }
    }
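
    // Dispatch examples for process(): a URL containing "doSearch" goes to
    // doSearch, anything else to doArticleList. The doSearch URL shape below is
    // an assumption; the insights URL is the one used in main.
    //   .../action/doSearch?AllField=...                              -> doSearch(page)
    //   https://www.science.org/journal/science/insights?startPage=0  -> doArticleList(page)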

    /**
     * Handles search-result pages: every result card is considered (no keyword
     * filter), subject to the publish-date cutoff.
     *
     * @param page the fetched search-result page
     */
    private void doSearch(Page page) {
        String url = page.getUrl().get();
        /*
         * page.getHtml() returns the HTML of the URL this Spider was started with
         * (see Spider.create(...).addUrl(...) in main).
         * 1. Use xpath()/css() to select the target DOM nodes, here one node per
         *    search-result card.
         */
        Selectable selectable = page.getHtml().xpath("//div[@class=' search-result__body titles-results ']").select(new XpathSelector("div[@class='card pb-3 mb-4 border-bottom']"));
        List<Selectable> nodes = selectable.nodes();

        /*
         * Extract the fields of interest (title, publish time, link) from each card.
         */
        for (int i = 0; i < nodes.size(); i++) {
            String title = nodes.get(i).xpath("//div[@class='card pb-3 mb-4 border-bottom']/div").xpath("//div[@class='d-flex justify-content-between align-items-end']/div/span/h2/a/text()").get();
            String time = nodes.get(i).xpath("//div[@class='card-meta align-middle mb-2 text-uppercase text-darker-gray']/span").xpath("//time/text()").get();
            String link = nodes.get(i).links().get();
            SimpleDateFormat formatter = new SimpleDateFormat("dd MMMM yyyy", Locale.ENGLISH);
            try {
                Date publishTimeDateTime = formatter.parse(time);
                if (!publishTimeDateTime.before(DateUtil.localDate2Date(DateUtil.parseDate("2000-01-01", DateUtil.DATE)))) {
                    // Launch a dedicated spider for the article page itself.
                    Spider.create(science4JournalArticlePageProcessor)
                            .addUrl(link)
                            .addPipeline(articlePipeline)
                            .setDownloader(seleniumDownloader)
                            .setUUID(UuidUtil.getTimeBasedUuid().toString())
                            // Run with a single thread and start crawling.
                            .thread(1).run();
                    log.info("Search result page: {}, title: {}, article link: {}", url, title, link);
                }
            } catch (ParseException e) {
                log.error("Failed to parse publish time '{}' for article {}", time, link, e);
            }
        }

    }

    /**
     * Handles article-list pages (e.g. the insights listing): only articles whose
     * title matches the configured keywords are crawled, subject to the
     * publish-date cutoff.
     *
     * @param page the fetched article-list page
     */
    private void doArticleList(Page page) {
        String url = page.getUrl().get();
        /*
         * page.getHtml() returns the HTML of the URL this Spider was started with
         * (see Spider.create(...).addUrl(...) in main).
         * 1. Use xpath()/css() to select the target DOM nodes, here one node per
         *    article-list card.
         */
        Selectable selectable = page.getHtml().xpath("//div[@class=' search-result__body titles-results ']").select(new XpathSelector("div[@class='card pb-3 mb-4 border-bottom']"));
        List<Selectable> nodes = selectable.nodes();

        /*
         * Extract the fields of interest (title, publish time, link) from each card.
         */
        for (int i = 0; i < nodes.size(); i++) {
            String title = nodes.get(i).xpath("//div[@class='card pb-3 mb-4 border-bottom']/div").xpath("//div[@class='d-flex justify-content-between align-items-end']/div/span/h2/a/text()").get();
            // The publish <time> element sits in the third span of the card meta block.
            String time = nodes.get(i).xpath("//div[@class='card-meta align-middle mb-2 text-uppercase text-darker-gray']/span").nodes().get(2).xpath("//time/text()").get();
            String link = nodes.get(i).links().get();
            if (KeywordUtil.containKeywordsInTitle(title)) {
                SimpleDateFormat formatter = new SimpleDateFormat("dd MMMM yyyy", Locale.ENGLISH);
                try {
                    Date publishTimeDateTime = formatter.parse(time);
                    if (!publishTimeDateTime.before(DateUtil.localDate2Date(DateUtil.parseDate("2000-01-01", DateUtil.DATE)))) {
                        // Launch a dedicated spider for the article page itself.
                        // (Duplicated from doSearch; see the shared-helper sketch after this method.)
                        Spider.create(science4JournalArticlePageProcessor)
                                .addUrl(link)
                                .addPipeline(articlePipeline)
                                .setDownloader(seleniumDownloader)
                                .setUUID(UuidUtil.getTimeBasedUuid().toString())
                                // Run with a single thread and start crawling.
                                .thread(1).run();
                        log.info("Keyword-matched list page: {}, title: {}, article link: {}", url, title, link);
                    }
                } catch (ParseException e) {
                    log.error("Failed to parse publish time '{}' for article {}", time, link, e);
                }

            }
        }

    }
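
    /**
     * Not part of the original class: a minimal sketch of a helper that doSearch
     * and doArticleList could share, since both duplicate the publish-date cutoff
     * and the per-article spider launch. The method name and signature are
     * illustrative assumptions, not an existing API.
     */
    private void crawlIfAfterCutoff(String listUrl, String title, String link, String time) {
        SimpleDateFormat formatter = new SimpleDateFormat("dd MMMM yyyy", Locale.ENGLISH);
        try {
            Date publishTime = formatter.parse(time);
            // Skip anything published before 2000-01-01, mirroring the checks above.
            if (!publishTime.before(DateUtil.localDate2Date(DateUtil.parseDate("2000-01-01", DateUtil.DATE)))) {
                Spider.create(science4JournalArticlePageProcessor)
                        .addUrl(link)
                        .addPipeline(articlePipeline)
                        .setDownloader(seleniumDownloader)
                        .setUUID(UuidUtil.getTimeBasedUuid().toString())
                        .thread(1).run();
                log.info("List page: {}, title: {}, article link: {}", listUrl, title, link);
            }
        } catch (ParseException e) {
            log.error("Failed to parse publish time '{}' for article {}", time, link, e);
        }
    }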

    @Override
    public Site getSite() {
        return site;
    }

    public static void main(String[] args) {
        // Create a Spider and register this processor.
        // Note: instantiating the processor directly bypasses Spring injection,
        // so the @Resource fields above stay null; this entry point only
        // exercises the list-page extraction itself.
        Spider.create(new Science4JournalSearchPageProcessor())
                // Add the URL this Spider should crawl.
                .addUrl("https://www.science.org/journal/science/insights?startPage=0")
                .addPipeline(new ArticlePipeline())
                // Run with 5 threads and start crawling.
                .thread(5).run();
    }
}