// MatterPagePcoessor.java (1.77 KB)
package com.canrd.webmagic.processor;
import com.canrd.webmagic.processor.config.Agent;
import com.canrd.webmagic.processor.pipeline.ArticlePipeline;
import org.springframework.stereotype.Component;
import lombok.extern.slf4j.Slf4j;
import us.codecraft.webmagic.Page;
import us.codecraft.webmagic.Site;
import us.codecraft.webmagic.Spider;
import us.codecraft.webmagic.processor.PageProcessor;
import us.codecraft.webmagic.selector.Html;
@Component
@Slf4j
public class MatterPagePcoessor implements PageProcessor {

    /**
     * Crawler configuration: 3 retries per failed request, 5 ms pause between
     * requests, and a randomized User-Agent from {@link Agent}.
     * NOTE(review): a 5 ms sleep is extremely aggressive for a public site —
     * confirm this is intentional and consider raising it.
     */
    private final Site site = Site.me()
            .setRetryTimes(3)
            .setSleepTime(5)
            .setUserAgent(Agent.getRandom());

    /**
     * Routes each downloaded page by its URL: the Matter journal home page and
     * search-result pages are currently placeholders (no link extraction is
     * implemented, so nothing new is queued), while full-text article pages are
     * handed to {@link #doArticleContent(Page)}.
     *
     * @param page the downloaded page supplied by the WebMagic engine
     */
    @Override
    public void process(Page page) {
        String url = page.getUrl().get();
        // Home page — TODO: extract article/search links and queue them.
        if (url.equals("https://www.cell.com/matter/home")) {
            // intentionally empty: extraction not implemented yet
        }
        // Search-result page — TODO: extract article detail links.
        else if (url.contains("https://www.cell.com/action/doSearch?")) {
            // intentionally empty: extraction not implemented yet
        }
        // Article full-text (detail) page.
        else if (url.contains("https://www.cell.com/matter/fulltext/")) {
            doArticleContent(page);
        }
    }

    @Override
    public Site getSite() {
        return this.site;
    }

    /**
     * Handles an article full-text page. Field extraction is not implemented
     * yet; the raw HTML is only logged for inspection during development.
     *
     * @param page the downloaded article page
     */
    public void doArticleContent(Page page) {
        Html html = page.getHtml();
        // Parameterized DEBUG logging instead of dumping the entire document
        // through String.valueOf at INFO level.
        log.debug("article html for {}: {}", page.getUrl().get(), html);
        // The article URL doubles as its identifier once extraction is wired up.
        String articleCode = page.getUrl().get();
        // TODO: extract title/authors/body via html.xpath(...) and emit a result
        //       for ArticlePipeline to persist.
    }

    public static void main(String[] args) {
        // Create a Spider and attach this processor.
        Spider.create(new MatterPagePcoessor())
                // Seed URL to start crawling from.
                .addUrl("https://www.cell.com/matter/home")
                .addPipeline(new ArticlePipeline())
                // Run with a single worker thread (the original comment claimed
                // five threads, but thread(1) is what the code actually does).
                .thread(1).run();
    }
}