package edu.uci.ics.crawler4j.crawler

import com.github.tomakehurst.wiremock.junit.WireMockRule
import edu.uci.ics.crawler4j.fetcher.PageFetcher
import edu.uci.ics.crawler4j.robotstxt.RobotstxtConfig
import edu.uci.ics.crawler4j.robotstxt.RobotstxtServer
import edu.uci.ics.crawler4j.url.WebURL
import org.junit.Rule
import org.junit.rules.TemporaryFolder
import spock.lang.Specification

import static com.github.tomakehurst.wiremock.client.WireMock.*
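
/**
 * Crawls a small WireMock-served site and checks that the crawler neither follows
 * links marked rel="nofollow" nor links found on a page whose robots meta tag
 * declares nofollow.
 */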
class NoFollowTest extends Specification {

    @Rule
    public TemporaryFolder temp = new TemporaryFolder()

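    // WireMockRule with no arguments listens on WireMock's default port 8080,
    // which the seed URL below relies on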
    @Rule
    public WireMockRule wireMockRule = new WireMockRule()

    def "ignore nofollow links"() {
        given: "an index page with two links"
        stubFor(get(urlEqualTo("/some/index.html"))
                .willReturn(aResponse()
                        .withStatus(200)
                        .withHeader("Content-Type", "text/html")
                        .withBody(
                                $/<html>
                                    <body>
                                        <a href="/some/page1.html" rel="nofollow">should not visit this</a>
                                        <a href="/some/page2.html">link to a nofollow page</a>
                                    </body>
                                </html>/$
                        )))
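        // page1 and page3 share one stub; the test expects neither to be requested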
        stubFor(get(urlPathMatching("/some/page(1|3).html"))
                .willReturn(aResponse()
                        .withStatus(200)
                        .withHeader("Content-Type", "text/html")
                        .withBody(
                                $/<html>
                                    <body>
                                        <h1>title</h1>
                                    </body>
                                </html>/$)))
        stubFor(get(urlPathMatching("/some/page2.html"))
                .willReturn(aResponse()
                        .withStatus(200)
                        .withHeader("Content-Type", "text/html")
                        .withBody(
                                $/<html>
                                    <head>
                                        <meta name="robots" content="nofollow">
                                    </head>
                                    <body>
                                        <a href="/some/page3.html">should not visit this</a>
                                    </body>
                                </html>/$)))

        and: "an allow-everything robots.txt"
        stubFor(get(urlPathMatching("/robots.txt"))
                .willReturn(aResponse()
                        .withStatus(200)
                        .withHeader("Content-Type", "text/plain")
                        .withBody(
                                $/User-agent: *
                                Allow: /
                                /$)))

        when:
        CrawlConfig config = new CrawlConfig(
                crawlStorageFolder: temp.getRoot().getAbsolutePath()
                , politenessDelay: 100
                , maxConnectionsPerHost: 1
                , threadShutdownDelaySeconds: 1
                , threadMonitoringDelaySeconds: 1
                , cleanupDelaySeconds: 1
        )

        PageFetcher pageFetcher = new PageFetcher(config)
        RobotstxtServer robotstxtServer = new RobotstxtServer(new RobotstxtConfig(), pageFetcher)
        CrawlController controller = new CrawlController(config, pageFetcher, robotstxtServer)
        controller.addSeed "http://localhost:8080/some/index.html"

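        // start() is the blocking variant; it returns once the crawl has finished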
        controller.start(WebCrawler.class, 1)

        then: "nofollow links should not be visited"
        verify(exactly(1), getRequestedFor(urlEqualTo("/robots.txt")))
        verify(exactly(0), getRequestedFor(urlEqualTo("/some/page1.html")))
        verify(exactly(1), getRequestedFor(urlEqualTo("/some/page2.html")))
        verify(exactly(0), getRequestedFor(urlEqualTo("/some/page3.html")))
    }
}