jsoup Crawler
Table of Contents
- 1. A brief introduction to jsoup crawlers
- 2. The code
- 2.1 POM dependencies
- 2.2 Image crawling
- 2.3 Image localization
- 3. Baidu Cloud link crawler
1. A brief introduction to jsoup crawlers
jsoup is a Java HTML parser. It can extract and manipulate HTML document data through the DOM, CSS selectors, and jQuery-like methods.
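To give a feel for that API, here is a minimal, self-contained example; the URL and the selector are placeholders and are not part of the project below:

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

public class JsoupQuickStart {
    public static void main(String[] args) throws Exception {
        // jsoup can fetch and parse a page directly (it can also parse an HTML string)
        Document doc = Jsoup.connect("http://www.example.com").get();
        // CSS selector: every <a> tag that carries an href attribute
        for (Element link : doc.select("a[href]")) {
            // jQuery-like accessors: attr() and text()
            System.out.println(link.attr("abs:href") + " -> " + link.text());
        }
    }
}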
The two crawlers in this post involve the following points:
1. Fetching page content with HttpClient
2. Parsing the page content with jsoup
3. Making the crawl incremental by using the Ehcache cache to skip URLs that have already been crawled (see the sketch after this list)
4. Saving the crawled data to the database
5. Working around hotlink protection on some sites by localizing their static resources (only images are handled here)
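A minimal sketch of the Ehcache-based URL de-duplication mentioned in point 3, assuming the ehcache.xml path and the cnblog cache name that appear later in this post:

import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;
import net.sf.ehcache.Element;

public class UrlDedupSketch {
    public static void main(String[] args) {
        // Path and cache name follow the crawler.properties / ehcache.xml shown later
        CacheManager manager = CacheManager.create("C://blogCrawler/ehcache.xml");
        Cache cache = manager.getCache("cnblog");

        String url = "http://www.example.com/some-post.html"; // placeholder URL
        if (cache.get(url) != null) {
            System.out.println("Already crawled in an earlier run, skip: " + url);
        } else {
            // ... crawl and persist the page here, then remember the URL
            cache.put(new Element(url, url));
        }

        cache.flush();      // persist the keys to disk so the next run sees them
        manager.shutdown();
    }
}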
2. The code
2.1 POM dependencies
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.zrh</groupId>
    <artifactId>T226_jsoup</artifactId>
    <version>0.0.1-SNAPSHOT</version>
    <packaging>jar</packaging>

    <name>T226_jsoup</name>
    <url>http://maven.apache.org</url>

    <properties>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
    </properties>

    <dependencies>
        <!-- MySQL JDBC driver -->
        <dependency>
            <groupId>mysql</groupId>
            <artifactId>mysql-connector-java</artifactId>
            <version>5.1.44</version>
        </dependency>
        <!-- HttpClient -->
        <dependency>
            <groupId>org.apache.httpcomponents</groupId>
            <artifactId>httpclient</artifactId>
            <version>4.5.2</version>
        </dependency>
        <!-- jsoup -->
        <dependency>
            <groupId>org.jsoup</groupId>
            <artifactId>jsoup</artifactId>
            <version>1.10.1</version>
        </dependency>
        <!-- Logging (log4j) -->
        <dependency>
            <groupId>log4j</groupId>
            <artifactId>log4j</artifactId>
            <version>1.2.16</version>
        </dependency>
        <!-- Ehcache -->
        <dependency>
            <groupId>net.sf.ehcache</groupId>
            <artifactId>ehcache</artifactId>
            <version>2.10.3</version>
        </dependency>
        <!-- Commons IO -->
        <dependency>
            <groupId>commons-io</groupId>
            <artifactId>commons-io</artifactId>
            <version>2.5</version>
        </dependency>
        <!-- fastjson -->
        <dependency>
            <groupId>com.alibaba</groupId>
            <artifactId>fastjson</artifactId>
            <version>1.2.47</version>
        </dependency>
    </dependencies>
</project>

2.2 Image crawling
Change the image URL below to the one you want to crawl:
private static String URL = "http://www.yidianzhidao.com/UploadFiles/img_1_446119934_1806045383_26.jpg";

2.3 Image localization
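Image localization means downloading every image referenced by a crawled page into the local blogImages directory and pointing the page at the local copy, so the source site's hotlink protection no longer matters. The downloader class itself is not reproduced in this post; below is a minimal sketch of the download-and-save step built on the HttpClient and commons-io dependencies above. The class name, the UUID-based file naming, and the hard-coded target directory (taken from the blogImages entry in crawler.properties below) are illustrative assumptions:

import java.io.File;
import java.io.InputStream;
import java.util.UUID;

import org.apache.commons.io.FileUtils;
import org.apache.http.HttpEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;

public class ImageLocalizerSketch {

    /**
     * Downloads one remote image into the local blogImages directory and returns
     * the local file name, so the page's <img src> can be rewritten to it.
     */
    public static String localize(CloseableHttpClient httpClient, String imgSrc) throws Exception {
        // Keep the original extension (simple handling), use a UUID so names never collide
        String ext = imgSrc.substring(imgSrc.lastIndexOf("."));
        String fileName = UUID.randomUUID().toString() + ext;
        File target = new File("D://blogCrawler/blogImages/" + fileName); // blogImages from crawler.properties

        HttpGet httpGet = new HttpGet(imgSrc);
        try (CloseableHttpResponse response = httpClient.execute(httpGet)) {
            if (response.getStatusLine().getStatusCode() == 200) {
                HttpEntity entity = response.getEntity();
                try (InputStream in = entity.getContent()) {
                    FileUtils.copyInputStreamToFile(in, target);
                }
            }
        }
        return fileName;
    }

    public static void main(String[] args) throws Exception {
        try (CloseableHttpClient httpClient = HttpClients.createDefault()) {
            String local = localize(httpClient,
                    "http://www.yidianzhidao.com/UploadFiles/img_1_446119934_1806045383_26.jpg");
            System.out.println("Saved as: " + local);
        }
    }
}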
crawler.properties
dbUrl=jdbc:mysql://localhost:3306/zrh?autoReconnect=true
dbUserName=root
dbPassword=123
jdbcName=com.mysql.jdbc.Driver
ehcacheXmlPath=C://blogCrawler/ehcache.xml
blogImages=D://blogCrawler/blogImages/
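crawler.properties points at C://blogCrawler/ehcache.xml, which the post does not show. A minimal sketch of what that file might contain, assuming a single disk-persistent cache named cnblog (the name the crawler code looks up); the store path and size limits are illustrative:

<?xml version="1.0" encoding="UTF-8"?>
<ehcache xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:noNamespaceSchemaLocation="http://ehcache.org/ehcache.xsd">
    <!-- Where Ehcache writes caches that are persisted to disk -->
    <diskStore path="C://blogCrawler/ehcacheStore"/>
    <!-- Cache used by the crawler to remember URLs that were already processed -->
    <cache name="cnblog"
           maxElementsInMemory="10000"
           eternal="true"
           overflowToDisk="true"
           diskPersistent="true"
           maxElementsOnDisk="100000"/>
</ehcache>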
log4j.properties

log4j.rootLogger=INFO, stdout,D

#Console
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.Target = System.out
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} method:%l%n%m%n

#D
log4j.appender.D = org.apache.log4j.RollingFileAppender
log4j.appender.D.File = C://blogCrawler/bloglogs/log.log
log4j.appender.D.MaxFileSize=100KB
log4j.appender.D.MaxBackupIndex=100
log4j.appender.D.Append = true
log4j.appender.D.layout = org.apache.log4j.PatternLayout
log4j.appender.D.layout.ConversionPattern = %-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n

DbUtil.java
package com.zrh.util;

import java.sql.Connection;
import java.sql.DriverManager;

/**
 * Database utility class
 * @author user
 */
public class DbUtil {

    /**
     * Get a connection
     * @return
     * @throws Exception
     */
    public Connection getCon() throws Exception {
        Class.forName(PropertiesUtil.getValue("jdbcName"));
        Connection con = DriverManager.getConnection(PropertiesUtil.getValue("dbUrl"),
                PropertiesUtil.getValue("dbUserName"), PropertiesUtil.getValue("dbPassword"));
        return con;
    }

    /**
     * Close a connection
     * @param con
     * @throws Exception
     */
    public void closeCon(Connection con) throws Exception {
        if (con != null) {
            con.close();
        }
    }

    public static void main(String[] args) {
        DbUtil dbUtil = new DbUtil();
        try {
            dbUtil.getCon();
            System.out.println("Database connection succeeded");
        } catch (Exception e) {
            e.printStackTrace();
            System.out.println("Database connection failed");
        }
    }
}

PropertiesUtil.java
package com.zrh.util;

import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;

/**
 * Properties utility class
 * @author user
 */
public class PropertiesUtil {

    /**
     * Get the value for a key from crawler.properties
     * @param key
     * @return
     */
    public static String getValue(String key) {
        Properties prop = new Properties();
        InputStream in = PropertiesUtil.class.getResourceAsStream("/crawler.properties");
        try {
            prop.load(in);
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            // close the stream so the properties file is not leaked on every lookup
            try {
                if (in != null) {
                    in.close();
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        return prop.getProperty(key);
    }
}

Now for the most important code:
BlogCrawlerStarter.java (core code)
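The full BlogCrawlerStarter listing is not reproduced in this extract. As a stand-in, here is a condensed sketch of the flow described in section 1: HttpClient fetches the list page, jsoup extracts the post links, Ehcache skips URLs seen in earlier runs, and new rows go into MySQL. The seed URL, the CSS selector, and the reuse of the t_jsoup_article insert statement are illustrative assumptions, not the author's exact code:

import java.sql.Connection;
import java.sql.PreparedStatement;

import org.apache.http.HttpEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import com.zrh.util.DbUtil;
import com.zrh.util.PropertiesUtil;

import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;

public class BlogCrawlerSketch {
    private static final String HOME_URL = "https://www.cnblogs.com/"; // illustrative seed URL

    public static void main(String[] args) throws Exception {
        Connection con = new DbUtil().getCon();
        CacheManager manager = CacheManager.create(PropertiesUtil.getValue("ehcacheXmlPath"));
        Cache cache = manager.getCache("cnblog");
        CloseableHttpClient httpClient = HttpClients.createDefault();

        // 1. HttpClient fetches the list page
        CloseableHttpResponse response = httpClient.execute(new HttpGet(HOME_URL));
        HttpEntity entity = response.getEntity();
        String pageContent = EntityUtils.toString(entity, "utf-8");
        response.close();

        // 2. jsoup parses it and extracts the post links (selector is illustrative)
        Document doc = Jsoup.parse(pageContent);
        for (Element a : doc.select(".post-item-title")) {
            String postUrl = a.attr("href");

            // 3. Ehcache makes the crawl incremental: skip URLs seen in earlier runs
            if (cache.get(postUrl) != null) {
                continue;
            }

            // 4. Store the new post in the database, reusing the insert statement
            //    that the Baidu Cloud crawler below uses for t_jsoup_article
            PreparedStatement pst = con.prepareStatement(
                    "insert into `t_jsoup_article` values(null,?,?,null,now(),0,0,null,?,0,null)");
            pst.setObject(1, a.text());
            pst.setObject(2, postUrl);
            pst.setObject(3, postUrl);
            pst.executeUpdate();
            pst.close();

            cache.put(new net.sf.ehcache.Element(postUrl, postUrl));
            // 5. Image localization (not shown): download each <img> in the post body
            //    to the blogImages directory and rewrite its src attribute.
        }

        cache.flush();
        manager.shutdown();
        httpClient.close();
        con.close();
    }
}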
After running it, check the database: the crawled rows have all been inserted.
3. Baidu Cloud link crawler
PanZhaoZhaoCrawler3.java
package com.zrh.crawler;

import java.io.IOException;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;

import org.apache.http.HttpEntity;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;
import org.apache.log4j.Logger;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;

import com.zrh.util.DbUtil;
import com.zrh.util.PropertiesUtil;

import net.sf.ehcache.Cache;
import net.sf.ehcache.CacheManager;
import net.sf.ehcache.Status;

public class PanZhaoZhaoCrawler3 {

    private static Logger logger = Logger.getLogger(PanZhaoZhaoCrawler3.class);
    private static String URL = "http://www.13910.com/daren/";
    private static String PROJECT_URL = "http://www.13910.com";
    private static Connection con;
    private static CacheManager manager;
    private static Cache cache;
    private static CloseableHttpClient httpClient;
    private static long total = 0;

    /**
     * Fetch the home page with HttpClient
     */
    public static void parseHomePage() {
        logger.info("Start crawling: " + URL);
        manager = CacheManager.create(PropertiesUtil.getValue("ehcacheXmlPath"));
        cache = manager.getCache("cnblog");
        httpClient = HttpClients.createDefault();
        HttpGet httpGet = new HttpGet(URL);
        RequestConfig config = RequestConfig.custom().setConnectTimeout(5000).setSocketTimeout(8000).build();
        httpGet.setConfig(config);
        CloseableHttpResponse response = null;
        try {
            response = httpClient.execute(httpGet);
            if (response == null) {
                logger.info("Connection timed out!");
            } else {
                if (response.getStatusLine().getStatusCode() == 200) {
                    HttpEntity entity = response.getEntity();
                    String pageContent = EntityUtils.toString(entity, "utf-8");
                    parsePageContent(pageContent);
                }
            }
        } catch (ClientProtocolException e) {
            logger.error(URL + " - parse error - ClientProtocolException", e);
        } catch (IOException e) {
            logger.error(URL + " - parse error - IOException", e);
        } finally {
            try {
                if (response != null) {
                    response.close();
                }
                if (httpClient != null) {
                    httpClient.close();
                }
            } catch (IOException e) {
                logger.error(URL + " - parse error - IOException", e);
            }
        }
        // Finally flush the cached URLs to disk
        if (cache.getStatus() == Status.STATUS_ALIVE) {
            cache.flush();
        }
        manager.shutdown();
        logger.info("Finished crawling: " + URL);
    }

    /**
     * Parse the home page content with jsoup
     * @param pageContent
     */
    private static void parsePageContent(String pageContent) {
        Document doc = Jsoup.parse(pageContent);
        Elements aEles = doc.select(".showtop .key-right .darenlist .list-info .darentitle a");
        for (Element aEle : aEles) {
            String aHref = aEle.attr("href");
            logger.info("Extracted a user share home page: " + aHref);
            String panZhaoZhaoUserShareUrl = PROJECT_URL + aHref;
            List<String> panZhaoZhaoUserShareUrls = getPanZhaoZhaoUserShareUrls(panZhaoZhaoUserShareUrl);
            for (String singlePanZhaoZhaoUserShareUrl : panZhaoZhaoUserShareUrls) {
                parsePanZhaoZhaoUserShareUrl(singlePanZhaoZhaoUserShareUrl);
            }
        }
    }

    /**
     * Collect the first 15 list pages of a user's share home page
     * @param panZhaoZhaoUserShareUrl
     * @return
     */
    private static List<String> getPanZhaoZhaoUserShareUrls(String panZhaoZhaoUserShareUrl) {
        List<String> list = new LinkedList<String>();
        list.add(panZhaoZhaoUserShareUrl);
        for (int i = 2; i < 16; i++) {
            list.add(panZhaoZhaoUserShareUrl + "page-" + i + ".html");
        }
        return list;
    }

    /**
     * Parse the user URL rewritten by PanZhaoZhao (13910.com)
     * Original: http://yun.baidu.com/share/home?uk=1949795117
     * Rewritten: http://www.13910.com/u/1949795117/share/
     * @param panZhaoZhaoUserShareUrl the rewritten URL
     */
    private static void parsePanZhaoZhaoUserShareUrl(String panZhaoZhaoUserShareUrl) {
        logger.info("Start crawling user share page:: " + panZhaoZhaoUserShareUrl);
        HttpGet httpGet = new HttpGet(panZhaoZhaoUserShareUrl);
        RequestConfig config = RequestConfig.custom().setConnectTimeout(5000).setSocketTimeout(8000).build();
        httpGet.setConfig(config);
        CloseableHttpResponse response = null;
        try {
            response = httpClient.execute(httpGet);
            if (response == null) {
                logger.info("Connection timed out!");
            } else {
                if (response.getStatusLine().getStatusCode() == 200) {
                    HttpEntity entity = response.getEntity();
                    String pageContent = EntityUtils.toString(entity, "utf-8");
                    parsePanZhaoZhaoUserSharePageContent(pageContent, panZhaoZhaoUserShareUrl);
                }
            }
        } catch (ClientProtocolException e) {
            logger.error(panZhaoZhaoUserShareUrl + " - parse error - ClientProtocolException", e);
        } catch (IOException e) {
            logger.error(panZhaoZhaoUserShareUrl + " - parse error - IOException", e);
        } finally {
            try {
                if (response != null) {
                    response.close();
                }
            } catch (IOException e) {
                logger.error(panZhaoZhaoUserShareUrl + " - parse error - IOException", e);
            }
        }
        logger.info("Finished crawling user share page:: " + URL);
    }

    /**
     * From the content of a user's share page, collect all rewritten links
     * @param pageContent
     * @param panZhaoZhaoUserShareUrl the rewritten user share page URL
     */
    private static void parsePanZhaoZhaoUserSharePageContent(String pageContent, String panZhaoZhaoUserShareUrl) {
        Document doc = Jsoup.parse(pageContent);
        Elements aEles = doc.select("#flist li a");
        if (aEles.size() == 0) {
            logger.info("No Baidu Cloud address found");
            return;
        }
        for (Element aEle : aEles) {
            String ahref = aEle.attr("href");
            parseUserHandledTargetUrl(PROJECT_URL + ahref);
        }
    }

    /**
     * Parse a detail page
     * @param handledTargetUrl this page contains the rewritten Baidu Cloud address
     */
    private static void parseUserHandledTargetUrl(String handledTargetUrl) {
        logger.info("Start crawling:: " + handledTargetUrl);
        if (cache.get(handledTargetUrl) != null) {
            logger.info("This record already exists in the database");
            return;
        }
        HttpGet httpGet = new HttpGet(handledTargetUrl);
        RequestConfig config = RequestConfig.custom().setConnectTimeout(5000).setSocketTimeout(8000).build();
        httpGet.setConfig(config);
        CloseableHttpResponse response = null;
        try {
            response = httpClient.execute(httpGet);
            if (response == null) {
                logger.info("Connection timed out!");
            } else {
                if (response.getStatusLine().getStatusCode() == 200) {
                    HttpEntity entity = response.getEntity();
                    String pageContent = EntityUtils.toString(entity, "utf-8");
                    parseHandledTargetUrlPageContent(pageContent, handledTargetUrl);
                }
            }
        } catch (ClientProtocolException e) {
            logger.error(handledTargetUrl + " - parse error - ClientProtocolException", e);
        } catch (IOException e) {
            logger.error(handledTargetUrl + " - parse error - IOException", e);
        } finally {
            try {
                if (response != null) {
                    response.close();
                }
            } catch (IOException e) {
                logger.error(handledTargetUrl + " - parse error - IOException", e);
            }
        }
        logger.info("Finished crawling:: " + URL);
    }

    /**
     * Parse the content of the rewritten Baidu Cloud address page
     * @param pageContent
     * @param handledTargetUrl the rewritten Baidu Cloud address
     */
    private static void parseHandledTargetUrlPageContent(String pageContent, String handledTargetUrl) {
        Document doc = Jsoup.parse(pageContent);
        Elements aEles = doc.select(".fileinfo .panurl a");
        if (aEles.size() == 0) {
            logger.info("No Baidu Cloud address found");
            return;
        }
        String ahref = aEles.get(0).attr("href");
        getUserBaiduYunUrl(PROJECT_URL + ahref);
    }

    /**
     * Fetch the content behind the processed Baidu Cloud link
     * @param handledBaiduYunUrl the processed Baidu Cloud link
     */
    private static void getUserBaiduYunUrl(String handledBaiduYunUrl) {
        logger.info("Start crawling:: " + handledBaiduYunUrl);
        HttpGet httpGet = new HttpGet(handledBaiduYunUrl);
        RequestConfig config = RequestConfig.custom().setConnectTimeout(5000).setSocketTimeout(8000).build();
        httpGet.setConfig(config);
        CloseableHttpResponse response = null;
        try {
            response = httpClient.execute(httpGet);
            if (response == null) {
                logger.info("Connection timed out!");
            } else {
                if (response.getStatusLine().getStatusCode() == 200) {
                    HttpEntity entity = response.getEntity();
                    String pageContent = EntityUtils.toString(entity, "utf-8");
                    parseHandledBaiduYunUrlPageContent(pageContent, handledBaiduYunUrl);
                }
            }
        } catch (ClientProtocolException e) {
            logger.error(handledBaiduYunUrl + " - parse error - ClientProtocolException", e);
        } catch (IOException e) {
            logger.error(handledBaiduYunUrl + " - parse error - IOException", e);
        } finally {
            try {
                if (response != null) {
                    response.close();
                }
            } catch (IOException e) {
                logger.error(handledBaiduYunUrl + " - parse error - IOException", e);
            }
        }
        logger.info("Finished crawling:: " + URL);
    }

    /**
     * Extract the final Baidu Cloud link and save it
     * @param pageContent
     * @param handledBaiduYunUrl
     */
    private static void parseHandledBaiduYunUrlPageContent(String pageContent, String handledBaiduYunUrl) {
        Document doc = Jsoup.parse(pageContent);
        Elements aEles = doc.select("#check-result-no a");
        if (aEles.size() == 0) {
            logger.info("No Baidu Cloud address found");
            return;
        }
        String ahref = aEles.get(0).attr("href");
        if ((!ahref.contains("yun.baidu.com")) && (!ahref.contains("pan.baidu.com"))) {
            return;
        }
        logger.info("Crawled target #" + (++total) + ": " + ahref);
        String sql = "insert into `t_jsoup_article` values(null,?,?,null,now(),0,0,null,?,0,null)";
        try {
            PreparedStatement pst = con.prepareStatement(sql);
            // pst.setObject(1, UUID.randomUUID().toString());
            pst.setObject(1, "test content");
            pst.setObject(2, ahref);
            pst.setObject(3, ahref);
            if (pst.executeUpdate() == 0) {
                logger.info("Failed to insert the crawled link into the database!");
            } else {
                cache.put(new net.sf.ehcache.Element(handledBaiduYunUrl, handledBaiduYunUrl));
                logger.info("Inserted the crawled link into the database!");
            }
        } catch (SQLException e) {
            logger.error(ahref + " - parse error - SQLException", e);
        }
    }

    public static void start() {
        DbUtil dbUtil = new DbUtil();
        try {
            con = dbUtil.getCon();
            parseHomePage();
        } catch (Exception e) {
            logger.error("Failed to create the database connection", e);
        }
    }

    public static void main(String[] args) {
        start();
    }
}

It crawls the share links listed on http://www.13910.com/daren/.
You can also crawl the movies you want with the same approach:
MovieCrawlerStarter.java
Summary
With HttpClient fetching pages, jsoup parsing them, Ehcache de-duplicating URLs, and MySQL storing the results, the same small toolkit covers both the image crawler and the Baidu Cloud link crawler above.