我的git地址有空自取,源碼請上git下載,包含所需jar包
接上文
瀏覽一部分圖片之后發現了個問題:圖片還是太小,普遍不超過300k,而且很多圖片上面都有另外一個網站的水印
果斷點進去看看,果然不一樣。圖片全是高清的
然后知道了原來那個應用里面的圖片全是從這個網站里面爬的,而且還都是壓縮過的文件,太無恥了。。。
找到高清圖該下手了
於是仿照之前那個模式開始了
然后命途多舛,它的app竟然是網頁改版的,而且接口不跟之前那個一樣簡單。於是決定更新一下自己的爬蟲,從單個網頁進行爬取,爬全站大圖
下面是代碼:
第一個是隊列類,存放鏈接,之所以放訪問過的是為了防止重復訪問,設置成static是為了不同類之間共享
package com.feng.main;

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

/**
 * Shared URL queues connecting the producer (AddQueue) and the downloader
 * threads (DownloadImages). Declared static so all threads see the same
 * queues; the "visited" queues exist to prevent re-crawling/re-downloading.
 *
 * <p>These queues are accessed concurrently from several threads, so they
 * are backed by {@link ConcurrentLinkedQueue} — the original
 * {@code LinkedList} is not thread-safe and could corrupt its internal
 * links under concurrent offer/poll.
 */
public class Queues {
    /** Image/media URLs waiting to be downloaded. */
    public static Queue<String> imgUrlQueue = new ConcurrentLinkedQueue<String>();
    /** Page URLs waiting to be crawled. */
    public static Queue<String> htmlUrlQueue = new ConcurrentLinkedQueue<String>();
    /** Image URLs already downloaded (dedup set, append-only). */
    public static Queue<String> visitedImgUrlQueue = new ConcurrentLinkedQueue<String>();
    /** Page URLs already crawled (dedup set, append-only). */
    public static Queue<String> visitedHtmlUrlQueue = new ConcurrentLinkedQueue<String>();
}
然后這個類是為了獲取網頁實體,並將實體轉換為String類型方便處理
package com.feng.main;

import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.HttpClients;

/**
 * Fetches a web page over HTTP and exposes its body as a String for the
 * link-extraction step of the crawler.
 */
public class HtmlContent {

    /**
     * Executes a GET request against the given URL.
     *
     * @param url absolute URL to fetch
     * @return the response entity, or {@code null} when the request failed
     *         (the original dereferenced a null response and threw NPE here)
     */
    private HttpEntity getHttpEntity(String url) {
        HttpClient httpClient = HttpClients.createDefault();
        HttpGet get = new HttpGet(url);
        // 5s socket timeout / 5s connection-request timeout, matching DownLoadImg
        RequestConfig requestConfig = RequestConfig.custom()
                .setSocketTimeout(5000)
                .setConnectionRequestTimeout(5000)
                .build();
        get.setConfig(requestConfig);
        try {
            HttpResponse response = httpClient.execute(get);
            return response.getEntity();
        } catch (IOException e) {
            // Keep the crawler alive on a single failed request; caller handles null.
            e.printStackTrace();
            return null;
        }
    }

    /**
     * Downloads the page at the given URL and returns its full HTML text.
     *
     * @param url page URL
     * @return page content, or "" when the request failed
     */
    public String getContent(String url) {
        HttpEntity httpEntity = getHttpEntity(url);
        if (httpEntity == null) {
            return "";
        }
        // StringBuilder instead of String += in the read loop (O(n) vs O(n^2)).
        StringBuilder content = new StringBuilder();
        // try-with-resources guarantees both streams are closed even on error;
        // charset is pinned to UTF-8 instead of the platform default.
        // NOTE(review): assumes the target site serves UTF-8 — confirm against
        // its Content-Type header if pages come back garbled.
        try (InputStream is = httpEntity.getContent();
             InputStreamReader isr = new InputStreamReader(is, StandardCharsets.UTF_8)) {
            char[] buf = new char[1024];
            int len;
            while ((len = isr.read(buf)) != -1) {
                content.append(buf, 0, len);
            }
        } catch (IllegalStateException | IOException e) {
            e.printStackTrace();
        }
        return content.toString();
    }
}
下一個類是通過上面給的那個類傳遞過來的String類型的網頁,從里面提取出來圖片與html連接,並放入圖片隊列與網頁隊列中,這樣就能保證每個網頁只訪問(下載)一次
同時也是獲取鏈接的線程,使獲取url與下載圖片分開,這樣就能使用雙線程兩者互不干擾同時也能通過共享的圖片鏈接隊列(imgUrlQueue)聯系在一起
package com.feng.main;

import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Producer thread: takes pages off {@link Queues#htmlUrlQueue}, extracts
 * page links and media links from their HTML, and refills the shared queues.
 * Runs alongside the downloader threads, coupled only through Queues.
 */
public class AddQueue extends Thread {

    // Matches any http URL; candidates are then narrowed by the site filter.
    // Compiled once instead of on every page (Pattern.compile is expensive).
    private static final Pattern HTML_URL_PATTERN =
            Pattern.compile("http://([\\w-]+\\.)+[\\w-]+(/[\\w-./?%&=]*)?");

    // Same URL shape but required to end in a known media extension.
    // The dots are escaped: the original ".jpg" also matched e.g. "xjpg".
    private static final Pattern MEDIA_URL_PATTERN =
            Pattern.compile("http://([\\w-]+\\.)+[\\w-]+(/[\\w-./?%&=]*)?"
                    + "(\\.jpg|\\.mp4|\\.rmvb|\\.png|\\.mkv|\\.gif|\\.bmp|\\.jpeg|\\.flv|\\.avi|\\.asf|\\.rm|\\.wmv)");

    // Site prefix (e.g. "http://www.example.com"): keeps the crawl on one host
    // so the crawler cannot wander off to external links and never stop.
    String regex = "";

    AddQueue(String regex) {
        this.regex = regex;
    }

    public void run() {
        System.out.println("開始執行add線程");
        while (true) {
            // Only refill while either queue is running low, to bound memory.
            if (Queues.htmlUrlQueue.size() < 50 || Queues.imgUrlQueue.size() < 100) {
                String url = Queues.htmlUrlQueue.poll();
                if (url == null) {
                    // Queue is momentarily empty: back off instead of feeding
                    // null into getContent (the original NPE'd here).
                    sleepQuietly(200);
                    continue;
                }
                String content = new HtmlContent().getContent(url);
                getHtmlUrl(content);
                getImagesUrl(content);
                // Mark crawled only after extraction, so a crash retries it.
                Queues.visitedHtmlUrlQueue.offer(url);
            } else {
                // Queues are full enough; avoid a busy spin.
                sleepQuietly(200);
            }
        }
    }

    /**
     * Extracts every in-site page link from the HTML and enqueues unseen ones.
     *
     * @param context raw HTML of a crawled page
     */
    public void getHtmlUrl(String context) {
        Matcher m = HTML_URL_PATTERN.matcher(context);
        while (m.find()) {
            String url = m.group();
            // Check the pending queue too: the original only checked visited,
            // so the same page could be enqueued many times before its crawl.
            if (url.contains(this.regex)
                    && !Queues.visitedHtmlUrlQueue.contains(url)
                    && !Queues.htmlUrlQueue.contains(url)) {
                Queues.htmlUrlQueue.offer(url);
                System.out.println("add Html url : " + url);
            }
        }
    }

    /**
     * Extracts every in-site image/video link from the HTML and enqueues
     * unseen ones for the downloader threads.
     *
     * @param context raw HTML of a crawled page
     */
    public void getImagesUrl(String context) {
        Matcher m = MEDIA_URL_PATTERN.matcher(context);
        while (m.find()) {
            String url = m.group();
            if (url.contains(this.regex)
                    && !Queues.visitedImgUrlQueue.contains(url)
                    && !Queues.imgUrlQueue.contains(url)) {
                Queues.imgUrlQueue.offer(url);
                System.out.println("add Image url : " + url);
            }
        }
    }

    /** Sleeps briefly; restores the interrupt flag instead of swallowing it. */
    private static void sleepQuietly(long millis) {
        try {
            Thread.sleep(millis);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
}
最后是下載類,這個是上一篇文章里面的精簡版
package com.feng.main;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.HttpClients;

/**
 * Downloads a single media file to disk under D:/, choosing an images/ or
 * video/ folder from the file extension. Simplified version of the downloader
 * from the previous article.
 */
public class DownLoadImg {

    /** Extensions treated as images; every other extension goes to the video folder. */
    List<String> imgFormat = new ArrayList<String>();

    DownLoadImg() {
        imgFormat.add("jpg");
        imgFormat.add("jpeg");
        imgFormat.add("png");
        imgFormat.add("gif");
        imgFormat.add("bmp");
    }

    /**
     * Executes a GET request against the given URL.
     *
     * @param url direct link to a media file
     * @return the response entity, or {@code null} when the request failed
     */
    private HttpEntity getHttpEntity(String url) {
        HttpClient httpClient = HttpClients.createDefault();
        HttpGet get = new HttpGet(url);
        // 5s socket timeout / 5s connection-request timeout, matching HtmlContent
        RequestConfig requestConfig = RequestConfig.custom()
                .setSocketTimeout(5000)
                .setConnectionRequestTimeout(5000)
                .build();
        get.setConfig(requestConfig);
        try {
            HttpResponse response = httpClient.execute(get);
            return response.getEntity();
        } catch (IOException e) {
            // Keep the downloader alive on a single failed request.
            e.printStackTrace();
            return null;
        }
    }

    /**
     * Downloads the file at the given URL.
     *
     * @param url direct link to an image/video file
     * @return 1 on success, 0 when skipped or failed (the original returned 1
     *         even from the failure path)
     */
    public int downloadImage(String url) {
        try {
            HttpEntity httpEntity = getHttpEntity(url);
            if (httpEntity == null) {
                System.out.println("下載:" + url + "失敗");
                return 0;
            }
            long contentLength = httpEntity.getContentLength();
            long len = contentLength / 1024;
            System.out.println("下載的文件大小為:" + len + "k");
            // Skip thumbnails below 150k. A negative content length means
            // "unknown", not "small" — the original skipped those too.
            if (contentLength >= 0 && len < 150) {
                System.out.println("Warning:文件太小,不予下載--------");
                return 0;
            }
            String realPath = getRealPath(url);
            String name = getName(url);
            System.out.println("文件夾路徑:" + realPath);
            System.out.println("文件名字:" + name);
            // try-with-resources closes both streams even when the copy fails
            // mid-way (the original leaked them on any exception).
            try (InputStream is = httpEntity.getContent();
                 FileOutputStream fos = new FileOutputStream(new File(realPath + "/" + name))) {
                byte[] buf = new byte[1024];
                int l;
                while ((l = is.read(buf)) != -1) {
                    fos.write(buf, 0, l);
                }
                fos.flush();
            }
            System.out.println("下載:" + url + "成功\n");
            return 1;
        } catch (Exception e) {
            System.out.println("下載:" + url + "失敗");
            e.printStackTrace();
            return 0;
        }
    }

    /**
     * Derives the destination directory for a URL, creating it when missing.
     * Prefers the site's "images/&lt;category&gt;/&lt;id&gt;" path segment when present,
     * otherwise falls back to media/images or media/video plus the URL's
     * second-to-last path segment.
     *
     * @param url direct link to a media file
     * @return absolute directory path under D:/
     */
    private String getRealPath(String url) {
        Pattern p = Pattern.compile("images/[a-z]+/[a-z_0-9]+");
        Matcher m = p.matcher(url);
        String name = getName(url);
        // Take the text after the LAST dot: split("\\.")[1] picked the wrong
        // part for names with several dots and threw for names with none.
        int dot = name.lastIndexOf('.');
        String format = dot >= 0 ? name.substring(dot + 1) : "";
        String path = imgFormat.contains(format) ? "media/images/" : "media/video/";
        String[] parts = url.split("/");
        path += parts[parts.length - 2];
        // If the URL carries the site's own images/<cat>/<id> layout, mirror it.
        if (m.find()) {
            path = m.group();
        }
        path = "D:/" + path;
        File file = new File(path);
        if (!file.exists()) {
            file.mkdirs();
        }
        return path;
    }

    /**
     * Extracts the file name (last path segment) from a URL.
     *
     * @param url direct link to a media file
     * @return text after the last '/'
     */
    private String getName(String url) {
        return url.substring(url.lastIndexOf("/") + 1);
    }
}
然后是下載圖片的線程
package com.feng.main; public class DownloadImages extends Thread{ public void run(){ System.out.println("開始下載"); while(Queues.imgUrlQueue.size() > 0){ //只要有數據就下載 //可以多線程執行下載 String url = Queues.imgUrlQueue.poll(); System.out.println("開始下載:"+url); //下載 new DownLoadImg().downloadImage(url); Queues.visitedImgUrlQueue.offer(url); } } }
最后就是主方法了
package com.feng.main;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Crawler entry point: derives the site prefix from the seed URL, seeds the
 * page queue, then starts one producer and two downloader threads in a pool.
 */
public class MainTest {

    public static void main(String[] args) {
        String startUrl = "http://www.jdlingyu.net/cosplay/";
        // Extract scheme+host; AddQueue uses it to keep the crawl on-site.
        Pattern p = Pattern.compile("http://([\\w-]+\\.)+[\\w-]+");
        Matcher m = p.matcher(startUrl);
        if (!m.find()) {
            // group() throws IllegalStateException when find() failed, so
            // bail out with a clear message instead (the original ignored
            // the find() result).
            System.err.println("無法從起始地址解析站點前綴:" + startUrl);
            return;
        }
        String regex = m.group();
        System.out.println("regex : " + regex);
        Queues.htmlUrlQueue.offer(startUrl);
        // One producer + two downloaders.
        ExecutorService pool = Executors.newFixedThreadPool(3);
        pool.execute(new AddQueue(regex));
        try {
            // Give the producer a head start so the image queue is non-empty
            // when the downloaders begin (they exit once it drains).
            Thread.sleep(5000);
        } catch (InterruptedException e) {
            // Restore the interrupt flag instead of swallowing it.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
        // Two distinct downloader instances: the original submitted the SAME
        // Thread object twice, sharing one Runnable across two pool workers.
        pool.execute(new DownloadImages());
        pool.execute(new DownloadImages());
        pool.shutdown();
    }
}
最后完成,開始下載。
在最后,成果展示
《完》