When extracting page URLs with a Java regular expression, I only want to capture some of the URLs. How do I write the match?

代码不说谎 2010-07-12 10:41:00
Below is an example I found online. When I run it, it crawls back every page under the domain, but I only want the URLs found on some of the domain's pages. For example, if I give it book.easou.com, it grabs the URLs from all pages under easou.com, while I only want the URLs from the pages under book.easou.com. I've been tweaking the regular expression for a long time and still can't get the result I want. Can anyone take a look? Thanks! (A host-based alternative is sketched after the code.)

package com.easou.crawler;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.net.URL;
import java.net.URLConnection;
import java.util.ArrayList;
import java.util.Hashtable;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class Crawler {
    private int webDepth = 1; // crawl depth
    private int intThreadNum = 5; // number of worker threads
    private String strHomePage = ""; // homepage address
    private String myDomain; // domain name
    private String fPath = "web"; // directory for saved page files
    private ArrayList<String> arrUrls = new ArrayList<String>(); // URLs not yet processed
    private ArrayList<String> arrUrl = new ArrayList<String>(); // all URLs, kept for building the index
    private Hashtable<String, Integer> allUrls = new Hashtable<String, Integer>(); // page number of every URL
    private Hashtable<String, Integer> deepUrls = new Hashtable<String, Integer>(); // depth of every URL
    private int intWebIndex = 0; // file index of each page, starting from 0
    private String charset = "UTF-8";
    private String report = "";
    private long startTime;
    private int webSuccessed = 0;
    private int webFailed = 0;

    public Crawler(String s, int i) {
        this.strHomePage = s;
        this.webDepth = i;
    }

    public synchronized void addWebSuccessed() {
        webSuccessed++;
    }

    public synchronized void addWebFailed() {
        webFailed++;
    }

    public synchronized void addReport(String s) {
        try {
            report += s;
            // Rewrite the whole report file on every call.
            PrintWriter pwReport = new PrintWriter(new FileOutputStream("report.txt"));
            pwReport.println(report);
            pwReport.close();
        } catch (Exception e) {
            System.out.println("Failed to write the report file!");
        }
    }

    public synchronized String getAUrl() {
        String tmpAUrl = arrUrls.get(0);
        arrUrls.remove(0);
        return tmpAUrl;
    }

    public synchronized String getUrl() {
        String tmpUrl = arrUrl.get(0);
        arrUrl.remove(0);
        return tmpUrl;
    }

    public synchronized Integer getIntWebIndex() {
        intWebIndex++;
        return intWebIndex;
    }


    public void getWebByHomePage() {
        startTime = System.currentTimeMillis();
        this.myDomain = getDomain();
        if (myDomain == null) {
            System.out.println("Wrong input!");
            // System.exit(1);
            return;
        }
        System.out.println("Homepage = " + strHomePage);
        addReport("Homepage = " + strHomePage + "!\n");
        System.out.println("Domain = " + myDomain);
        addReport("Domain = " + myDomain + "!\n");
        arrUrls.add(strHomePage);
        arrUrl.add(strHomePage);
        allUrls.put(strHomePage, 0);
        deepUrls.put(strHomePage, 1);
        File fDir = new File(fPath);
        if (!fDir.exists()) {
            fDir.mkdir();
        }
        System.out.println("Start!");
        this.addReport("Start!\n");
        // Fetch the homepage first so the worker threads have links to start from.
        String tmp = getAUrl();
        this.getWebByUrl(tmp, charset, allUrls.get(tmp) + "");
        for (int i = 0; i < intThreadNum; i++) {
            new Thread(new Processer(this)).start();
        }
        // Busy-wait until the queue is empty and all worker threads have exited.
        while (true) {
            if (arrUrls.isEmpty() && Thread.activeCount() == 1) {
                long finishTime = System.currentTimeMillis();
                long costTime = finishTime - startTime;
                System.out.println("\n\n\n\n\nFinished!");
                addReport("\n\n\n\n\nFinished!\n");
                System.out.println("Start time = " + startTime + " "
                        + "Finish time = " + finishTime + " "
                        + "Cost time = " + costTime + "ms");
                addReport("Start time = " + startTime + " "
                        + "Finish time = " + finishTime + " "
                        + "Cost time = " + costTime + "ms" + "\n");
                System.out.println("Total url number = "
                        + (webSuccessed + webFailed) + " Successed: "
                        + webSuccessed + " Failed: " + webFailed);
                addReport("Total url number = " + (webSuccessed + webFailed)
                        + " Successed: " + webSuccessed + " Failed: "
                        + webFailed + "\n");

                String strIndex = "";
                String tmpUrl = "";
                while (!arrUrl.isEmpty()) {
                    tmpUrl = getUrl();
                    strIndex += "Web depth:" + deepUrls.get(tmpUrl)
                            + " Filepath: " + fPath + "/web"
                            + allUrls.get(tmpUrl) + ".htm" + " url:" + tmpUrl
                            + "\n\n";
                }
                System.out.println(strIndex);
                break;
            }
        }
    }

    public void getWebByUrl(String strUrl, String charset, String fileIndex) {
        try {
            System.out.println("Getting web by url: " + strUrl);
            addReport("Getting web by url: " + strUrl + "\n");
            URL url = new URL(strUrl);
            URLConnection conn = url.openConnection();
            conn.setDoOutput(true);
            InputStream is = url.openStream();
            BufferedReader bReader = new BufferedReader(new InputStreamReader(is));
            String rLine = null;
            while ((rLine = bReader.readLine()) != null) {
                // Only follow links found on pages still above the depth limit.
                if (rLine.length() > 0 && deepUrls.get(strUrl) < webDepth) {
                    getUrlByString(rLine, strUrl);
                }
            }
            is.close();
            System.out.println("Get web successfully! " + strUrl);
            addReport("Get web successfully! " + strUrl + "\n");
            addWebSuccessed();
        } catch (Exception e) {
            System.out.println("Get web failed! " + strUrl);
            addReport("Get web failed! " + strUrl + "\n");
            addWebFailed();
        }
    }

    public String getDomain() {
        // Extracts the registered domain (e.g. easou.com) from the homepage URL;
        // the lookbehind deliberately skips over any subdomain prefix.
        String reg = "(?<=http\\://[a-zA-Z0-9]{0,100}[.]{0,1})[^.\\s]*?\\.(com|cn|net|org|biz|info|cc|tv)";
        Pattern p = Pattern.compile(reg, Pattern.CASE_INSENSITIVE);
        Matcher m = p.matcher(strHomePage);
        if (m.find()) {
            return m.group(0);
        }
        return null;
    }

    public void getUrlByString(String inputArgs, String strUrl) {
        String tmpStr = inputArgs;
        // Match href values that contain myDomain. Note: [http://] is a character
        // class (any one of 'h', 't', 'p', ':', '/'), not the literal string "http://".
        String regUrl = "(?<=(href=)[\"]?[\']?)[http://][^\\s\"\'\\?]*("
                + myDomain + ")[^\\s\"\'>]*";
        Pattern p = Pattern.compile(regUrl, Pattern.CASE_INSENSITIVE);
        Matcher m = p.matcher(tmpStr);
        while (m.find()) {
            if (!allUrls.containsKey(m.group(0))) {
                System.out.println("Find a new url,depth:"
                        + (deepUrls.get(strUrl) + 1) + " " + m.group(0));
                addReport("Find a new url,depth:" + (deepUrls.get(strUrl) + 1)
                        + " " + m.group(0) + "\n");
                arrUrls.add(m.group(0));
                arrUrl.add(m.group(0));
                allUrls.put(m.group(0), getIntWebIndex());
                deepUrls.put(m.group(0), deepUrls.get(strUrl) + 1); // a new URL is one level deeper
            }
            tmpStr = tmpStr.substring(m.end());
            m = p.matcher(tmpStr);
        }
    }

    class Processer implements Runnable {
        Crawler gw;

        public Processer(Crawler g) {
            this.gw = g;
        }

        public void run() {
            // Each worker drains the queue until no unprocessed URLs remain.
            while (!arrUrls.isEmpty()) {
                String tmp = getAUrl();
                getWebByUrl(tmp, charset, allUrls.get(tmp) + "");
            }
        }
    }
}
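
For reference, the subdomain restriction can also be enforced without any regex at all, by comparing URL hosts directly. A minimal sketch under that assumption (the isSameHost helper is illustrative, not part of the code above):

import java.net.URL;

public class HostFilter {
    // Keep a candidate URL only if its host exactly matches the seed's host,
    // e.g. book.easou.com but not www.easou.com.
    static boolean isSameHost(String seed, String candidate) {
        try {
            return new URL(seed).getHost().equalsIgnoreCase(new URL(candidate).getHost());
        } catch (Exception e) {
            return false; // malformed URLs are simply rejected
        }
    }

    public static void main(String[] args) {
        String seed = "http://book.easou.com";
        System.out.println(isSameHost(seed, "http://book.easou.com/1.html"));    // true
        System.out.println(isSameHost(seed, "http://www.easou.com/index.html")); // false
    }
}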
9 replies
brandon_le 2010-10-10
Props to the guy above for the effort.
水煮沉浮 2010-10-10
The code is too long to read, but an itch like this has to be scratched...
thegodofwar 2010-10-10
The code is a mess, and the regex is even more painful to look at.
tomjerrycat 2010-09-04
If you have the guts, explain the code so people can actually learn from it, instead of pasting a chunk of code from who-knows-where. We come here precisely because we can't figure it out.
zhaohuihua 2010-09-04
A thread from July???
It cost me so many brain cells!!!

Since I've replied anyway, a few more words.
You want to grab the URLs from all the pages under book.easou.com,
but the regex you highlighted in red has a problem as well:
String regUrl = "(?<=(href=)[\"]?[\']?)[http://][^\\s\"\'\\?]*("
        + myDomain + ")[^\\s\"\'>]*";
What this extracts is the URLs on a page that contain book.easou.com.
As I understand it, what you actually want is every <a href="..."> on the pages under book.easou.com,
and an a tag's href comes in at least three forms that need to be handled separately; a single regex won't handle all of them (see the sketch below):
1. Absolute paths, i.e. those starting with http://
2. Site-root paths, i.e. those starting with /, e.g. <a href="/book.html">, which need the domain prepended
3. Relative paths, e.g. <a href="book/1.html">, which need the current page's path prepended
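
A minimal sketch of how all three cases can be resolved in one step with java.net.URI (the resolveHref helper is illustrative, not part of the original code; URI.resolve implements exactly the prepending rules described above):

import java.net.URI;
import java.net.URISyntaxException;

public class HrefResolver {
    // Resolve an href against the page it was found on. URI.resolve handles
    // absolute URLs, site-root paths, and relative paths uniformly.
    static String resolveHref(String pageUrl, String href) throws URISyntaxException {
        return new URI(pageUrl).resolve(href).toString();
    }

    public static void main(String[] args) throws URISyntaxException {
        String page = "http://book.easou.com/novel/list.html";
        System.out.println(resolveHref(page, "http://book.easou.com/a.html")); // unchanged
        System.out.println(resolveHref(page, "/book.html"));  // http://book.easou.com/book.html
        System.out.println(resolveHref(page, "book/1.html")); // http://book.easou.com/novel/book/1.html
    }
}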
zhaohuihua 2010-09-04
But when I type book.easou.com into a browser it redirects to www.easou.com/index.html, so I couldn't test against it.
So I picked a similar domain, mo.easou.com, instead.
Tested it, and it works fine.

public static void main(String[] args) {
    new Crawler("http://mo.easou.com", 2).getWebByHomePage();
}
zhaohuihua 2010-09-04
If you want to grab the URLs from all the pages under *.easou.com,
what needs to change isn't the part you highlighted in red, but getDomain().
Change its regex to: (?<=http\\://)(?:[^.\\s]*?\\.)+(com|cn|net|org|biz|info|cc|tv)

public String getDomain() {
    // Capture the full host, subdomain included, so myDomain becomes
    // book.easou.com rather than easou.com.
    String reg = "(?<=http\\://)(?:[^.\\s]*?\\.)+(com|cn|net|org|biz|info|cc|tv)";
    Pattern p = Pattern.compile(reg, Pattern.CASE_INSENSITIVE);
    Matcher m = p.matcher(strHomePage);
    if (m.find()) {
        return m.group(0);
    }
    return null;
}
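
For comparison, a quick sketch of what the two patterns capture from the same homepage URL (expected output shown as comments):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class DomainRegexDemo {
    public static void main(String[] args) {
        String url = "http://book.easou.com/novel/list.html";
        // Original pattern: the lookbehind swallows the subdomain, so only the
        // registered domain is captured and the crawl widens to all of easou.com.
        String oldReg = "(?<=http\\://[a-zA-Z0-9]{0,100}[.]{0,1})[^.\\s]*?\\.(com|cn|net|org|biz|info|cc|tv)";
        // Suggested pattern: capture every dotted label after http://, i.e. the full host.
        String newReg = "(?<=http\\://)(?:[^.\\s]*?\\.)+(com|cn|net|org|biz|info|cc|tv)";

        Matcher oldM = Pattern.compile(oldReg, Pattern.CASE_INSENSITIVE).matcher(url);
        Matcher newM = Pattern.compile(newReg, Pattern.CASE_INSENSITIVE).matcher(url);
        if (oldM.find()) System.out.println(oldM.group(0)); // easou.com
        if (newM.find()) System.out.println(newM.group(0)); // book.easou.com
    }
}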
hy158753228 2010-09-04
Better to extract the URLs with an HTML parser like Jsoup first, and then filter the resulting URL list.
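
A minimal sketch of that approach, assuming the jsoup library is on the classpath (the startsWith filter stands in for whatever per-host check the crawler actually needs):

import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

public class JsoupLinkExtractor {
    public static void main(String[] args) throws Exception {
        // Fetch the page and let Jsoup parse it into a DOM.
        Document doc = Jsoup.connect("http://book.easou.com").get();
        // Select every <a> that has an href; absUrl() resolves relative links.
        for (Element link : doc.select("a[href]")) {
            String abs = link.absUrl("href");
            // Keep only links that stay on the book.easou.com host.
            if (abs.startsWith("http://book.easou.com")) {
                System.out.println(abs);
            }
        }
    }
}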
代码不说谎 2010-07-12
The part in red (the regUrl pattern in getUrlByString above) is the regex I'm using now, but it doesn't give me the result I want. Can anyone help me fix it?

I only want the URLs from the pages under book.easou.com.
