package regularExpression;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * A simple web-crawler demo: downloads the Taobao homepage and prints every
 * hyperlink target found in its HTML using a regular expression.
 */
public class Test2
{
    public static void main(String[] args) throws Exception
    {
        String html = GetString("https://www.taobao.com/");
        // Non-greedy (+?) so one match cannot swallow several links at once;
        // the character class restricts href values to word characters,
        // whitespace, '.', '/' and ':' — filtering out javascript:/data: noise.
        Pattern p = Pattern.compile("href=\"([\\w\\s./:]+?)\"");
        Matcher m = p.matcher(html);
        while (m.find())
        {
            // group(1) is the captured URL without the surrounding href="..."
            System.out.println(m.group(1));
        }
    }

    /**
     * Fetches the resource at {@code url} and returns its body as a string,
     * decoded as UTF-8, with lines re-joined by '\n'.
     *
     * @param url the URL to download
     * @return the full response body as text
     * @throws Exception if the URL is malformed or the download fails
     */
    public static String GetString(String url) throws Exception
    {
        URL target = new URL(url);
        // try-with-resources guarantees the stream is closed even when an
        // IOException is thrown mid-read (the original leaked the reader).
        try (BufferedReader in = new BufferedReader(
                new InputStreamReader(target.openStream(), StandardCharsets.UTF_8)))
        {
            // StringBuilder: no synchronization needed in this single-threaded loop.
            StringBuilder all = new StringBuilder();
            String line;
            while ((line = in.readLine()) != null)
            {
                all.append(line).append('\n');
            }
            return all.toString();
        }
    }
}
// A simple web crawler written in Java using regular expressions.
// Reprinted from blog.csdn.net/qq_43416157/article/details/104385593