Python Security Mini-Tools: A Web Directory Scanner

This program is essentially a brute-force attack on web directories: it reads entries from a wordlist, joins each entry with the target URL, then sends a request and checks the response. Whether it finds anything depends mostly on how good the wordlist is; plenty of wordlists are available online, so collecting a few is enough.
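The core check is only a few lines. Here is a minimal sketch of the idea (the target URL and wordlist path below are placeholders, not part of the final tool):

#!/usr/bin/python
#coding=utf-8
import requests

base = 'http://example.com/'            # placeholder target
for line in open('dics/common.txt'):    # placeholder wordlist, one path per line
	url = base + line.strip()
	if requests.get(url, timeout=8).status_code == 200:
		print url                       # a 200 response suggests the path exists

The full tool below adds multithreading, random User-Agent headers, progress display, and an HTML report on top of this loop.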

For convenience, the collected User-Agent strings are saved in a module of their own so that other scripts can import them directly.

user_agent_list.py:

#!/usr/bin/python
#coding=utf-8
import random

def get_user_agent():
	# each entry is a ready-to-use headers dict for requests
	user_agent_list = [
		{'User-Agent':'Mozilla/4.0 (Mozilla/4.0; MSIE 7.0; Windows NT 5.1; FDM; SV1; .NET CLR 3.0.04506.30)'},
		{'User-Agent':'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; en) Opera 11.00'},
		{'User-Agent':'Mozilla/5.0 (X11; U; Linux i686; de; rv:1.9.0.2) Gecko/2008092313 Ubuntu/8.04 (hardy) Firefox/3.0.2'},
		{'User-Agent':'Mozilla/5.0 (X11; U; Linux i686; en-GB; rv:1.9.1.15) Gecko/20101027 Fedora/3.5.15-1.fc12 Firefox/3.5.15'},
		{'User-Agent':'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/8.0.551.0 Safari/534.10'},
		{'User-Agent':'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.2) Gecko/2008092809 Gentoo Firefox/3.0.2'},
		{'User-Agent':'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/7.0.544.0'},
		{'User-Agent':'Opera/9.10 (Windows NT 5.2; U; en)'},
		{'User-Agent':'Mozilla/5.0 (iPhone; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko)'},
		{'User-Agent':'Opera/9.80 (X11; U; Linux i686; en-US; rv:1.9.2.3) Presto/2.2.15 Version/10.10'},
		{'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru-RU) AppleWebKit/533.18.1 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5'},
		{'User-Agent':'Mozilla/5.0 (Windows; U; Windows NT 5.1; ru; rv:1.9b3) Gecko/2008020514 Firefox/3.0b3'},
		{'User-Agent':'Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10_4_11; fr) AppleWebKit/533.16 (KHTML, like Gecko) Version/5.0 Safari/533.16'},
		{'User-Agent':'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20'},
		{'User-Agent':'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)'},
		{'User-Agent':'Mozilla/4.0 (compatible; MSIE 6.0; X11; Linux x86_64; en) Opera 9.60'},
		{'User-Agent':'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_2; en-US) AppleWebKit/533.4 (KHTML, like Gecko) Chrome/5.0.366.0 Safari/533.4'},
		{'User-Agent':'Mozilla/5.0 (Windows NT 6.0; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.51'}
	]

	return random.choice(user_agent_list)


Then place this script in a directory named agent_proxy. Since this is Python 2, that directory also needs an __init__.py file (it can be empty) so that the import from agent_proxy import user_agent_list resolves as a package.
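A quick way to sanity-check the module from the project root (assuming the package layout above; each call should print a different headers dict):

#!/usr/bin/python
#coding=utf-8
from agent_proxy import user_agent_list

# print a few random choices to confirm the import works
for i in range(3):
	print user_agent_list.get_user_agent()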

Next comes the main program. It reads the wordlist from a file and uses multiple threads to brute-force the web directories; Queue.Queue is thread-safe, so all workers can share a single queue of candidate URLs without extra locking. Every hit is finally written to an HTML file as a clickable hyperlink:

#!/usr/bin/python
#coding=utf-8
import requests
import sys
from Queue import Queue
import threading
from agent_proxy import user_agent_list
from optparse import OptionParser

class DirScanMain:
	"""docstring for DirScanMain"""
	def __init__(self, options):
		self.url = options.url
		self.filename = options.filename
		self.count = options.count
		
	class DirScan(threading.Thread):
		"""docstring for DirScan"""
		def __init__(self, queue,total):
			threading.Thread.__init__(self)
			self._queue = queue
			self._total = total
				
		def run(self):
			while not self._queue.empty():
				url = self._queue.get()

				# refresh the progress display in a short-lived helper thread
				threading.Thread(target=self.msg).start()

				try:
					r = requests.get(url=url, headers=user_agent_list.get_user_agent(), timeout=8)
					if r.status_code == 200:
						sys.stdout.write('\r' + '[+]%s\t\t\n' % (url))
						# append each hit to the report as a clickable link
						result = open('result.html','a+')
						result.write('<a href="' + url + '" target="_blank">' + url + '</a>')
						result.write('\r\n<br/>')
						result.close()
				except Exception:
					# timeouts and connection errors are simply skipped
					pass

		def msg(self):
			# progress = share of the queue that has already been consumed
			per = 100 - float(self._queue.qsize())/float(self._total) * 100
			percentage = "%s Finished| %s All| Scan in %.1f %s" % ((self._total - self._queue.qsize()), self._total, per, '%')
			sys.stdout.write('\r'+'[*]'+percentage)

	def start(self):
		# truncate any report left over from a previous run
		result = open('result.html','w')
		result.close()

		queue = Queue()

		# one queue entry per wordlist line: base URL + path
		f = open('./dics/%s' % self.filename, 'r')
		for i in f:
			queue.put(self.url + i.rstrip('\n'))
		f.close()

		# remember the total for the progress display
		total = queue.qsize()

		threads = []
		thread_count = int(self.count)

		for i in range(thread_count):
			threads.append(self.DirScan(queue,total))
		for i in threads:
			i.start()
		for i in threads:
			i.join()

if __name__ == '__main__':

	print '''
	 ____  _      ____                  
	|  _ \(_)_ __/ ___|  ___ __ _ _ __  
	| | | | | '__\___ \ / __/ _` | '_ \ 
	| |_| | | |   ___) | (_| (_| | | | |
	|____/|_|_|  |____/ \___\__,_|_| |_|

	'''

	parser = OptionParser('./web_dir_scan.py -u <Target URL> -f <Dictionary file name> [-t <Thread_count>]')
	parser.add_option('-u','--url',dest='url',type='string',help='target url for scan')
	parser.add_option('-f','--file',dest='filename',type='string',help='dictionary filename')
	parser.add_option('-t','--thread',dest='count',type='int',default=10,help='scan thread_count')
	(options,args)=parser.parse_args()

	if options.url and options.filename:
		dirscan = DirScanMain(options)
		dirscan.start()
		sys.exit(0)
	else:
		parser.print_help()
		sys.exit(1)


Running results (the screenshot from the original post is not reproduced here).
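A typical invocation looks like this (the target URL and wordlist name are placeholders; -t defaults to 10 threads):

./web_dir_scan.py -u http://example.com/ -f php.txt -t 20

Hits are printed to the terminal with a [+] prefix as they come in, and result.html collects them as clickable links.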
Later I plan to build a web directory scanning system with the Django framework, still based on the same simple brute-force principle; the idea is to have a web interface that makes the tool easier to operate.


Reposted from blog.csdn.net/ski_12/article/details/78443601