# Enumerate subdomains for every domain in domain.txt, resolve them with dnsx,
# port-scan the top 1000 ports, then probe HTTP services for title/tech/status.
subfinder -silent -dL domain.txt | dnsx -silent | naabu -top-ports 1000 -silent| httpx -title -tech-detect -status-code
# Enumerate one domain, pass results through ksubdomain, then port-scan.
# NOTE(review): 'ksubdomain e --stdin' uses enumeration mode; stdin
# verification is normally 'ksubdomain v --stdin' — confirm against the
# installed ksubdomain version.
subfinder -d domain.net -silent | ksubdomain e --stdin --silent | naabu -top-ports 1000 -silent
# Enumerate, resolve, probe HTTP, and save the live URLs to output.txt.
subfinder -silent -dL domain.txt | dnsx -silent | httpx -o output.txt
# Plain subdomain enumeration for a single domain.
subfinder -d domain.com -silent
# Keep only HTTP 200 responses (30s timeout per request), save to output.txt.
subfinder -silent -dL domain.txt | dnsx -silent | httpx -mc 200 -timeout 30 -o output.txt
# Enumerate subdomains and append only newly-seen ones to domains.txt (anew
# de-duplicates against the file), then probe them for title/tech/status.
# Fix: 'domians.txt' was a typo for 'domains.txt'.
subfinder -silent -dL domain.txt | anew domains.txt | httpx -title -tech-detect -status-code
# Screenshot every URL in the list. gowitness takes the target file via -f;
# the original 'gowitness file <urlslist.txt>' used angle brackets as a
# placeholder, but the shell parses '<' / '>' as redirections and the trailing
# '>' with no target is a syntax error.
gowitness file -f urlslist.txt
# 爬虫获取链接：并行跑 20 个站点，每个站点 10 个爬虫线程 (crawl links: 20 sites in parallel, 10 crawler threads each)
# gospider: -S site list, -o output location, -c 10 concurrent requests per
# site, -d 1 crawl depth, -t 20 sites crawled in parallel.
# NOTE(review): -o is an output *directory* in gospider; 'output.txt' will be
# created as a folder — confirm the intended name.
gospider -S domain.txt -o output.txt -c 10 -d 1 -t 20
# 通过爬虫获取链接（包含子域名） (crawl links, including subdomain URLs)
# Crawl one site; --other-source also pulls URLs from third-party archives
# and --include-subs keeps links pointing at subdomains.
gospider -s "https://google.com/" -o output -c 10 -d 1 --other-source --include-subs
# 对爬虫输出进行过滤，只保留 js 文件 (filter the crawl output down to JS file URLs)
# Crawl every site in urls.txt and append only JavaScript file URLs to
# js-urls.txt.
# Fix: a bare 'grep js' also matched 'json', 'jsp', and any hostname/path
# containing "js"; anchor on a literal '.js' extension, optionally followed
# by a query string.
gospider -S urls.txt | grep -iE '\.js($|\?)' | tee -a js-urls.txt
# Pull historical/known URLs for one domain from gau (Wayback, OTX,
# Common Crawl), skip static assets, then probe what is still alive.
echo domain.com | gau --blacklist png,jpg,gif,html,eot,svg,woff,woff2 | httpx -title -tech-detect -status-code
# Same as the single-domain gau pipeline, but for every domain in domain.txt.
# Fix: 'domian.txt' was a typo for 'domain.txt' (the list used by the other
# commands); also feed the file directly instead of via 'cat'.
gau --blacklist png,jpg,gif,html,eot,svg,woff,woff2 < domain.txt | httpx -title -tech-detect -status-code
# 此命令也抓取了标题；直接在 httpx 的输出后接管道即可衔接下一个工具 (httpx already grabs page titles — chain the next tool straight off its output)
# Scan the live-JS list for interesting path keywords (dev/app/static builds
# often expose source maps or debug endpoints). grep reads the file directly;
# with a single input file the output is identical to the cat-pipe form.
grep dev livejs.txt
grep app livejs.txt
grep static livejs.txt
# Request every JS URL with fff, keep lines containing "200", and save the
# URL (first space-separated field) to livejs.txt.
# Fix: the cut delimiter was wrapped in curly “smart quotes” (U+201C), which
# the shell treats as literal characters — cut then fails with a multi-byte
# delimiter. Use a plain ASCII single-quoted space.
cat jslist.txt | fff | grep 200 | cut -d ' ' -f1 | tee livejs.txt
# Pull archived URLs for each subdomain from the Wayback Machine, keep only
# ones matching gf's XSS parameter patterns, normalize their query values
# with qsreplace, and record the candidates in xss.txt.
waybackurls < subdomains.txt | gf xss | qsreplace | tee xss.txt
# Enumerate subdomains, gather their known URLs (minus static assets), and
# save the ones that respond to 200.txt.
# NOTE(review): despite the name, 200.txt holds every *live* URL httpx finds,
# not only HTTP 200s — add '-mc 200' if only 200 responses are wanted.
subfinder -d domain.com -silent | gau --blacklist png,jpg,gif,html,eot,svg,woff,woff2 | httpx -o 200.txt
# Run an xray browser-crawler scan against every URL in 200.txt, appending
# findings to vuln.html.
# Fix: 'for i in $(cat 200.txt)' word-splits on whitespace and globs each
# entry, and the unquoted '$i' re-splits it; read the file line by line and
# quote the variable instead.
while IFS= read -r url; do
  echo "xray scanning $url"
  ./xray webscan --browser-crawler "$url" --html-output vuln.html
done < 200.txt
# Crawl every target in url.txt (including subdomains), keep parameterized
# URLs ('=' present) that are not Google links, and pipe them into dalfox
# for XSS scanning.
# Fix: the original 'grep -v key | grep key' pair first removed every line
# containing "key" and then required "key" — guaranteeing empty output and
# starving dalfox; both contradictory stages are dropped.
cat url.txt | hakrawler -subs | grep -v google | grep = | dalfox pipe --silence --skip-bav
# 单目标扫描 (single-target parameter scan)
# Arjun: brute-force hidden HTTP parameters on one endpoint (-u target URL).
arjun -u https://api.example.com/endpoint
# 单目标扫描并输出结果 (single-target scan, results written to a file)
# Same single-endpoint scan, writing discovered parameters to result.json
# (-oJ = JSON output file).
arjun -u https://api.example.com/endpoint -oJ result.json
# 多目标扫描 (bulk scan: one target per line in targets.txt)
# Bulk mode: -i reads one target URL per line from the file.
arjun -i targets.txt
# 指定 HTTP 方法 (force a specific HTTP method instead of the default GET)
# Probe parameters using POST requests (-m sets the HTTP method).
arjun -u https://api.example.com/endpoint -m POST
# 热文推荐（来源博客页面抓取残留的栏目标题，非命令内容） (stray "recommended posts" heading left over from the source blog page — not a command)