
[Website Content Security Auditing] Part 1: Collecting All Site Links (Sitemap Data)

Without further ado, here is the code:
main.go

package main

import (
	"bufio"
	"crypto/tls"
	"fmt"
	"io"
	"log"
	"net/http"
	"net/url"
	"os"
	"strings"
	"sync"
	"time"

	_ "net/http/pprof"

	"github.com/PuerkitoBio/goquery"
	"github.com/schollz/progressbar/v3"
)

// WebCrawler crawls one or more start URLs and records every same-domain
// link it discovers.
type WebCrawler struct {
	startURLs   []string
	baseDomains map[string]bool
	visitedURLs sync.Map
	urlsToVisit chan string
	semaphore   chan struct{}
	timeout     time.Duration
	verifySSL   bool
	client      *http.Client
	progressBar *progressbar.ProgressBar
	wg          sync.WaitGroup
}

// NewWebCrawler builds a crawler restricted to the hosts of the start URLs.
func NewWebCrawler(startURLs []string, maxConnections int, timeout int, verifySSL bool) *WebCrawler {
	baseDomains := make(map[string]bool)
	for _, u := range startURLs {
		parsed, err := url.Parse(u)
		if err != nil || parsed.Host == "" {
			// Skip malformed entries instead of panicking on a nil/empty host.
			log.Printf("Skipping invalid start URL %s: %v", u, err)
			continue
		}
		baseDomains[parsed.Host] = true
	}
	return &WebCrawler{
		startURLs:   startURLs,
		baseDomains: baseDomains,
		urlsToVisit: make(chan string, 1000),
		semaphore:   make(chan struct{}, maxConnections),
		timeout:     time.Duration(timeout) * time.Second,
		verifySSL:   verifySSL,
	}
}

// initClient creates the HTTP client, optionally skipping TLS verification.
func (c *WebCrawler) initClient() {
	tr := &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: !c.verifySSL},
	}
	c.client = &http.Client{
		Timeout:   c.timeout,
		Transport: tr,
	}
}

// normalizeURL resolves rawURL (possibly relative) against baseURL.
func (c *WebCrawler) normalizeURL(rawURL string, baseURL string) (string, error) {
	base, err := url.Parse(baseURL)
	if err != nil || base == nil {
		return "", fmt.Errorf("invalid base URL: %v", err)
	}
	u, err := url.Parse(rawURL)
	if err != nil || u == nil {
		return "", fmt.Errorf("invalid URL: %v", err)
	}
	return base.ResolveReference(u).String(), nil
}

// isValidURL keeps only http(s) URLs on a start domain and skips common
// static-asset extensions.
func (c *WebCrawler) isValidURL(rawURL string) bool {
	parsed, err := url.Parse(rawURL)
	if err != nil {
		return false
	}
	if parsed.Scheme != "http" && parsed.Scheme != "https" {
		return false
	}
	if !c.baseDomains[parsed.Host] {
		return false
	}
	extensions := []string{".jpg", ".jpeg", ".png", ".gif", ".pdf", ".zip"}
	for _, ext := range extensions {
		if strings.HasSuffix(strings.ToLower(parsed.Path), ext) {
			return false
		}
	}
	return true
}

// fetchURL downloads one page; the semaphore caps concurrent requests.
func (c *WebCrawler) fetchURL(url string) (string, error) {
	c.semaphore <- struct{}{}
	defer func() { <-c.semaphore }()
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return "", fmt.Errorf("request creation failed: %v", err)
	}
	req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36")
	resp, err := c.client.Do(req)
	if err != nil {
		if strings.Contains(err.Error(), "no such host") {
			return "", fmt.Errorf("DNS lookup failed for %s", url)
		}
		return "", fmt.Errorf("request failed: %v", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		return "", fmt.Errorf("non-200 status: %d", resp.StatusCode)
	}
	if !strings.Contains(resp.Header.Get("Content-Type"), "text/html") {
		return "", fmt.Errorf("non-HTML content type: %s", resp.Header.Get("Content-Type"))
	}
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", fmt.Errorf("error reading response: %v", err)
	}
	return string(body), nil
}

// parseLinks extracts href and src attributes and returns the normalized,
// in-scope URLs.
func (c *WebCrawler) parseLinks(html string, baseURL string) []string {
	doc, err := goquery.NewDocumentFromReader(strings.NewReader(html))
	if err != nil {
		log.Printf("Error parsing HTML: %v", err)
		return nil
	}
	var links []string
	doc.Find("a[href]").Each(func(i int, s *goquery.Selection) {
		href, exists := s.Attr("href")
		if !exists || strings.HasPrefix(href, "javascript:") || href == "#" {
			return
		}
		normalized, err := c.normalizeURL(href, baseURL)
		if err != nil {
			log.Printf("Error normalizing URL %s: %v", href, err)
			return
		}
		if c.isValidURL(normalized) {
			links = append(links, normalized)
		}
	})
	doc.Find("[src]").Each(func(i int, s *goquery.Selection) {
		src, exists := s.Attr("src")
		if !exists || strings.HasPrefix(src, "data:") {
			return
		}
		normalized, err := c.normalizeURL(src, baseURL)
		if err != nil {
			log.Printf("Error normalizing URL %s: %v", src, err)
			return
		}
		if c.isValidURL(normalized) {
			links = append(links, normalized)
		}
	})
	return links
}

// processURL fetches one page, marks it as visited, and queues any new links.
func (c *WebCrawler) processURL(url string) {
	defer c.wg.Done()
	// LoadOrStore makes the check-and-mark atomic so two workers cannot
	// fetch the same URL.
	if _, loaded := c.visitedURLs.LoadOrStore(url, true); loaded {
		return
	}
	html, err := c.fetchURL(url)
	if err != nil {
		fmt.Printf("Error fetching %s: %v\n", url, err)
		return
	}
	newLinks := c.parseLinks(html, url)
	for _, link := range newLinks {
		if _, exists := c.visitedURLs.Load(link); !exists {
			c.urlsToVisit <- link
		}
	}
	if c.progressBar != nil {
		c.progressBar.Add(1)
	}
}

// crawl starts a worker per start URL plus a dispatcher goroutine that
// launches a worker for every newly discovered URL.
func (c *WebCrawler) crawl() {
	c.initClient()
	c.progressBar = progressbar.Default(-1, "crawl progress")
	defer c.progressBar.Close()
	for _, url := range c.startURLs {
		c.wg.Add(1)
		go c.processURL(url)
	}
	go func() {
		for newURL := range c.urlsToVisit {
			if _, exists := c.visitedURLs.Load(newURL); !exists {
				c.wg.Add(1)
				go c.processURL(newURL)
			}
		}
	}()
	c.wg.Wait()
}

// saveResults writes every visited URL to filename, one per line.
func (c *WebCrawler) saveResults(filename string) {
	file, err := os.Create(filename)
	if err != nil {
		fmt.Printf("Error creating file: %v\n", err)
		return
	}
	defer file.Close()
	c.visitedURLs.Range(func(key, _ interface{}) bool {
		file.WriteString(key.(string) + "\n")
		return true
	})
}

// run crawls, prints a summary, and saves the collected URLs.
func (c *WebCrawler) run() {
	startTime := time.Now()
	c.crawl()
	elapsed := time.Since(startTime)
	fmt.Printf("\nCrawl finished!\n")
	visitedCount := 0
	c.visitedURLs.Range(func(key, _ interface{}) bool {
		visitedCount++
		return true
	})
	fmt.Printf("Crawled %d URLs in total\n", visitedCount)
	fmt.Printf("Elapsed: %.2f seconds\n", elapsed.Seconds())
	outputFile := "multi_domain_links.txt"
	c.saveResults(outputFile)
	fmt.Printf("Results saved to %s\n", outputFile)
}

func main() {
	// pprof endpoint for profiling the crawler while it runs.
	go func() {
		log.Println(http.ListenAndServe("localhost:6060", nil))
	}()
	if len(os.Args) < 2 {
		fmt.Println("Usage: go run main.go <URL file path> [verify_ssl]")
		fmt.Println("Example: go run main.go urls.txt")
		fmt.Println("Or:      go run main.go urls.txt true")
		return
	}
	urlFile := os.Args[1]
	file, err := os.Open(urlFile)
	if err != nil {
		fmt.Printf("Error: file %s does not exist\n", urlFile)
		return
	}
	defer file.Close()
	var startURLs []string
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		if url := strings.TrimSpace(scanner.Text()); url != "" {
			startURLs = append(startURLs, url)
		}
	}
	if len(startURLs) == 0 {
		fmt.Println("Error: URL file is empty")
		return
	}
	verifySSL := false
	if len(os.Args) > 2 {
		verifySSL = os.Args[2] == "true"
	}
	crawler := NewWebCrawler(startURLs, 50, 20, verifySSL)
	fmt.Printf("Starting crawl, start URLs: %d, verify SSL: %v\n", len(startURLs), verifySSL)
	crawler.run()
}
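
To make the filtering rules concrete, here is a small sanity check for normalizeURL and isValidURL. It is not part of the original post: save it next to main.go (for example as main_test.go) and run go test. The example host and paths are placeholders.

package main

import "testing"

// TestURLFiltering checks relative-link resolution and the domain/extension filters.
func TestURLFiltering(t *testing.T) {
	c := NewWebCrawler([]string{"https://www.example.com"}, 10, 10, false)

	// Relative links are resolved against the page they were found on.
	got, err := c.normalizeURL("/about?p=1", "https://www.example.com/index.html")
	if err != nil || got != "https://www.example.com/about?p=1" {
		t.Fatalf("normalizeURL = %q, %v", got, err)
	}

	// Only http(s) URLs on a start domain are kept; static assets are skipped.
	if !c.isValidURL("https://www.example.com/news/1.html") {
		t.Error("expected same-domain HTML link to be accepted")
	}
	if c.isValidURL("https://cdn.other.com/a.html") {
		t.Error("expected off-domain link to be rejected")
	}
	if c.isValidURL("https://www.example.com/logo.png") {
		t.Error("expected image link to be rejected")
	}
}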

go.mod

module webcrawler

go 1.24.4

require (
	github.com/PuerkitoBio/goquery v1.10.3
	github.com/schollz/progressbar/v3 v3.18.0
)

require (
	github.com/andybalholm/cascadia v1.3.3 // indirect
	github.com/mattn/go-sqlite3 v1.14.28 // indirect
	github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
	github.com/rivo/uniseg v0.4.7 // indirect
	golang.org/x/net v0.39.0 // indirect
	golang.org/x/sys v0.32.0 // indirect
	golang.org/x/term v0.31.0 // indirect
)

domains.txt

https://www.example.com
https://www.example2.com

Run command

go run main.go .\domains.txt

When the crawl finishes, the results are written automatically to multi_domain_links.txt in the current directory.
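
The series title refers to sitemap data, but the crawler itself only produces a flat list of URLs. If an actual XML sitemap is wanted, a minimal sketch along the following lines could convert multi_domain_links.txt into sitemap.xml using the sitemaps.org schema. This helper is not part of the original program (it declares its own package main, so keep it in a separate directory); the file names are the ones used above.

package main

import (
	"bufio"
	"encoding/xml"
	"log"
	"os"
	"strings"
)

// sitemapURL and urlSet model the minimal sitemaps.org urlset schema.
type sitemapURL struct {
	Loc string `xml:"loc"`
}

type urlSet struct {
	XMLName xml.Name     `xml:"urlset"`
	Xmlns   string       `xml:"xmlns,attr"`
	URLs    []sitemapURL `xml:"url"`
}

func main() {
	in, err := os.Open("multi_domain_links.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer in.Close()

	// Read one URL per line, skipping blanks.
	set := urlSet{Xmlns: "http://www.sitemaps.org/schemas/sitemap/0.9"}
	scanner := bufio.NewScanner(in)
	for scanner.Scan() {
		if u := strings.TrimSpace(scanner.Text()); u != "" {
			set.URLs = append(set.URLs, sitemapURL{Loc: u})
		}
	}

	out, err := os.Create("sitemap.xml")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	// Write the XML declaration followed by the indented urlset.
	out.WriteString(xml.Header)
	enc := xml.NewEncoder(out)
	enc.Indent("", "  ")
	if err := enc.Encode(set); err != nil {
		log.Fatal(err)
	}
}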
