A Site Template Crawler Based on Go 1.19: Construction and Practice

The complete program below fetches a page, walks its HTML document tree for links, and reports the result as structured JSON logs:
package main

import (
	"fmt"
	"log"
	"net/http"
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"golang.org/x/net/html"
)
// initLogger builds a zap logger that writes JSON-encoded records to stdout.
func initLogger() (*zap.Logger, error) {
	encoderConfig := zapcore.EncoderConfig{
		TimeKey:        "ts",
		LevelKey:       "level",
		NameKey:        "logger",
		CallerKey:      "caller",
		MessageKey:     "msg",
		StacktraceKey:  "stacktrace",
		LineEnding:     zapcore.DefaultLineEnding,
		EncodeLevel:    zapcore.LowercaseLevelEncoder,
		EncodeTime:     zapcore.EpochMillisTimeEncoder,
		EncodeDuration: zapcore.SecondsDurationEncoder,
	}
	core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderConfig), os.Stdout, zapcore.InfoLevel)
	return zap.New(core), nil
}
// crawl fetches the page at url and returns the href of every <a> tag it contains.
func crawl(logger *zap.Logger, url string) ([]string, error) {
	resp, err := http.Get(url)
	if err != nil {
		logger.Error("Error fetching URL", zap.Error(err), zap.String("url", url))
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		logger.Error("Unexpected status code", zap.Int("status", resp.StatusCode), zap.String("url", url))
		return nil, fmt.Errorf("fetching %s: unexpected status %d", url, resp.StatusCode)
	}
	doc, err := html.Parse(resp.Body)
	if err != nil {
		logger.Error("Error parsing HTML", zap.Error(err), zap.String("url", url))
		return nil, err
	}
	var links []string
	var f func(*html.Node)
	f = func(n *html.Node) {
		if n.Type == html.ElementNode && n.Data == "a" {
			for _, a := range n.Attr {
				if a.Key != "href" {
					continue
				}
				if a.Val != "" {
					links = append(links, a.Val)
				}
			}
		}
		// Recurse depth-first through the document tree.
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			f(c)
		}
	}
	f(doc)
	return links, nil
}
func main() {
	logger, err := initLogger()
	if err != nil {
		log.Fatalf("Error initializing logger: %v", err)
	}
	defer logger.Sync()

	start := time.Now()
	siteURL := "https://example.com"
	links, err := crawl(logger, siteURL)
	if err != nil {
		logger.Error("Crawl failed", zap.Error(err), zap.String("siteURL", siteURL))
		return
	}
	logger.Info("Crawl successful",
		zap.Int("num_links", len(links)),
		zap.String("siteURL", siteURL),
		zap.Duration("took", time.Since(start)),
	)
	for _, link := range links {
		fmt.Println(link)
	}
}
This program swaps the standard library's log for Uber's Zap logging library and uses a small HTML-parsing routine to collect every link on the page at the given URL. The crawl function implements the fetching logic and walks the HTML document tree with a recursive function. The example demonstrates how to write a basic web crawler in Go and how to use Zap to emit meaningful, structured log records.
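With the encoder configured in initLogger, each record comes out as one JSON object per line on stdout. A successful run would produce something roughly like the following (the timestamp, link count, and duration here are illustrative, not captured output):

{"level":"info","ts":1700000000000,"msg":"Crawl successful","num_links":12,"siteURL":"https://example.com","took":0.31}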
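Note that the hrefs collected by crawl may be relative (for example /about or style.css), so before fetching them in a follow-up pass they usually need to be resolved against the page's own URL. Below is a minimal sketch using the standard net/url package; the helper name resolveLinks and the sample URLs are my own, not part of the original post:

package main

import (
	"fmt"
	"net/url"
)

// resolveLinks resolves possibly-relative hrefs against the base URL of the
// page they were scraped from. Hypothetical helper, not from the original code.
func resolveLinks(base string, links []string) ([]string, error) {
	baseURL, err := url.Parse(base)
	if err != nil {
		return nil, err
	}
	resolved := make([]string, 0, len(links))
	for _, l := range links {
		ref, err := url.Parse(l)
		if err != nil {
			continue // skip malformed hrefs instead of failing the whole batch
		}
		resolved = append(resolved, baseURL.ResolveReference(ref).String())
	}
	return resolved, nil
}

func main() {
	links, err := resolveLinks("https://example.com/docs/", []string{"/about", "style.css", "https://other.example/x"})
	if err != nil {
		fmt.Println("resolve failed:", err)
		return
	}
	for _, l := range links {
		fmt.Println(l) // absolute URLs ready to be fetched
	}
}

url.ResolveReference implements RFC 3986 resolution, so absolute hrefs pass through unchanged while relative ones are joined onto the base path.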