Building a Simple Site Template Crawler with Go 1.19
package main

import (
	"fmt"
	"log"
	"net/http"
	"net/url" // used by the resolveLinks sketch below
	"os"
	"sync" // used by the crawlAll sketch below
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"golang.org/x/net/html"
)
// initLogger builds a JSON-encoded Zap logger that writes to stdout at Info level.
func initLogger() (*zap.Logger, error) {
	encoderConfig := zapcore.EncoderConfig{
		TimeKey:        "ts",
		LevelKey:       "level",
		NameKey:        "logger",
		CallerKey:      "caller",
		MessageKey:     "msg",
		StacktraceKey:  "stacktrace",
		LineEnding:     zapcore.DefaultLineEnding,
		EncodeLevel:    zapcore.LowercaseLevelEncoder,
		EncodeTime:     zapcore.EpochMillisTimeEncoder,
		EncodeDuration: zapcore.SecondsDurationEncoder,
		EncodeCaller:   zapcore.ShortCallerEncoder, // required to go with CallerKey
	}
	core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderConfig), os.Stdout, zapcore.InfoLevel)
	return zap.New(core), nil
}
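// The JSON config above is production-oriented. As a hedged aside (my own
// addition, not part of the original post), zap also ships a ready-made
// development preset that prints human-readable console output:
func initDevLogger() (*zap.Logger, error) {
	return zap.NewDevelopment()
}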
// extractLinks walks the parsed HTML tree depth-first and collects the
// href attribute of every <a> element.
func extractLinks(doc *html.Node) (links []string) {
	var f func(*html.Node)
	f = func(n *html.Node) {
		if n.Type == html.ElementNode && n.Data == "a" {
			for _, a := range n.Attr {
				if a.Key == "href" {
					links = append(links, a.Val)
				}
			}
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			f(c)
		}
	}
	f(doc)
	return links
}
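// Hrefs collected by extractLinks are often relative ("/about", "../x").
// The helper below is an illustrative sketch, not part of the original post:
// it resolves each href against the page URL using net/url (imported above).
func resolveLinks(base string, links []string) []string {
	b, err := url.Parse(base)
	if err != nil {
		return links // base unusable; return the raw hrefs unchanged
	}
	resolved := make([]string, 0, len(links))
	for _, l := range links {
		ref, err := url.Parse(l)
		if err != nil {
			continue // skip malformed hrefs
		}
		resolved = append(resolved, b.ResolveReference(ref).String())
	}
	return resolved
}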
// crawl fetches the page at pageURL, parses it as HTML, and returns all links found.
func crawl(logger *zap.Logger, pageURL string) ([]string, error) {
	logger.Info("Crawling", zap.String("url", pageURL))
	client := &http.Client{Timeout: 10 * time.Second} // avoid hanging on slow hosts
	resp, err := client.Get(pageURL)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("non-200 status code: %v", resp.StatusCode)
	}
	doc, err := html.Parse(resp.Body)
	if err != nil {
		return nil, err
	}
	links := extractLinks(doc)
	logger.Info("Found links", zap.Int("count", len(links)), zap.Strings("links", links))
	return links, nil
}
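// crawl handles one page at a time. A bounded-concurrency fan-out over several
// seed URLs is a natural extension — the sketch below is my own addition, not
// the original post's code, and uses the sync package (imported above):
func crawlAll(logger *zap.Logger, urls []string) {
	var wg sync.WaitGroup
	sem := make(chan struct{}, 4) // at most 4 requests in flight
	for _, u := range urls {
		wg.Add(1)
		go func(u string) {
			defer wg.Done()
			sem <- struct{}{}        // acquire a slot
			defer func() { <-sem }() // release it when done
			if _, err := crawl(logger, u); err != nil {
				logger.Error("Crawl failed", zap.String("url", u), zap.Error(err))
			}
		}(u)
	}
	wg.Wait()
}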
func main() {
	logger, err := initLogger()
	if err != nil {
		log.Fatalf("Failed to init logger: %v", err)
	}
	defer logger.Sync()

	start := time.Now()
	links, err := crawl(logger, "https://example.com")
	if err != nil {
		logger.Error("Crawl failed", zap.Error(err))
	} else {
		logger.Info("Crawl successful", zap.Duration("took", time.Since(start)), zap.Int("num_links", len(links)))
	}
}
This program targets Go 1.19. It initializes a Zap logger and parses the fetched page with the golang.org/x/net/html package to extract links, demonstrating the basic building blocks of a web crawler in Go: structured logging, an HTTP fetch, and HTML tree traversal.
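Since extractLinks is pure tree traversal, it can be unit-tested without any network access. The test below is a sketch of my own, not part of the original post; it assumes the code above lives in package main, with the test saved alongside it as, say, main_test.go:

package main

import (
	"strings"
	"testing"

	"golang.org/x/net/html"
)

func TestExtractLinks(t *testing.T) {
	// Parse a small inline HTML fragment and check both hrefs are collected.
	doc, err := html.Parse(strings.NewReader(`<p><a href="/a">A</a><a href="https://example.com/b">B</a></p>`))
	if err != nil {
		t.Fatal(err)
	}
	links := extractLinks(doc)
	if len(links) != 2 {
		t.Fatalf("want 2 links, got %d: %v", len(links), links)
	}
}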