Browse Source

first version

light 7 months ago
parent
commit
2a78fc5309
8 changed files with 341 additions and 1 deletion
  1. .gitignore (+2 -1)
  2. README.MD (+12 -0)
  3. config/config.go (+33 -0)
  4. main.go (+56 -0)
  5. models/task.go (+77 -0)
  6. services/processor.go (+106 -0)
  7. utils/http_client.go (+35 -0)
  8. utils/logger.go (+20 -0)

+ 2 - 1
.gitignore

@@ -2,7 +2,8 @@
 .vscode/
 *.swp
 *.swo
-
+go.mod
+go.sum
 # Local environment files
 .env
 .env.example

+ 12 - 0
README.MD

@@ -0,0 +1,12 @@
+# Project Structure
+
+├── config
+│   └── config.go       # configuration loading
+├── models
+│   └── task.go         # database models
+├── services
+│   └── processor.go    # task processing logic
+├── utils
+│   ├── logger.go       # logger setup
+│   └── http_client.go  # HTTP client
+└── main.go

+ 33 - 0
config/config.go

@@ -0,0 +1,33 @@
+package config
+
+import (
+	"github.com/caarlos0/env/v9"
+	"github.com/joho/godotenv" // new dependency
+)
+
+type Config struct {
+	// Database configuration
+	DBHost     string `env:"DB_HOST" envDefault:"localhost"`
+	DBPort     int    `env:"DB_PORT" envDefault:"3306"`
+	DBUser     string `env:"DB_USER" envDefault:"root"`
+	DBPassword string `env:"DB_PASSWORD" envDefault:"password"`
+	DBName     string `env:"DB_NAME" envDefault:"tasks"`
+	// Server configuration
+	Concurrency int    `env:"CONCURRENCY" envDefault:"20"`
+	Interval    string `env:"INTERVAL" envDefault:"10m"`
+	// Logging configuration
+	LogLevel string `env:"LOG_LEVEL" envDefault:"info"`
+	// Business configuration
+	HgApiUrl string `env:"HG_API_URL" envDefault:"https://partner.huoli.com"`
+}
+
+func LoadConfig() (*Config, error) {
+	// Load the .env file
+	_ = godotenv.Load() // automatically loads .env from the project root
+
+	cfg := &Config{}
+	if err := env.Parse(cfg); err != nil {
+		return nil, err
+	}
+	return cfg, nil
+}
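
For reference, the settings above map to a .env file at the project root, which the godotenv.Load() call picks up automatically. The keys below are the struct's env tags and the values are its envDefault values; a real deployment would override them:

    # Example .env (values shown are the envDefault values)
    DB_HOST=localhost
    DB_PORT=3306
    DB_USER=root
    DB_PASSWORD=password
    DB_NAME=tasks
    CONCURRENCY=20
    INTERVAL=10m
    LOG_LEVEL=info
    HG_API_URL=https://partner.huoli.com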

+ 56 - 0
main.go

@@ -0,0 +1,56 @@
+// main.go
+package main
+
+import (
+	"context"
+	"go-policy-service/config"
+	"go-policy-service/models"
+	"go-policy-service/services"
+	"go-policy-service/utils"
+	"time"
+
+	"github.com/robfig/cron/v3"
+)
+
+func main() {
+	// Initialize configuration
+	cfg, err := config.LoadConfig()
+	if err != nil {
+		panic("Failed to load config: " + err.Error())
+	}
+
+	// Initialize the logger
+	utils.InitLogger(cfg.LogLevel)
+
+	// Initialize the database connection
+	if err := models.InitDB(cfg); err != nil {
+		utils.Logger.Fatal("Failed to connect database: ", err)
+	}
+
+	// Create the task processor
+	processor := services.NewTaskProcessor(
+		utils.NewHttpClient(30*time.Second),
+		cfg.Concurrency,
+	)
+
+	// Set up the scheduled job
+	c := cron.New()
+	_, err = c.AddFunc("@every "+cfg.Interval, func() {
+		utils.Logger.Info("Starting scheduled task processing...")
+		ctx, cancel := context.WithTimeout(context.Background(), 9*time.Minute)
+		defer cancel()
+
+		if err := processor.ProcessTasks(ctx); err != nil {
+			utils.Logger.Error("Task processing failed: ", err)
+		}
+	})
+	if err != nil {
+		utils.Logger.Fatal("Failed to schedule task: ", err)
+	}
+
+	c.Start()
+	utils.Logger.Info("Service started successfully")
+
+	// Keep the main process running
+	select {}
+}
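
The closing select {} simply blocks forever. A minimal sketch of an alternative (not part of this commit) would wait for an OS signal and stop the scheduler before exiting; it assumes the "os/signal" and "syscall" imports are added:

    // Sketch: replace the final `select {}` with signal-aware shutdown.
    sigCtx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
    defer stop()

    <-sigCtx.Done() // block until SIGINT or SIGTERM
    utils.Logger.Info("Shutting down...")
    <-c.Stop().Done() // cron's Stop() returns a context that completes when running jobs finish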

+ 77 - 0
models/task.go

@@ -0,0 +1,77 @@
+// models/task.go
+package models
+
+import (
+	"context"
+	"fmt"
+	"go-policy-service/config"
+	"time"
+
+	"gorm.io/driver/mysql"
+	"gorm.io/gorm"
+)
+
+var db *gorm.DB
+
+type Task struct {
+	gorm.Model
+	Params    string `gorm:"type:text"`
+	Status    string `gorm:"type:varchar(20)"`
+	Result    string `gorm:"type:text"`
+	Attempts  int    `gorm:"default:0"`
+	NextTryAt time.Time
+}
+
+type ProcessedData struct {
+	gorm.Model
+	TaskID uint
+	Data   string `gorm:"type:text"`
+}
+
+func InitDB(cfg *config.Config) error {
+	dsn := getDSN(cfg)
+	conn, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})
+	if err != nil {
+		return err
+	}
+
+	db = conn
+	// Auto-migrate the table schema
+	if err := db.AutoMigrate(&Task{}, &ProcessedData{}); err != nil {
+		return err
+	}
+	return nil
+}
+
+func getDSN(cfg *config.Config) string {
+	// parseTime=True is required so MySQL DATETIME columns scan into time.Time fields (e.g. NextTryAt).
+	return fmt.Sprintf("%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=True&loc=Local",
+		cfg.DBUser,
+		cfg.DBPassword,
+		cfg.DBHost,
+		cfg.DBPort,
+		cfg.DBName)
+}
+
+func GetPendingTasks(ctx context.Context, limit int) ([]Task, error) {
+	var tasks []Task
+	result := db.WithContext(ctx).
+		Where("status = ? AND next_try_at <= ?", "pending", time.Now()).
+		Limit(limit).
+		Find(&tasks)
+	return tasks, result.Error
+}
+
+func SaveProcessedData(ctx context.Context, data *ProcessedData) error {
+	return db.WithContext(ctx).Create(data).Error
+}
+
+func UpdateTaskStatus(ctx context.Context, taskID uint, status string, attempts int) error {
+	return db.WithContext(ctx).
+		Model(&Task{}).
+		Where("id = ?", taskID).
+		Updates(map[string]interface{}{
+			"status":      status,
+			"attempts":    attempts,
+			"next_try_at": time.Now().Add(5 * time.Minute),
+		}).Error
+}
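
The commit does not show how tasks enter the queue. A hypothetical helper in this package (CreateTask is not part of the commit) could insert a row in the "pending" state that GetPendingTasks later picks up:

    // Hypothetical helper, not in the commit: enqueue a task for the next run.
    func CreateTask(ctx context.Context, params string) error {
        return db.WithContext(ctx).Create(&Task{
            Params:    params,
            Status:    "pending",
            NextTryAt: time.Now(),
        }).Error
    }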

+ 106 - 0
services/processor.go

@@ -0,0 +1,106 @@
+// services/processor.go
+package services
+
+import (
+	"context"
+	"go-policy-service/models"
+	"go-policy-service/utils"
+	"sync"
+)
+
+type TaskProcessor struct {
+	httpClient  utils.HTTPClient
+	concurrency int
+}
+
+func NewTaskProcessor(httpClient utils.HTTPClient, concurrency int) *TaskProcessor {
+	return &TaskProcessor{
+		httpClient:  httpClient,
+		concurrency: concurrency,
+	}
+}
+
+func (p *TaskProcessor) ProcessTasks(ctx context.Context) error {
+	// Fetch pending tasks
+	tasks, err := models.GetPendingTasks(ctx, 1000) // process at most 1000 tasks per run
+	if err != nil {
+		return err
+	}
+
+	// Create the task channel
+	taskChan := make(chan models.Task)
+	var wg sync.WaitGroup
+
+	// Start the workers
+	for i := 0; i < p.concurrency; i++ {
+		wg.Add(1)
+		go p.worker(ctx, &wg, taskChan)
+	}
+
+	// Dispatch tasks to the workers
+	for _, task := range tasks {
+		select {
+		case taskChan <- task:
+		case <-ctx.Done():
+			close(taskChan)
+			wg.Wait()
+			return ctx.Err()
+		}
+	}
+
+	close(taskChan)
+	wg.Wait()
+	return nil
+}
+
+func (p *TaskProcessor) worker(ctx context.Context, wg *sync.WaitGroup, tasks <-chan models.Task) {
+	defer wg.Done()
+
+	for task := range tasks {
+		select {
+		case <-ctx.Done():
+			return
+		default:
+			p.processTask(ctx, task)
+		}
+	}
+}
+
+func (p *TaskProcessor) processTask(ctx context.Context, task models.Task) {
+	// Call the third-party API
+	resp, err := p.httpClient.PostJSON(ctx, "https://partner.huoli.com/distribution/api/shopping/flight/list?token=", task.Params)
+	if err != nil {
+		utils.Logger.WithField("task_id", task.ID).Error("API request failed: ", err)
+		//models.UpdateTaskStatus(ctx, task.ID, "failed", task.Attempts+1)
+		return
+	}
+
+	// Process the response data
+	processedData, err := processResponse(resp)
+	if err != nil {
+		utils.Logger.WithField("task_id", task.ID).Error("Response processing failed: ", err)
+		//models.UpdateTaskStatus(ctx, task.ID, "failed", task.Attempts+1)
+		return
+	}
+
+	// Save the processed data
+	if err := models.SaveProcessedData(ctx, &models.ProcessedData{
+		TaskID: task.ID,
+		Data:   processedData,
+	}); err != nil {
+		utils.Logger.WithField("task_id", task.ID).Error("Failed to save processed data: ", err)
+		return
+	}
+
+	// Update the task status
+	// if err := models.UpdateTaskStatus(ctx, task.ID, "completed", task.Attempts+1); err != nil {
+	// 	utils.Logger.WithField("task_id", task.ID).Error("Failed to update task status: ", err)
+	// }
+}
+
+func processResponse(response []byte) (string, error) {
+	// Implement the actual response-processing logic here.
+
+	// Example: return the raw response as-is
+	return string(response), nil
+}
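
processResponse is a pass-through placeholder. If the API is assumed to return JSON (the real schema is not in this commit), a minimal sketch could at least validate the payload before it is stored; it needs the "encoding/json" and "fmt" imports:

    // Sketch only: assumes a JSON response body.
    func processResponse(response []byte) (string, error) {
        if !json.Valid(response) {
            return "", fmt.Errorf("response is not valid JSON: %q", response)
        }
        return string(response), nil
    }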

+ 35 - 0
utils/http_client.go

@@ -0,0 +1,35 @@
+// utils/http_client.go
+package utils
+
+import (
+	"context"
+	"net/http"
+	"time"
+)
+
+type HTTPClient interface {
+	PostJSON(ctx context.Context, url string, body interface{}) ([]byte, error)
+}
+
+type httpClient struct {
+	client *http.Client
+}
+
+func NewHttpClient(timeout time.Duration) HTTPClient {
+	return &httpClient{
+		client: &http.Client{
+			Timeout:   timeout,
+			Transport: &http.Transport{
+				MaxIdleConns:        100,
+				IdleConnTimeout:     90 * time.Second,
+				DisableCompression: true,
+			},
+		},
+	}
+}
+
+func (c *httpClient) PostJSON(ctx context.Context, url string, body interface{}) ([]byte, error) {
+	// Implement the actual HTTP request logic here.
+	// Example: return an empty response
+	return []byte("{}"), nil
+}
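
PostJSON is stubbed out in this commit and always returns "{}". One possible implementation, sketched under the assumption that callers pass either a pre-encoded JSON string (as task.Params appears to be) or a JSON-serializable value; it needs the "bytes", "encoding/json", "fmt", and "io" imports:

    func (c *httpClient) PostJSON(ctx context.Context, url string, body interface{}) ([]byte, error) {
        // Accept pre-encoded JSON as-is; marshal anything else.
        var payload []byte
        switch v := body.(type) {
        case string:
            payload = []byte(v)
        case []byte:
            payload = v
        default:
            var err error
            if payload, err = json.Marshal(v); err != nil {
                return nil, err
            }
        }

        req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(payload))
        if err != nil {
            return nil, err
        }
        req.Header.Set("Content-Type", "application/json")

        resp, err := c.client.Do(req)
        if err != nil {
            return nil, err
        }
        defer resp.Body.Close()

        if resp.StatusCode < 200 || resp.StatusCode >= 300 {
            return nil, fmt.Errorf("unexpected status %d from %s", resp.StatusCode, url)
        }
        return io.ReadAll(resp.Body)
    }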

+ 20 - 0
utils/logger.go

@@ -0,0 +1,20 @@
+// utils/logger.go
+package utils
+
+import (
+	"github.com/sirupsen/logrus"
+)
+
+var Logger *logrus.Logger
+
+func InitLogger(level string) {
+	Logger = logrus.New()
+	Logger.SetFormatter(&logrus.JSONFormatter{})
+
+	logLevel, err := logrus.ParseLevel(level)
+	if err != nil {
+		Logger.SetLevel(logrus.InfoLevel)
+	} else {
+		Logger.SetLevel(logLevel)
+	}
+}