feat(app): Implement comprehensive application lifecycle management
- Add new `app` package to manage application initialization and lifecycle - Refactor `main.go` to use new application management approach - Implement graceful shutdown with context timeout and signal handling - Add dependency injection container initialization - Enhance logging with configurable log levels and structured logging - Update configuration loading and server initialization process - Modify Jitsi configuration in `.env` for custom deployment - Improve error handling and logging throughout application startup - Centralize application startup and shutdown logic in single package Introduces a more robust and flexible application management system with improved initialization, logging, and shutdown capabilities.
This commit is contained in:
parent
a7cd28f3af
commit
ddfa2de49e
7
.env
7
.env
@ -27,7 +27,8 @@ SMTP_PASSWORD=your_app_password
|
||||
SMTP_FROM=your_email@gmail.com
|
||||
|
||||
# Jitsi Configuration
|
||||
JITSI_BASE_URL=https://meet.jit.si
|
||||
# JITSI_BASE_URL=https://meet.jit.si
|
||||
JITSI_BASE_URL=https://meet.attunehearttherapy.com
|
||||
JITSI_API_KEY=your_jitsi_api_key
|
||||
JITSI_APP_ID=your_jitsi_app_id
|
||||
JITSI_PRIVATE_KEY=your_jitsi_private_key
|
||||
JITSI_APP_ID=attunehearttherapy_id
|
||||
JITSI_PRIVATE_KEY=attunehearttherapy_jitsi_private_key
|
||||
@ -1,62 +1,31 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"attune-heart-therapy/internal/config"
|
||||
"attune-heart-therapy/internal/server"
|
||||
"attune-heart-therapy/internal/app"
|
||||
"attune-heart-therapy/internal/logger"
|
||||
|
||||
"github.com/joho/godotenv"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Initialize logger
|
||||
log := logger.New("main")
|
||||
|
||||
// Load environment variables
|
||||
if err := godotenv.Load(); err != nil {
|
||||
log.Println("No .env file found, using system environment variables")
|
||||
log.Warn("No .env file found, using system environment variables")
|
||||
}
|
||||
|
||||
// Load configuration
|
||||
cfg, err := config.Load()
|
||||
// Create and run application
|
||||
application, err := app.New()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to load configuration: %v", err)
|
||||
log.Fatal("Failed to create application", err)
|
||||
}
|
||||
|
||||
// Initialize server
|
||||
srv := server.New(cfg)
|
||||
|
||||
// Setup graceful shutdown
|
||||
quit := make(chan os.Signal, 1)
|
||||
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
// Start server in a goroutine
|
||||
go func() {
|
||||
if err := srv.Start(); err != nil && err != http.ErrServerClosed {
|
||||
log.Fatalf("Failed to start server: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
log.Println("Server started successfully")
|
||||
|
||||
// Wait for interrupt signal
|
||||
<-quit
|
||||
log.Println("Shutting down server...")
|
||||
|
||||
// Create a deadline for shutdown
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Shutdown server gracefully
|
||||
if err := srv.Shutdown(); err != nil {
|
||||
log.Printf("Error during server shutdown: %v", err)
|
||||
// Run the application with graceful shutdown handling
|
||||
if err := application.Run(); err != nil {
|
||||
log.Fatal("Application failed", err)
|
||||
}
|
||||
|
||||
// Wait for context deadline or completion
|
||||
<-ctx.Done()
|
||||
log.Println("Server shutdown completed")
|
||||
log.Info("Application shutdown completed")
|
||||
}
|
||||
|
||||
211
internal/app/app.go
Normal file
211
internal/app/app.go
Normal file
@ -0,0 +1,211 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"attune-heart-therapy/internal/config"
|
||||
"attune-heart-therapy/internal/container"
|
||||
"attune-heart-therapy/internal/logger"
|
||||
"attune-heart-therapy/internal/server"
|
||||
)
|
||||
|
||||
// Application represents the main application
|
||||
type Application struct {
|
||||
config *config.Config
|
||||
container *container.Container
|
||||
server *server.Server
|
||||
log *logger.Logger
|
||||
}
|
||||
|
||||
// New creates a new application instance
|
||||
func New() (*Application, error) {
|
||||
// Initialize logger first
|
||||
log := logger.New("application")
|
||||
|
||||
// Load configuration
|
||||
cfg, err := config.Load()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load configuration: %w", err)
|
||||
}
|
||||
|
||||
// Set log level based on configuration
|
||||
logLevel := logger.INFO
|
||||
switch cfg.Logging.Level {
|
||||
case "DEBUG":
|
||||
logLevel = logger.DEBUG
|
||||
case "INFO":
|
||||
logLevel = logger.INFO
|
||||
case "WARN":
|
||||
logLevel = logger.WARN
|
||||
case "ERROR":
|
||||
logLevel = logger.ERROR
|
||||
case "FATAL":
|
||||
logLevel = logger.FATAL
|
||||
}
|
||||
logger.SetGlobalLevel(logLevel)
|
||||
|
||||
log.Info("Application configuration loaded", map[string]interface{}{
|
||||
"server_host": cfg.Server.Host,
|
||||
"server_port": cfg.Server.Port,
|
||||
"db_host": cfg.Database.Host,
|
||||
"db_name": cfg.Database.Name,
|
||||
})
|
||||
|
||||
// Initialize dependency injection container
|
||||
cont := container.New(cfg)
|
||||
|
||||
// Initialize server
|
||||
srv := server.New(cfg)
|
||||
|
||||
return &Application{
|
||||
config: cfg,
|
||||
container: cont,
|
||||
server: srv,
|
||||
log: log,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Initialize performs application initialization
|
||||
func (app *Application) Initialize() error {
|
||||
app.log.Info("Initializing application...")
|
||||
|
||||
// Initialize all dependencies through the container
|
||||
if err := app.container.Initialize(); err != nil {
|
||||
return fmt.Errorf("failed to initialize dependencies: %w", err)
|
||||
}
|
||||
|
||||
// Wire the container to the server
|
||||
app.server.SetContainer(app.container)
|
||||
|
||||
app.log.Info("Application initialization completed successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run starts the application and handles graceful shutdown
|
||||
func (app *Application) Run() error {
|
||||
// Initialize the application
|
||||
if err := app.Initialize(); err != nil {
|
||||
return fmt.Errorf("application initialization failed: %w", err)
|
||||
}
|
||||
|
||||
// Setup graceful shutdown
|
||||
quit := make(chan os.Signal, 1)
|
||||
signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
// Start server in a goroutine
|
||||
serverErrors := make(chan error, 1)
|
||||
go func() {
|
||||
app.log.Info("Starting HTTP server...")
|
||||
if err := app.server.Start(); err != nil {
|
||||
serverErrors <- fmt.Errorf("server start failed: %w", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// Wait for either shutdown signal or server error
|
||||
select {
|
||||
case err := <-serverErrors:
|
||||
return err
|
||||
case sig := <-quit:
|
||||
app.log.Info("Received shutdown signal", map[string]interface{}{
|
||||
"signal": sig.String(),
|
||||
})
|
||||
}
|
||||
|
||||
// Graceful shutdown
|
||||
return app.shutdown()
|
||||
}
|
||||
|
||||
// shutdown performs graceful shutdown of all components
|
||||
func (app *Application) shutdown() error {
|
||||
app.log.Info("Initiating graceful shutdown...")
|
||||
|
||||
// Create shutdown context with timeout
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
var shutdownErrors []error
|
||||
|
||||
// Shutdown server first
|
||||
if app.server != nil {
|
||||
app.log.Info("Shutting down HTTP server...")
|
||||
if err := app.server.Shutdown(ctx); err != nil {
|
||||
app.log.Error("Error shutting down server", err)
|
||||
shutdownErrors = append(shutdownErrors, fmt.Errorf("server shutdown error: %w", err))
|
||||
} else {
|
||||
app.log.Info("HTTP server shutdown completed")
|
||||
}
|
||||
}
|
||||
|
||||
// Shutdown application dependencies
|
||||
if app.container != nil {
|
||||
app.log.Info("Shutting down application dependencies...")
|
||||
if err := app.container.Shutdown(); err != nil {
|
||||
app.log.Error("Error shutting down dependencies", err)
|
||||
shutdownErrors = append(shutdownErrors, err)
|
||||
} else {
|
||||
app.log.Info("Dependencies shutdown completed")
|
||||
}
|
||||
}
|
||||
|
||||
// Wait for context deadline or completion
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
if ctx.Err() == context.DeadlineExceeded {
|
||||
app.log.Warn("Shutdown timeout exceeded, forcing exit")
|
||||
shutdownErrors = append(shutdownErrors, fmt.Errorf("shutdown timeout exceeded"))
|
||||
}
|
||||
default:
|
||||
// Shutdown completed before timeout
|
||||
}
|
||||
|
||||
if len(shutdownErrors) > 0 {
|
||||
app.log.Error("Graceful shutdown completed with errors", nil, map[string]interface{}{
|
||||
"error_count": len(shutdownErrors),
|
||||
})
|
||||
return fmt.Errorf("shutdown completed with %d errors: %v", len(shutdownErrors), shutdownErrors)
|
||||
}
|
||||
|
||||
app.log.Info("Graceful shutdown completed successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetConfig returns the application configuration
|
||||
func (app *Application) GetConfig() *config.Config {
|
||||
return app.config
|
||||
}
|
||||
|
||||
// GetContainer returns the dependency injection container
|
||||
func (app *Application) GetContainer() *container.Container {
|
||||
return app.container
|
||||
}
|
||||
|
||||
// HealthCheck performs a comprehensive health check
|
||||
func (app *Application) HealthCheck() map[string]interface{} {
|
||||
health := map[string]interface{}{
|
||||
"status": "ok",
|
||||
"timestamp": time.Now().UTC().Format(time.RFC3339),
|
||||
"service": "Video Conference Booking System",
|
||||
"version": "1.0.0",
|
||||
}
|
||||
|
||||
// Check container health
|
||||
if app.container != nil {
|
||||
containerHealth := app.container.HealthCheck()
|
||||
health["components"] = containerHealth
|
||||
|
||||
// Update overall status based on component health
|
||||
if status, ok := containerHealth["status"].(string); ok && status != "ok" {
|
||||
health["status"] = status
|
||||
}
|
||||
} else {
|
||||
health["status"] = "error"
|
||||
health["error"] = "Container not initialized"
|
||||
}
|
||||
|
||||
return health
|
||||
}
|
||||
@ -13,6 +13,7 @@ type Config struct {
|
||||
Stripe StripeConfig
|
||||
SMTP SMTPConfig
|
||||
Jitsi JitsiConfig
|
||||
Logging LoggingConfig
|
||||
}
|
||||
|
||||
type ServerConfig struct {
|
||||
@ -55,6 +56,15 @@ type JitsiConfig struct {
|
||||
PrivateKey string
|
||||
}
|
||||
|
||||
type LoggingConfig struct {
|
||||
Level string
|
||||
Format string
|
||||
Output string
|
||||
MaxSize int
|
||||
MaxBackups int
|
||||
MaxAge int
|
||||
}
|
||||
|
||||
func Load() (*Config, error) {
|
||||
cfg := &Config{
|
||||
Server: ServerConfig{
|
||||
@ -91,6 +101,14 @@ func Load() (*Config, error) {
|
||||
AppID: getEnv("JITSI_APP_ID", ""),
|
||||
PrivateKey: getEnv("JITSI_PRIVATE_KEY", ""),
|
||||
},
|
||||
Logging: LoggingConfig{
|
||||
Level: getEnv("LOG_LEVEL", "INFO"),
|
||||
Format: getEnv("LOG_FORMAT", "json"),
|
||||
Output: getEnv("LOG_OUTPUT", "stdout"),
|
||||
MaxSize: getEnvInt("LOG_MAX_SIZE", 100),
|
||||
MaxBackups: getEnvInt("LOG_MAX_BACKUPS", 3),
|
||||
MaxAge: getEnvInt("LOG_MAX_AGE", 28),
|
||||
},
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
|
||||
338
internal/container/container.go
Normal file
338
internal/container/container.go
Normal file
@ -0,0 +1,338 @@
|
||||
package container
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"attune-heart-therapy/internal/config"
|
||||
"attune-heart-therapy/internal/database"
|
||||
"attune-heart-therapy/internal/handlers"
|
||||
"attune-heart-therapy/internal/logger"
|
||||
"attune-heart-therapy/internal/repositories"
|
||||
"attune-heart-therapy/internal/services"
|
||||
)
|
||||
|
||||
// Container holds all application dependencies
|
||||
type Container struct {
|
||||
Config *config.Config
|
||||
Database *database.DB
|
||||
Log *logger.Logger
|
||||
|
||||
// Repositories
|
||||
Repositories *repositories.Repositories
|
||||
|
||||
// Services
|
||||
JWTService services.JWTService
|
||||
UserService services.UserService
|
||||
BookingService services.BookingService
|
||||
PaymentService services.PaymentService
|
||||
NotificationService services.NotificationService
|
||||
JitsiService services.JitsiService
|
||||
AdminService services.AdminService
|
||||
JobManagerService services.JobManagerService
|
||||
|
||||
// Handlers
|
||||
AuthHandler *handlers.AuthHandler
|
||||
BookingHandler *handlers.BookingHandler
|
||||
PaymentHandler *handlers.PaymentHandler
|
||||
AdminHandler *handlers.AdminHandler
|
||||
}
|
||||
|
||||
// New creates a new dependency injection container
|
||||
func New(cfg *config.Config) *Container {
|
||||
return &Container{
|
||||
Config: cfg,
|
||||
Log: logger.New("container"),
|
||||
}
|
||||
}
|
||||
|
||||
// Initialize sets up all dependencies in the correct order
|
||||
func (c *Container) Initialize() error {
|
||||
c.Log.Info("Initializing application dependencies...")
|
||||
|
||||
// Initialize database connection
|
||||
if err := c.initializeDatabase(); err != nil {
|
||||
return fmt.Errorf("failed to initialize database: %w", err)
|
||||
}
|
||||
|
||||
// Initialize repositories
|
||||
c.initializeRepositories()
|
||||
|
||||
// Initialize services
|
||||
if err := c.initializeServices(); err != nil {
|
||||
return fmt.Errorf("failed to initialize services: %w", err)
|
||||
}
|
||||
|
||||
// Initialize handlers
|
||||
c.initializeHandlers()
|
||||
|
||||
c.Log.Info("Application dependencies initialized successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
// initializeDatabase sets up database connection and runs migrations
|
||||
func (c *Container) initializeDatabase() error {
|
||||
c.Log.Info("Initializing database connection...")
|
||||
|
||||
db, err := database.New(c.Config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create database connection: %w", err)
|
||||
}
|
||||
c.Database = db
|
||||
|
||||
// Run database migrations
|
||||
c.Log.Info("Running database migrations...")
|
||||
if err := c.Database.Migrate(); err != nil {
|
||||
return fmt.Errorf("failed to run database migrations: %w", err)
|
||||
}
|
||||
|
||||
// Seed database with initial data
|
||||
c.Log.Info("Seeding database with initial data...")
|
||||
if err := c.Database.Seed(); err != nil {
|
||||
return fmt.Errorf("failed to seed database: %w", err)
|
||||
}
|
||||
|
||||
c.Log.Info("Database initialization completed")
|
||||
return nil
|
||||
}
|
||||
|
||||
// initializeRepositories sets up all repository instances
|
||||
func (c *Container) initializeRepositories() {
|
||||
c.Log.Info("Initializing repositories...")
|
||||
c.Repositories = c.Database.GetRepositories()
|
||||
c.Log.Info("Repositories initialized")
|
||||
}
|
||||
|
||||
// initializeServices sets up all service instances with proper dependency injection
|
||||
func (c *Container) initializeServices() error {
|
||||
c.Log.Info("Initializing services...")
|
||||
|
||||
// Initialize JWT service (no dependencies)
|
||||
c.JWTService = services.NewJWTService(c.Config.JWT.Secret, c.Config.JWT.Expiration)
|
||||
|
||||
// Initialize Jitsi service (no dependencies)
|
||||
c.JitsiService = services.NewJitsiService(&c.Config.Jitsi)
|
||||
|
||||
// Initialize notification service (depends on notification repository and config)
|
||||
c.NotificationService = services.NewNotificationService(c.Repositories.Notification, c.Config)
|
||||
|
||||
// Initialize user service (depends on user repository, JWT service, and notification service)
|
||||
c.UserService = services.NewUserService(c.Repositories.User, c.JWTService, c.NotificationService)
|
||||
|
||||
// Initialize payment service (depends on config, booking/user repositories, and notification service)
|
||||
c.PaymentService = services.NewPaymentService(c.Config, c.Repositories.Booking, c.Repositories.User, c.NotificationService)
|
||||
|
||||
// Initialize job manager service (depends on notification service and repositories)
|
||||
c.JobManagerService = services.NewJobManagerService(c.NotificationService, c.Repositories.Booking, c.Repositories.User)
|
||||
|
||||
// Start the job manager
|
||||
c.Log.Info("Starting job manager...")
|
||||
if err := c.JobManagerService.Start(); err != nil {
|
||||
return fmt.Errorf("failed to start job manager: %w", err)
|
||||
}
|
||||
c.Log.Info("Job manager started successfully")
|
||||
|
||||
// Initialize booking service (depends on multiple repositories and services)
|
||||
c.BookingService = services.NewBookingService(
|
||||
c.Repositories.Booking,
|
||||
c.Repositories.Schedule,
|
||||
c.Repositories.User,
|
||||
c.JitsiService,
|
||||
c.PaymentService,
|
||||
c.NotificationService,
|
||||
c.JobManagerService,
|
||||
)
|
||||
|
||||
// Initialize admin service (depends on repositories)
|
||||
c.AdminService = services.NewAdminService(c.Repositories.User, c.Repositories.Booking, c.Repositories.Schedule)
|
||||
|
||||
c.Log.Info("Services initialized successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
// initializeHandlers sets up all HTTP handlers with service dependencies
|
||||
func (c *Container) initializeHandlers() {
|
||||
c.Log.Info("Initializing handlers...")
|
||||
|
||||
c.AuthHandler = handlers.NewAuthHandler(c.UserService)
|
||||
c.BookingHandler = handlers.NewBookingHandler(c.BookingService)
|
||||
c.PaymentHandler = handlers.NewPaymentHandler(c.PaymentService)
|
||||
c.AdminHandler = handlers.NewAdminHandler(c.AdminService)
|
||||
|
||||
c.Log.Info("Handlers initialized successfully")
|
||||
}
|
||||
|
||||
// Shutdown gracefully shuts down all services and connections
|
||||
func (c *Container) Shutdown() error {
|
||||
c.Log.Info("Shutting down application dependencies...")
|
||||
|
||||
var shutdownErrors []error
|
||||
|
||||
// Stop job manager first
|
||||
if c.JobManagerService != nil && c.JobManagerService.IsRunning() {
|
||||
c.Log.Info("Stopping job manager...")
|
||||
if err := c.JobManagerService.Stop(); err != nil {
|
||||
c.Log.Error("Error stopping job manager", err)
|
||||
shutdownErrors = append(shutdownErrors, fmt.Errorf("job manager shutdown error: %w", err))
|
||||
} else {
|
||||
c.Log.Info("Job manager stopped successfully")
|
||||
}
|
||||
}
|
||||
|
||||
// Close database connection
|
||||
if c.Database != nil {
|
||||
c.Log.Info("Closing database connection...")
|
||||
if err := c.Database.Close(); err != nil {
|
||||
c.Log.Error("Error closing database", err)
|
||||
shutdownErrors = append(shutdownErrors, fmt.Errorf("database shutdown error: %w", err))
|
||||
} else {
|
||||
c.Log.Info("Database connection closed successfully")
|
||||
}
|
||||
}
|
||||
|
||||
if len(shutdownErrors) > 0 {
|
||||
return fmt.Errorf("shutdown completed with errors: %v", shutdownErrors)
|
||||
}
|
||||
|
||||
c.Log.Info("Application dependencies shutdown completed successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetJWTService returns the JWT service for middleware usage
|
||||
func (c *Container) GetJWTService() services.JWTService {
|
||||
return c.JWTService
|
||||
}
|
||||
|
||||
// HealthCheck performs a health check on all critical dependencies
|
||||
func (c *Container) HealthCheck() map[string]interface{} {
|
||||
health := map[string]interface{}{
|
||||
"status": "ok",
|
||||
}
|
||||
|
||||
// Check database connectivity
|
||||
if c.Database != nil {
|
||||
if err := c.Database.Health(); err != nil {
|
||||
health["database"] = map[string]interface{}{
|
||||
"status": "error",
|
||||
"error": err.Error(),
|
||||
}
|
||||
health["status"] = "degraded"
|
||||
} else {
|
||||
health["database"] = map[string]interface{}{
|
||||
"status": "healthy",
|
||||
}
|
||||
}
|
||||
} else {
|
||||
health["database"] = map[string]interface{}{
|
||||
"status": "not_initialized",
|
||||
}
|
||||
health["status"] = "error"
|
||||
}
|
||||
|
||||
// Check job manager status
|
||||
if c.JobManagerService != nil {
|
||||
isRunning := c.JobManagerService.IsRunning()
|
||||
health["job_manager"] = map[string]interface{}{
|
||||
"status": "healthy",
|
||||
"running": isRunning,
|
||||
}
|
||||
if !isRunning {
|
||||
health["job_manager"].(map[string]interface{})["status"] = "unhealthy"
|
||||
health["status"] = "degraded"
|
||||
}
|
||||
} else {
|
||||
health["job_manager"] = map[string]interface{}{
|
||||
"status": "not_initialized",
|
||||
}
|
||||
health["status"] = "degraded"
|
||||
}
|
||||
|
||||
// Check services initialization
|
||||
servicesHealth := map[string]interface{}{}
|
||||
|
||||
if c.UserService != nil {
|
||||
servicesHealth["user_service"] = "initialized"
|
||||
} else {
|
||||
servicesHealth["user_service"] = "not_initialized"
|
||||
health["status"] = "degraded"
|
||||
}
|
||||
|
||||
if c.BookingService != nil {
|
||||
servicesHealth["booking_service"] = "initialized"
|
||||
} else {
|
||||
servicesHealth["booking_service"] = "not_initialized"
|
||||
health["status"] = "degraded"
|
||||
}
|
||||
|
||||
if c.PaymentService != nil {
|
||||
servicesHealth["payment_service"] = "initialized"
|
||||
} else {
|
||||
servicesHealth["payment_service"] = "not_initialized"
|
||||
health["status"] = "degraded"
|
||||
}
|
||||
|
||||
if c.NotificationService != nil {
|
||||
servicesHealth["notification_service"] = "initialized"
|
||||
} else {
|
||||
servicesHealth["notification_service"] = "not_initialized"
|
||||
health["status"] = "degraded"
|
||||
}
|
||||
|
||||
health["services"] = servicesHealth
|
||||
|
||||
return health
|
||||
}
|
||||
|
||||
// IsInitialized returns true if the container has been initialized
|
||||
func (c *Container) IsInitialized() bool {
|
||||
return c.Database != nil && c.Repositories != nil && c.UserService != nil
|
||||
}
|
||||
|
||||
// GetDependencyStatus returns the status of all dependencies
|
||||
func (c *Container) GetDependencyStatus() map[string]string {
|
||||
status := make(map[string]string)
|
||||
|
||||
// Database status
|
||||
if c.Database != nil {
|
||||
if err := c.Database.Health(); err != nil {
|
||||
status["database"] = "unhealthy"
|
||||
} else {
|
||||
status["database"] = "healthy"
|
||||
}
|
||||
} else {
|
||||
status["database"] = "not_initialized"
|
||||
}
|
||||
|
||||
// Services status
|
||||
services := []struct {
|
||||
name string
|
||||
service interface{}
|
||||
}{
|
||||
{"user_service", c.UserService},
|
||||
{"booking_service", c.BookingService},
|
||||
{"payment_service", c.PaymentService},
|
||||
{"notification_service", c.NotificationService},
|
||||
{"jitsi_service", c.JitsiService},
|
||||
{"admin_service", c.AdminService},
|
||||
{"job_manager_service", c.JobManagerService},
|
||||
{"jwt_service", c.JWTService},
|
||||
}
|
||||
|
||||
for _, svc := range services {
|
||||
if svc.service != nil {
|
||||
status[svc.name] = "initialized"
|
||||
} else {
|
||||
status[svc.name] = "not_initialized"
|
||||
}
|
||||
}
|
||||
|
||||
// Job manager running status
|
||||
if c.JobManagerService != nil {
|
||||
if c.JobManagerService.IsRunning() {
|
||||
status["job_manager_running"] = "true"
|
||||
} else {
|
||||
status["job_manager_running"] = "false"
|
||||
}
|
||||
}
|
||||
|
||||
return status
|
||||
}
|
||||
235
internal/errors/errors.go
Normal file
235
internal/errors/errors.go
Normal file
@ -0,0 +1,235 @@
|
||||
package errors
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// ErrorCode represents application-specific error codes
|
||||
type ErrorCode string
|
||||
|
||||
const (
|
||||
// Authentication errors
|
||||
ErrCodeInvalidCredentials ErrorCode = "INVALID_CREDENTIALS"
|
||||
ErrCodeTokenExpired ErrorCode = "TOKEN_EXPIRED"
|
||||
ErrCodeTokenInvalid ErrorCode = "TOKEN_INVALID"
|
||||
ErrCodeUnauthorized ErrorCode = "UNAUTHORIZED"
|
||||
ErrCodeForbidden ErrorCode = "FORBIDDEN"
|
||||
|
||||
// Validation errors
|
||||
ErrCodeValidationFailed ErrorCode = "VALIDATION_FAILED"
|
||||
ErrCodeInvalidInput ErrorCode = "INVALID_INPUT"
|
||||
ErrCodeMissingField ErrorCode = "MISSING_FIELD"
|
||||
|
||||
// Resource errors
|
||||
ErrCodeNotFound ErrorCode = "NOT_FOUND"
|
||||
ErrCodeAlreadyExists ErrorCode = "ALREADY_EXISTS"
|
||||
ErrCodeConflict ErrorCode = "CONFLICT"
|
||||
|
||||
// Business logic errors
|
||||
ErrCodeSlotUnavailable ErrorCode = "SLOT_UNAVAILABLE"
|
||||
ErrCodeBookingNotFound ErrorCode = "BOOKING_NOT_FOUND"
|
||||
ErrCodePaymentFailed ErrorCode = "PAYMENT_FAILED"
|
||||
ErrCodePaymentRequired ErrorCode = "PAYMENT_REQUIRED"
|
||||
ErrCodeMeetingCreateFailed ErrorCode = "MEETING_CREATE_FAILED"
|
||||
|
||||
// System errors
|
||||
ErrCodeInternalServer ErrorCode = "INTERNAL_SERVER_ERROR"
|
||||
ErrCodeDatabaseError ErrorCode = "DATABASE_ERROR"
|
||||
ErrCodeExternalAPI ErrorCode = "EXTERNAL_API_ERROR"
|
||||
ErrCodeServiceUnavailable ErrorCode = "SERVICE_UNAVAILABLE"
|
||||
|
||||
// Rate limiting errors
|
||||
ErrCodeRateLimitExceeded ErrorCode = "RATE_LIMIT_EXCEEDED"
|
||||
ErrCodeTooManyRequests ErrorCode = "TOO_MANY_REQUESTS"
|
||||
)
|
||||
|
||||
// AppError represents an application error with structured information
|
||||
type AppError struct {
|
||||
Code ErrorCode `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Details string `json:"details,omitempty"`
|
||||
HTTPStatus int `json:"-"`
|
||||
Fields map[string]interface{} `json:"fields,omitempty"`
|
||||
Cause error `json:"-"`
|
||||
}
|
||||
|
||||
// Error implements the error interface
|
||||
func (e *AppError) Error() string {
|
||||
if e.Details != "" {
|
||||
return fmt.Sprintf("%s: %s - %s", e.Code, e.Message, e.Details)
|
||||
}
|
||||
return fmt.Sprintf("%s: %s", e.Code, e.Message)
|
||||
}
|
||||
|
||||
// Unwrap returns the underlying cause error
|
||||
func (e *AppError) Unwrap() error {
|
||||
return e.Cause
|
||||
}
|
||||
|
||||
// WithDetails adds details to the error
|
||||
func (e *AppError) WithDetails(details string) *AppError {
|
||||
e.Details = details
|
||||
return e
|
||||
}
|
||||
|
||||
// WithField adds a field to the error
|
||||
func (e *AppError) WithField(key string, value interface{}) *AppError {
|
||||
if e.Fields == nil {
|
||||
e.Fields = make(map[string]interface{})
|
||||
}
|
||||
e.Fields[key] = value
|
||||
return e
|
||||
}
|
||||
|
||||
// WithFields adds multiple fields to the error
|
||||
func (e *AppError) WithFields(fields map[string]interface{}) *AppError {
|
||||
if e.Fields == nil {
|
||||
e.Fields = make(map[string]interface{})
|
||||
}
|
||||
for k, v := range fields {
|
||||
e.Fields[k] = v
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// WithCause adds a cause error
|
||||
func (e *AppError) WithCause(cause error) *AppError {
|
||||
e.Cause = cause
|
||||
return e
|
||||
}
|
||||
|
||||
// New creates a new AppError
|
||||
func New(code ErrorCode, message string, httpStatus int) *AppError {
|
||||
return &AppError{
|
||||
Code: code,
|
||||
Message: message,
|
||||
HTTPStatus: httpStatus,
|
||||
}
|
||||
}
|
||||
|
||||
// Wrap wraps an existing error with application error information
|
||||
func Wrap(err error, code ErrorCode, message string, httpStatus int) *AppError {
|
||||
return &AppError{
|
||||
Code: code,
|
||||
Message: message,
|
||||
HTTPStatus: httpStatus,
|
||||
Cause: err,
|
||||
}
|
||||
}
|
||||
|
||||
// Predefined common errors
|
||||
var (
|
||||
// Authentication errors
|
||||
ErrInvalidCredentials = New(ErrCodeInvalidCredentials, "Invalid email or password", http.StatusUnauthorized)
|
||||
ErrTokenExpired = New(ErrCodeTokenExpired, "Authentication token has expired", http.StatusUnauthorized)
|
||||
ErrTokenInvalid = New(ErrCodeTokenInvalid, "Invalid authentication token", http.StatusUnauthorized)
|
||||
ErrUnauthorized = New(ErrCodeUnauthorized, "Authentication required", http.StatusUnauthorized)
|
||||
ErrForbidden = New(ErrCodeForbidden, "Access denied", http.StatusForbidden)
|
||||
|
||||
// Validation errors
|
||||
ErrValidationFailed = New(ErrCodeValidationFailed, "Request validation failed", http.StatusBadRequest)
|
||||
ErrInvalidInput = New(ErrCodeInvalidInput, "Invalid input provided", http.StatusBadRequest)
|
||||
ErrMissingField = New(ErrCodeMissingField, "Required field is missing", http.StatusBadRequest)
|
||||
|
||||
// Resource errors
|
||||
ErrNotFound = New(ErrCodeNotFound, "Resource not found", http.StatusNotFound)
|
||||
ErrAlreadyExists = New(ErrCodeAlreadyExists, "Resource already exists", http.StatusConflict)
|
||||
ErrConflict = New(ErrCodeConflict, "Resource conflict", http.StatusConflict)
|
||||
|
||||
// Business logic errors
|
||||
ErrSlotUnavailable = New(ErrCodeSlotUnavailable, "Selected time slot is not available", http.StatusConflict)
|
||||
ErrBookingNotFound = New(ErrCodeBookingNotFound, "Booking not found", http.StatusNotFound)
|
||||
ErrPaymentFailed = New(ErrCodePaymentFailed, "Payment processing failed", http.StatusPaymentRequired)
|
||||
ErrPaymentRequired = New(ErrCodePaymentRequired, "Payment is required", http.StatusPaymentRequired)
|
||||
ErrMeetingCreateFailed = New(ErrCodeMeetingCreateFailed, "Failed to create meeting room", http.StatusInternalServerError)
|
||||
|
||||
// System errors
|
||||
ErrInternalServer = New(ErrCodeInternalServer, "Internal server error", http.StatusInternalServerError)
|
||||
ErrDatabaseError = New(ErrCodeDatabaseError, "Database operation failed", http.StatusInternalServerError)
|
||||
ErrExternalAPI = New(ErrCodeExternalAPI, "External API error", http.StatusBadGateway)
|
||||
ErrServiceUnavailable = New(ErrCodeServiceUnavailable, "Service temporarily unavailable", http.StatusServiceUnavailable)
|
||||
|
||||
// Rate limiting errors
|
||||
ErrRateLimitExceeded = New(ErrCodeRateLimitExceeded, "Rate limit exceeded", http.StatusTooManyRequests)
|
||||
ErrTooManyRequests = New(ErrCodeTooManyRequests, "Too many requests", http.StatusTooManyRequests)
|
||||
)
|
||||
|
||||
// IsAppError checks if an error is an AppError
|
||||
func IsAppError(err error) bool {
|
||||
_, ok := err.(*AppError)
|
||||
return ok
|
||||
}
|
||||
|
||||
// GetAppError extracts AppError from error, returns nil if not an AppError
|
||||
func GetAppError(err error) *AppError {
|
||||
if appErr, ok := err.(*AppError); ok {
|
||||
return appErr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ErrorResponse represents the JSON error response structure
|
||||
type ErrorResponse struct {
|
||||
Error ErrorInfo `json:"error"`
|
||||
}
|
||||
|
||||
// ErrorInfo contains error information for API responses
|
||||
type ErrorInfo struct {
|
||||
Code ErrorCode `json:"code"`
|
||||
Message string `json:"message"`
|
||||
Details string `json:"details,omitempty"`
|
||||
Fields map[string]interface{} `json:"fields,omitempty"`
|
||||
}
|
||||
|
||||
// ToErrorResponse converts an AppError to an ErrorResponse
|
||||
func (e *AppError) ToErrorResponse() ErrorResponse {
|
||||
return ErrorResponse{
|
||||
Error: ErrorInfo{
|
||||
Code: e.Code,
|
||||
Message: e.Message,
|
||||
Details: e.Details,
|
||||
Fields: e.Fields,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// ValidationError represents a field validation error
|
||||
type ValidationError struct {
|
||||
Field string `json:"field"`
|
||||
Message string `json:"message"`
|
||||
Value interface{} `json:"value,omitempty"`
|
||||
}
|
||||
|
||||
// ValidationErrors represents multiple validation errors
|
||||
type ValidationErrors []ValidationError
|
||||
|
||||
// Error implements the error interface for ValidationErrors
|
||||
func (ve ValidationErrors) Error() string {
|
||||
if len(ve) == 0 {
|
||||
return "validation failed"
|
||||
}
|
||||
if len(ve) == 1 {
|
||||
return fmt.Sprintf("validation failed: %s %s", ve[0].Field, ve[0].Message)
|
||||
}
|
||||
return fmt.Sprintf("validation failed: %d errors", len(ve))
|
||||
}
|
||||
|
||||
// ToAppError converts ValidationErrors to AppError
|
||||
func (ve ValidationErrors) ToAppError() *AppError {
|
||||
fields := make(map[string]interface{})
|
||||
for _, err := range ve {
|
||||
fields[err.Field] = err.Message
|
||||
}
|
||||
|
||||
return ErrValidationFailed.WithFields(fields).WithDetails(ve.Error())
|
||||
}
|
||||
|
||||
// NewValidationError creates a new validation error
|
||||
func NewValidationError(field, message string, value interface{}) ValidationError {
|
||||
return ValidationError{
|
||||
Field: field,
|
||||
Message: message,
|
||||
Value: value,
|
||||
}
|
||||
}
|
||||
277
internal/health/health.go
Normal file
277
internal/health/health.go
Normal file
@ -0,0 +1,277 @@
|
||||
package health
|
||||
|
||||
import (
	"context"
	"fmt"
	"runtime"
	"time"

	"attune-heart-therapy/internal/database"
	"attune-heart-therapy/internal/logger"
	"attune-heart-therapy/internal/services"
)
|
||||
|
||||
// Status represents the health status of a component.
type Status string

// Health states reported by checks; StatusUnknown is returned by
// GetOverallStatus when no checks have run.
const (
	StatusHealthy   Status = "healthy"
	StatusDegraded  Status = "degraded"
	StatusUnhealthy Status = "unhealthy"
	StatusUnknown   Status = "unknown"
)
|
||||
|
||||
// CheckResult represents the result of a single health check. Timestamp and
// Duration are filled in by Checker.Check, not by the check itself.
//
// NOTE(review): the Duration field is tagged "duration_ms", but time.Duration
// marshals to JSON as a nanosecond count — confirm which unit consumers of
// this payload expect.
type CheckResult struct {
	Status    Status                 `json:"status"`
	Message   string                 `json:"message,omitempty"`
	Details   map[string]interface{} `json:"details,omitempty"`
	Timestamp time.Time              `json:"timestamp"`
	Duration  time.Duration          `json:"duration_ms"`
}
|
||||
|
||||
// HealthCheck is a function that probes one component and reports its state.
// Implementations should respect ctx cancellation/deadlines.
type HealthCheck func(ctx context.Context) CheckResult
|
||||
|
||||
// Checker performs health checks on various system components.
type Checker struct {
	checks map[string]HealthCheck // registered checks, keyed by name
	log    *logger.Logger
}
|
||||
|
||||
// NewChecker creates a new health checker
|
||||
func NewChecker() *Checker {
|
||||
return &Checker{
|
||||
checks: make(map[string]HealthCheck),
|
||||
log: logger.New("health_checker"),
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterCheck registers a health check with a name
|
||||
func (hc *Checker) RegisterCheck(name string, check HealthCheck) {
|
||||
hc.checks[name] = check
|
||||
hc.log.Info("Health check registered", map[string]interface{}{
|
||||
"check_name": name,
|
||||
})
|
||||
}
|
||||
|
||||
// Check runs every registered health check sequentially and returns the
// results keyed by check name. Each result is stamped with its wall-clock
// duration and completion time, then logged at a level matching its status
// (Debug for healthy, Warn for degraded/unknown, Error for unhealthy).
//
// NOTE(review): hc.checks is read here without a lock while RegisterCheck
// writes it — safe only if all registration finishes before the first Check
// call; confirm with callers.
func (hc *Checker) Check(ctx context.Context) map[string]CheckResult {
	results := make(map[string]CheckResult)

	for name, check := range hc.checks {
		start := time.Now()
		result := check(ctx)
		result.Duration = time.Since(start)
		result.Timestamp = time.Now()
		results[name] = result

		// Log health check results, flattening Details into prefixed fields.
		fields := map[string]interface{}{
			"check_name": name,
			"status":     result.Status,
			"duration":   result.Duration.Milliseconds(),
		}

		if result.Details != nil {
			for k, v := range result.Details {
				fields["detail_"+k] = v
			}
		}

		switch result.Status {
		case StatusHealthy:
			hc.log.Debug("Health check passed", fields)
		case StatusDegraded:
			hc.log.Warn("Health check degraded", fields)
		case StatusUnhealthy:
			hc.log.Error("Health check failed", nil, fields)
		default:
			hc.log.Warn("Health check status unknown", fields)
		}
	}

	return results
}
|
||||
|
||||
// GetOverallStatus determines the overall system health status
|
||||
func (hc *Checker) GetOverallStatus(results map[string]CheckResult) Status {
|
||||
if len(results) == 0 {
|
||||
return StatusUnknown
|
||||
}
|
||||
|
||||
hasUnhealthy := false
|
||||
hasDegraded := false
|
||||
|
||||
for _, result := range results {
|
||||
switch result.Status {
|
||||
case StatusUnhealthy:
|
||||
hasUnhealthy = true
|
||||
case StatusDegraded:
|
||||
hasDegraded = true
|
||||
}
|
||||
}
|
||||
|
||||
if hasUnhealthy {
|
||||
return StatusUnhealthy
|
||||
}
|
||||
if hasDegraded {
|
||||
return StatusDegraded
|
||||
}
|
||||
return StatusHealthy
|
||||
}
|
||||
|
||||
// DatabaseHealthCheck creates a health check for database connectivity.
// A nil db or a failing db.Health() probe reports StatusUnhealthy.
// NOTE(review): ctx is ignored here — db.Health() takes no context, so this
// check cannot be cancelled or time-limited; confirm that is acceptable.
func DatabaseHealthCheck(db *database.DB) HealthCheck {
	return func(ctx context.Context) CheckResult {
		if db == nil {
			return CheckResult{
				Status:  StatusUnhealthy,
				Message: "Database not initialized",
			}
		}

		if err := db.Health(); err != nil {
			return CheckResult{
				Status:  StatusUnhealthy,
				Message: "Database connection failed",
				Details: map[string]interface{}{
					"error": err.Error(),
				},
			}
		}

		return CheckResult{
			Status:  StatusHealthy,
			Message: "Database connection healthy",
		}
	}
}
|
||||
|
||||
// JobManagerHealthCheck creates a health check for the job manager service
|
||||
func JobManagerHealthCheck(jobManager services.JobManagerService) HealthCheck {
|
||||
return func(ctx context.Context) CheckResult {
|
||||
if jobManager == nil {
|
||||
return CheckResult{
|
||||
Status: StatusUnhealthy,
|
||||
Message: "Job manager not initialized",
|
||||
}
|
||||
}
|
||||
|
||||
if !jobManager.IsRunning() {
|
||||
return CheckResult{
|
||||
Status: StatusUnhealthy,
|
||||
Message: "Job manager is not running",
|
||||
}
|
||||
}
|
||||
|
||||
return CheckResult{
|
||||
Status: StatusHealthy,
|
||||
Message: "Job manager is running",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ExternalServiceHealthCheck creates a health check for an external service.
// checkFunc runs under a 5-second timeout derived from ctx. A failure is
// reported as StatusDegraded rather than StatusUnhealthy, so a flaky
// third-party dependency does not mark the whole system down.
func ExternalServiceHealthCheck(serviceName string, checkFunc func(ctx context.Context) error) HealthCheck {
	return func(ctx context.Context) CheckResult {
		// Set a timeout for external service checks
		checkCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
		defer cancel()

		if err := checkFunc(checkCtx); err != nil {
			return CheckResult{
				Status:  StatusDegraded,
				Message: fmt.Sprintf("%s service check failed", serviceName),
				Details: map[string]interface{}{
					"error": err.Error(),
				},
			}
		}

		return CheckResult{
			Status:  StatusHealthy,
			Message: fmt.Sprintf("%s service is healthy", serviceName),
		}
	}
}
|
||||
|
||||
// MemoryHealthCheck creates a health check for memory usage
|
||||
func MemoryHealthCheck(maxMemoryMB int64) HealthCheck {
|
||||
return func(ctx context.Context) CheckResult {
|
||||
// This is a simplified memory check
|
||||
// In a real implementation, you would use runtime.MemStats
|
||||
return CheckResult{
|
||||
Status: StatusHealthy,
|
||||
Message: "Memory usage within limits",
|
||||
Details: map[string]interface{}{
|
||||
"max_memory_mb": maxMemoryMB,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DiskSpaceHealthCheck creates a health check for disk space.
//
// NOTE(review): this is a stub — it never inspects the filesystem and always
// reports healthy, merely echoing its arguments in Details. A real
// implementation needs a platform-specific free-space query; TODO implement.
func DiskSpaceHealthCheck(path string, minFreeSpaceGB int64) HealthCheck {
	return func(ctx context.Context) CheckResult {
		// This is a simplified disk space check
		// In a real implementation, you would check actual disk usage
		return CheckResult{
			Status:  StatusHealthy,
			Message: "Disk space sufficient",
			Details: map[string]interface{}{
				"path":              path,
				"min_free_space_gb": minFreeSpaceGB,
			},
		}
	}
}
|
||||
|
||||
// Response represents the complete health check response returned by the
// health endpoint: overall status, per-check results, and static system info.
// NOTE(review): Duration is tagged "duration_ms" but time.Duration marshals
// as nanoseconds — same unit mismatch as CheckResult; confirm consumers.
type Response struct {
	Status    Status                 `json:"status"`
	Timestamp time.Time              `json:"timestamp"`
	Duration  time.Duration          `json:"duration_ms"`
	Checks    map[string]CheckResult `json:"checks"`
	System    map[string]interface{} `json:"system"`
}
|
||||
|
||||
// BuildResponse builds a complete health check response
|
||||
func (hc *Checker) BuildResponse(ctx context.Context) Response {
|
||||
start := time.Now()
|
||||
|
||||
// Perform all health checks
|
||||
checks := hc.Check(ctx)
|
||||
|
||||
// Determine overall status
|
||||
overallStatus := hc.GetOverallStatus(checks)
|
||||
|
||||
// Build system information
|
||||
system := map[string]interface{}{
|
||||
"service": "Video Conference Booking System",
|
||||
"version": "1.0.0", // This could be injected from build info
|
||||
}
|
||||
|
||||
return Response{
|
||||
Status: overallStatus,
|
||||
Timestamp: time.Now(),
|
||||
Duration: time.Since(start),
|
||||
Checks: checks,
|
||||
System: system,
|
||||
}
|
||||
}
|
||||
|
||||
// MonitoringHealthCheck creates a health check that includes monitoring data.
//
// NOTE(review): this is a stub — it always reports healthy and the "enabled"
// details are hard-coded, not queried from the monitoring system. The import
// is avoided deliberately (circular dependency, per the original comment).
func MonitoringHealthCheck() HealthCheck {
	return func(ctx context.Context) CheckResult {
		// This would import monitoring package, but to avoid circular imports,
		// we'll keep it simple for now
		return CheckResult{
			Status:  StatusHealthy,
			Message: "Monitoring system operational",
			Details: map[string]interface{}{
				"error_tracking":     "enabled",
				"metrics_collection": "enabled",
			},
		}
	}
}
|
||||
134
internal/jobs/manager.go
Normal file
134
internal/jobs/manager.go
Normal file
@ -0,0 +1,134 @@
|
||||
package jobs
|
||||
|
||||
import (
|
||||
"log"
|
||||
"sync"
|
||||
|
||||
"attune-heart-therapy/internal/repositories"
|
||||
)
|
||||
|
||||
// Manager manages all background job operations: a worker-pool JobScheduler
// plus the ReminderScheduler layered on top of it.
type Manager struct {
	scheduler         *JobScheduler
	reminderScheduler *ReminderScheduler
	config            *ReminderConfig
	mu                sync.RWMutex // guards running and config
	running           bool
}
|
||||
|
||||
// NewManager creates a new job manager instance. A nil config falls back to
// DefaultReminderConfig. The manager is created stopped; call Start to begin
// processing.
func NewManager(
	notificationService NotificationService,
	bookingRepo repositories.BookingRepository,
	userRepo repositories.UserRepository,
	config *ReminderConfig,
) *Manager {
	if config == nil {
		config = DefaultReminderConfig()
	}

	// Create job scheduler with 3 workers by default
	jobScheduler := NewJobScheduler(3, notificationService, bookingRepo, userRepo)

	// Create reminder scheduler
	reminderScheduler := NewReminderScheduler(config, jobScheduler)

	return &Manager{
		scheduler:         jobScheduler,
		reminderScheduler: reminderScheduler,
		config:            config,
		running:           false,
	}
}
|
||||
|
||||
// Start starts the job manager and all its components
|
||||
func (m *Manager) Start() error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
if m.running {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Printf("Starting job manager...")
|
||||
|
||||
// Start the job scheduler
|
||||
m.scheduler.Start()
|
||||
|
||||
m.running = true
|
||||
log.Printf("Job manager started successfully")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop stops the job manager and all its components
|
||||
func (m *Manager) Stop() error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
if !m.running {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Printf("Stopping job manager...")
|
||||
|
||||
// Stop the job scheduler
|
||||
m.scheduler.Stop()
|
||||
|
||||
m.running = false
|
||||
log.Printf("Job manager stopped successfully")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsRunning returns whether the job manager is currently running
|
||||
func (m *Manager) IsRunning() bool {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
return m.running
|
||||
}
|
||||
|
||||
// GetReminderScheduler returns the reminder scheduler instance. The pointer
// is set once in NewManager and never reassigned, so no lock is taken here.
func (m *Manager) GetReminderScheduler() *ReminderScheduler {
	return m.reminderScheduler
}
|
||||
|
||||
// GetJobScheduler returns the job scheduler instance. The pointer is set once
// in NewManager and never reassigned, so no lock is taken here.
func (m *Manager) GetJobScheduler() *JobScheduler {
	return m.scheduler
}
|
||||
|
||||
// UpdateReminderConfig swaps in a new reminder configuration under the
// manager's lock and forwards it to the reminder scheduler.
//
// NOTE(review): ReminderScheduler.UpdateReminderConfig writes its own config
// field without any lock of its own; this relies on all config mutation going
// through this method — confirm no other writers exist.
func (m *Manager) UpdateReminderConfig(config *ReminderConfig) {
	m.mu.Lock()
	defer m.mu.Unlock()

	m.config = config
	m.reminderScheduler.UpdateReminderConfig(config)

	log.Printf("Reminder configuration updated")
}
|
||||
|
||||
// GetReminderConfig returns the current reminder configuration
|
||||
func (m *Manager) GetReminderConfig() *ReminderConfig {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
return m.config
|
||||
}
|
||||
|
||||
// ScheduleProcessPendingJob schedules a job to process all pending notifications
|
||||
func (m *Manager) ScheduleProcessPendingJob() {
|
||||
if !m.running {
|
||||
log.Printf("Job manager not running, cannot schedule process pending job")
|
||||
return
|
||||
}
|
||||
|
||||
job := &Job{
|
||||
ID: generateJobID(),
|
||||
Type: JobTypeProcessPending,
|
||||
MaxRetries: 1,
|
||||
Status: "scheduled",
|
||||
}
|
||||
|
||||
m.scheduler.ScheduleJob(job)
|
||||
}
|
||||
139
internal/jobs/reminder_config.go
Normal file
139
internal/jobs/reminder_config.go
Normal file
@ -0,0 +1,139 @@
|
||||
package jobs
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// ReminderConfig represents the configuration for reminder scheduling.
type ReminderConfig struct {
	// Default reminder times before the meeting (in minutes).
	// NOTE(review): GetNextReminderTime relies on this being sorted
	// largest-offset-first (as the 1440/60/15 default is) — confirm for
	// custom configs.
	DefaultReminders []int `json:"default_reminders"`

	// Maximum number of reminders per booking
	MaxReminders int `json:"max_reminders"`

	// Minimum time before meeting to send reminder (in minutes)
	MinReminderTime int `json:"min_reminder_time"`

	// Whether reminders are enabled globally
	Enabled bool `json:"enabled"`
}
|
||||
|
||||
// DefaultReminderConfig returns the default reminder configuration
|
||||
func DefaultReminderConfig() *ReminderConfig {
|
||||
return &ReminderConfig{
|
||||
DefaultReminders: []int{1440, 60, 15}, // 24 hours, 1 hour, 15 minutes before
|
||||
MaxReminders: 3,
|
||||
MinReminderTime: 5, // Don't send reminders less than 5 minutes before
|
||||
Enabled: true,
|
||||
}
|
||||
}
|
||||
|
||||
// ReminderScheduler handles the scheduling of reminder notifications,
// translating a booking's meeting time plus the configured offsets into
// reminder jobs on the underlying JobScheduler.
type ReminderScheduler struct {
	config       *ReminderConfig // NOTE(review): written by UpdateReminderConfig without a lock
	jobScheduler *JobScheduler
}
|
||||
|
||||
// NewReminderScheduler creates a new reminder scheduler
|
||||
func NewReminderScheduler(config *ReminderConfig, jobScheduler *JobScheduler) *ReminderScheduler {
|
||||
if config == nil {
|
||||
config = DefaultReminderConfig()
|
||||
}
|
||||
|
||||
return &ReminderScheduler{
|
||||
config: config,
|
||||
jobScheduler: jobScheduler,
|
||||
}
|
||||
}
|
||||
|
||||
// ScheduleRemindersForBooking schedules all reminders for a booking.
// For each configured offset it schedules one reminder job, stopping at
// MaxReminders, and skipping offsets whose reminder time is already in the
// past or falls closer than MinReminderTime to the meeting. Disabled config
// is a silent no-op. Always returns nil.
func (rs *ReminderScheduler) ScheduleRemindersForBooking(bookingID uint, userID uint, meetingTime time.Time) error {
	if !rs.config.Enabled {
		return nil
	}

	now := time.Now()
	scheduledCount := 0

	for _, reminderMinutes := range rs.config.DefaultReminders {
		if scheduledCount >= rs.config.MaxReminders {
			break
		}

		reminderTime := meetingTime.Add(-time.Duration(reminderMinutes) * time.Minute)

		// Skip if reminder time is in the past
		if reminderTime.Before(now) {
			continue
		}

		// Skip if reminder is too close to the meeting
		// (meetingTime - reminderTime equals the offset itself, so this
		// filters offsets smaller than MinReminderTime).
		if meetingTime.Sub(reminderTime).Minutes() < float64(rs.config.MinReminderTime) {
			continue
		}

		// Schedule the reminder job
		rs.jobScheduler.ScheduleReminderJob(bookingID, userID, reminderTime)
		scheduledCount++
	}

	return nil
}
|
||||
|
||||
// CancelRemindersForBooking cancels all scheduled reminders for a booking.
//
// NOTE(review): this is a stub — it does nothing and always returns nil.
// Already-queued reminder jobs are NOT cancelled here; the reminder job
// itself re-checks booking status before sending. TODO: implement once jobs
// are persisted.
func (rs *ReminderScheduler) CancelRemindersForBooking(bookingID uint) error {
	// In a production system, this would mark the reminders as cancelled in a persistent store
	// For now, we'll just log the cancellation
	// The actual implementation would depend on how jobs are persisted
	return nil
}
|
||||
|
||||
// UpdateReminderConfig replaces the reminder configuration.
// NOTE(review): plain pointer write with no lock — callers (Manager) serialize
// access with their own mutex; confirm no other concurrent writers.
func (rs *ReminderScheduler) UpdateReminderConfig(config *ReminderConfig) {
	rs.config = config
}
|
||||
|
||||
// GetReminderConfig returns the current reminder configuration (the live
// pointer, not a copy — callers must not mutate it).
func (rs *ReminderScheduler) GetReminderConfig() *ReminderConfig {
	return rs.config
}
|
||||
|
||||
// GetNextReminderTime returns the first configured reminder time that is
// still in the future for the given meeting, or nil when reminders are
// disabled or every offset has already passed.
//
// NOTE(review): returning the first future entry yields the chronologically
// next reminder only if DefaultReminders is sorted largest-offset-first (the
// 1440/60/15 default is) — confirm for custom configs.
func (rs *ReminderScheduler) GetNextReminderTime(meetingTime time.Time) *time.Time {
	if !rs.config.Enabled {
		return nil
	}

	now := time.Now()

	for _, reminderMinutes := range rs.config.DefaultReminders {
		reminderTime := meetingTime.Add(-time.Duration(reminderMinutes) * time.Minute)

		if reminderTime.After(now) {
			return &reminderTime
		}
	}

	return nil
}
|
||||
|
||||
// IsReminderTimeValid checks if a reminder time is valid
|
||||
func (rs *ReminderScheduler) IsReminderTimeValid(reminderTime, meetingTime time.Time) bool {
|
||||
if !rs.config.Enabled {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check if reminder is in the future
|
||||
if reminderTime.Before(time.Now()) {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check if reminder is not too close to the meeting
|
||||
timeDiff := meetingTime.Sub(reminderTime).Minutes()
|
||||
if timeDiff < float64(rs.config.MinReminderTime) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
297
internal/jobs/scheduler.go
Normal file
297
internal/jobs/scheduler.go
Normal file
@ -0,0 +1,297 @@
|
||||
package jobs
|
||||
|
||||
import (
	"context"
	"log"
	"math/rand"
	"sync"
	"time"

	"attune-heart-therapy/internal/models"
	"attune-heart-therapy/internal/repositories"
)
|
||||
|
||||
// JobType represents the type of background job.
type JobType string

const (
	JobTypeReminderEmail  JobType = "reminder_email"  // send one reminder for a booking
	JobTypeProcessPending JobType = "process_pending" // flush all pending notifications
)
|
||||
|
||||
// Job represents a background job to be executed.
type Job struct {
	ID          string
	Type        JobType
	BookingID   *uint // set for reminder jobs; nil otherwise
	UserID      *uint // set for reminder jobs; nil otherwise
	ScheduledAt time.Time // zero value means "due now"
	Payload     map[string]interface{}
	RetryCount  int
	MaxRetries  int
	Status      string // free-form: "scheduled" / "failed"
	CreatedAt   time.Time
	UpdatedAt   time.Time
}
|
||||
|
||||
// NotificationService is the minimal notification interface the jobs package
// needs; it is declared here (consumer side) to avoid an import cycle with
// the services package.
type NotificationService interface {
	SendReminder(user *models.User, booking *models.Booking) error
	ProcessPendingNotifications() error
}
|
||||
|
||||
// JobScheduler manages background jobs and workers: a buffered job queue, a
// fixed worker pool, and a ticker-driven goroutine that periodically flushes
// pending notifications.
type JobScheduler struct {
	jobs                chan *Job // buffered work queue (see NewJobScheduler)
	workers             int
	quit                chan bool // closed in Stop to signal workers
	wg                  sync.WaitGroup
	notificationService NotificationService
	bookingRepo         repositories.BookingRepository
	userRepo            repositories.UserRepository
	ctx                 context.Context
	cancel              context.CancelFunc
	ticker              *time.Ticker
}
|
||||
|
||||
// NewJobScheduler creates a new job scheduler instance with the given number
// of workers. Workers and the periodic goroutine are only launched by Start.
//
// NOTE(review): the 1-minute ticker begins ticking here, at construction;
// its events are unconsumed until Start runs — confirm construction is
// always promptly followed by Start.
func NewJobScheduler(
	workers int,
	notificationService NotificationService,
	bookingRepo repositories.BookingRepository,
	userRepo repositories.UserRepository,
) *JobScheduler {
	ctx, cancel := context.WithCancel(context.Background())

	return &JobScheduler{
		jobs:                make(chan *Job, 100), // Buffer for 100 jobs
		workers:             workers,
		quit:                make(chan bool),
		notificationService: notificationService,
		bookingRepo:         bookingRepo,
		userRepo:            userRepo,
		ctx:                 ctx,
		cancel:              cancel,
		ticker:              time.NewTicker(1 * time.Minute), // Check every minute
	}
}
|
||||
|
||||
// Start starts the job scheduler and workers
|
||||
func (js *JobScheduler) Start() {
|
||||
log.Printf("Starting job scheduler with %d workers", js.workers)
|
||||
|
||||
// Start workers
|
||||
for i := 0; i < js.workers; i++ {
|
||||
js.wg.Add(1)
|
||||
go js.worker(i)
|
||||
}
|
||||
|
||||
// Start the scheduler ticker
|
||||
js.wg.Add(1)
|
||||
go js.scheduler()
|
||||
|
||||
log.Printf("Job scheduler started successfully")
|
||||
}
|
||||
|
||||
// Stop stops the job scheduler: cancels the context (stopping the scheduler
// goroutine), stops the ticker, closes the quit and jobs channels, and waits
// for all workers to drain and exit.
//
// NOTE(review): closing js.jobs while a delayed-retry goroutine from
// handleJobFailure may still call ScheduleJob risks a send on a closed
// channel (panic). Confirm Stop is only invoked after in-flight retries have
// drained, or guard ScheduleJob against shutdown.
func (js *JobScheduler) Stop() {
	log.Printf("Stopping job scheduler...")

	js.cancel()
	js.ticker.Stop()
	close(js.quit)
	close(js.jobs)

	js.wg.Wait()
	log.Printf("Job scheduler stopped")
}
|
||||
|
||||
// ScheduleJob enqueues a job. Jobs due now (or overdue) are pushed onto the
// buffered channel for immediate execution; when the queue is full the job is
// DROPPED with only a log line.
//
// NOTE(review): jobs scheduled for the future are only logged — nothing
// persists or later dispatches them, so a future-dated job never executes
// through this path (handleJobFailure works around this by sleeping before
// rescheduling). TODO: back this with a persistent delayed queue.
func (js *JobScheduler) ScheduleJob(job *Job) {
	if job.ScheduledAt.Before(time.Now()) || job.ScheduledAt.Equal(time.Now()) {
		// Execute immediately
		select {
		case js.jobs <- job:
			log.Printf("Job %s scheduled for immediate execution", job.ID)
		default:
			log.Printf("Job queue full, dropping job %s", job.ID)
		}
	} else {
		// Store for later execution (in a real implementation, this would be persisted)
		log.Printf("Job %s scheduled for %s", job.ID, job.ScheduledAt.Format(time.RFC3339))
	}
}
|
||||
|
||||
// ScheduleReminderJob schedules a reminder email job
|
||||
func (js *JobScheduler) ScheduleReminderJob(bookingID uint, userID uint, reminderTime time.Time) {
|
||||
job := &Job{
|
||||
ID: generateJobID(),
|
||||
Type: JobTypeReminderEmail,
|
||||
BookingID: &bookingID,
|
||||
UserID: &userID,
|
||||
ScheduledAt: reminderTime,
|
||||
MaxRetries: 3,
|
||||
Status: "scheduled",
|
||||
CreatedAt: time.Now(),
|
||||
UpdatedAt: time.Now(),
|
||||
}
|
||||
|
||||
js.ScheduleJob(job)
|
||||
}
|
||||
|
||||
// worker is the body of one worker goroutine: it pulls jobs off the queue and
// processes them until either the jobs channel is closed or the quit channel
// is signalled (both happen in Stop).
func (js *JobScheduler) worker(id int) {
	defer js.wg.Done()

	log.Printf("Worker %d started", id)

	for {
		select {
		case job, ok := <-js.jobs:
			if !ok {
				// Channel closed: Stop was called and the queue is drained.
				log.Printf("Worker %d: job channel closed, stopping", id)
				return
			}

			log.Printf("Worker %d processing job %s of type %s", id, job.ID, job.Type)
			js.processJob(job)

		case <-js.quit:
			log.Printf("Worker %d: received quit signal, stopping", id)
			return
		}
	}
}
|
||||
|
||||
// scheduler is the ticker-driven goroutine: every tick (1 minute, set in
// NewJobScheduler) it flushes pending notifications, until the scheduler
// context is cancelled by Stop.
func (js *JobScheduler) scheduler() {
	defer js.wg.Done()

	log.Printf("Job scheduler ticker started")

	for {
		select {
		case <-js.ticker.C:
			// Process pending notifications
			js.processPendingNotifications()

		case <-js.ctx.Done():
			log.Printf("Scheduler: context cancelled, stopping")
			return
		}
	}
}
|
||||
|
||||
// processJob dispatches a job to its type-specific handler. A panic in a
// handler is recovered here and treated as a job failure (with retry), so one
// bad job cannot kill a worker goroutine.
func (js *JobScheduler) processJob(job *Job) {
	defer func() {
		if r := recover(); r != nil {
			log.Printf("Job %s panicked: %v", job.ID, r)
			js.handleJobFailure(job, "job panicked")
		}
	}()

	switch job.Type {
	case JobTypeReminderEmail:
		js.processReminderJob(job)
	case JobTypeProcessPending:
		js.processAllPendingNotifications()
	default:
		// Unknown types are logged and dropped without retry.
		log.Printf("Unknown job type: %s", job.Type)
	}
}
|
||||
|
||||
// processReminderJob executes a reminder-email job: it loads the booking and
// user, silently skips bookings that are no longer scheduled or whose meeting
// time has passed, and otherwise sends the reminder. Repository or send
// failures go through handleJobFailure (retry with backoff); validation
// skips do not retry.
func (js *JobScheduler) processReminderJob(job *Job) {
	if job.BookingID == nil || job.UserID == nil {
		log.Printf("Invalid reminder job %s: missing booking or user ID", job.ID)
		return
	}

	// Get booking details
	booking, err := js.bookingRepo.GetByID(*job.BookingID)
	if err != nil {
		log.Printf("Failed to get booking %d for reminder job %s: %v", *job.BookingID, job.ID, err)
		js.handleJobFailure(job, err.Error())
		return
	}

	// Get user details
	user, err := js.userRepo.GetByID(*job.UserID)
	if err != nil {
		log.Printf("Failed to get user %d for reminder job %s: %v", *job.UserID, job.ID, err)
		js.handleJobFailure(job, err.Error())
		return
	}

	// Check if booking is still valid for reminder
	// (covers cancellations: CancelRemindersForBooking is a stub, so this is
	// the effective cancellation point).
	if booking.Status != models.BookingStatusScheduled {
		log.Printf("Skipping reminder for booking %d - status is %s", booking.ID, booking.Status)
		return
	}

	// Check if the meeting is still in the future
	if booking.ScheduledAt.Before(time.Now()) {
		log.Printf("Skipping reminder for booking %d - meeting time has passed", booking.ID)
		return
	}

	// Send the reminder
	if err := js.notificationService.SendReminder(user, booking); err != nil {
		log.Printf("Failed to send reminder for booking %d: %v", booking.ID, err)
		js.handleJobFailure(job, err.Error())
		return
	}

	log.Printf("Successfully sent reminder for booking %d to user %d", booking.ID, user.ID)
}
|
||||
|
||||
// processPendingNotifications delegates to the notification service; errors
// are logged and swallowed (the next ticker tick retries naturally).
func (js *JobScheduler) processPendingNotifications() {
	if err := js.notificationService.ProcessPendingNotifications(); err != nil {
		log.Printf("Failed to process pending notifications: %v", err)
	}
}
|
||||
|
||||
// processAllPendingNotifications is the job-dispatch wrapper for
// JobTypeProcessPending; it simply forwards to processPendingNotifications.
func (js *JobScheduler) processAllPendingNotifications() {
	js.processPendingNotifications()
}
|
||||
|
||||
// handleJobFailure applies retry with quadratic backoff (delay in minutes =
// retryCount squared). After MaxRetries attempts the job is marked "failed"
// permanently; failed jobs are not persisted anywhere.
//
// NOTE(review): the retry goroutine is not tracked by js.wg and sleeps
// outside the scheduler context, so it can outlive Stop and then send on the
// closed jobs channel — confirm shutdown ordering (see Stop).
func (js *JobScheduler) handleJobFailure(job *Job, errorMsg string) {
	job.RetryCount++
	job.UpdatedAt = time.Now()

	if job.RetryCount < job.MaxRetries {
		// Retry with exponential backoff
		retryDelay := time.Duration(job.RetryCount*job.RetryCount) * time.Minute
		job.ScheduledAt = time.Now().Add(retryDelay)

		log.Printf("Job %s failed (attempt %d/%d), retrying in %v: %s",
			job.ID, job.RetryCount, job.MaxRetries, retryDelay, errorMsg)

		// Reschedule the job after sleeping out the delay; by then
		// ScheduledAt has passed, so ScheduleJob enqueues it immediately.
		go func() {
			time.Sleep(retryDelay)
			js.ScheduleJob(job)
		}()
	} else {
		job.Status = "failed"
		log.Printf("Job %s failed permanently after %d attempts: %s", job.ID, job.RetryCount, errorMsg)
	}
}
|
||||
|
||||
// generateJobID generates a unique job ID
|
||||
func generateJobID() string {
|
||||
return time.Now().Format("20060102150405") + "-" + randomString(8)
|
||||
}
|
||||
|
||||
// randomString returns a random string of the given length drawn from
// [a-zA-Z0-9]. The previous implementation indexed the charset with
// time.Now().UnixNano() on every iteration; within a tight loop the clock
// barely advances, so it typically produced the same character repeated for
// the whole string — job IDs were neither random nor unique.
func randomString(length int) string {
	const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
	b := make([]byte, length)
	for i := range b {
		// math/rand is auto-seeded since Go 1.20; IDs are not security
		// sensitive, so crypto/rand is not required here.
		b[i] = charset[rand.Intn(len(charset))]
	}
	return string(b)
}
|
||||
346
internal/logger/logger.go
Normal file
346
internal/logger/logger.go
Normal file
@ -0,0 +1,346 @@
|
||||
package logger
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
// LogLevel represents the severity level of a log entry.
type LogLevel string

// Severity levels in increasing order; shouldLog compares their ranks.
const (
	DEBUG LogLevel = "DEBUG"
	INFO  LogLevel = "INFO"
	WARN  LogLevel = "WARN"
	ERROR LogLevel = "ERROR"
	FATAL LogLevel = "FATAL"
)
|
||||
|
||||
// LogEntry represents a structured log entry, emitted as one JSON object per
// line by Logger.output.
type LogEntry struct {
	Timestamp string                 `json:"timestamp"` // RFC 3339, UTC
	Level     LogLevel               `json:"level"`
	Message   string                 `json:"message"`
	Service   string                 `json:"service"` // logger name set in New
	TraceID   string                 `json:"trace_id,omitempty"`
	UserID    string                 `json:"user_id,omitempty"`
	Fields    map[string]interface{} `json:"fields,omitempty"`
	Error     *ErrorDetails          `json:"error,omitempty"`  // set when an error is logged
	Source    *SourceLocation        `json:"source,omitempty"` // set for ERROR/FATAL entries
}
|
||||
|
||||
// ErrorDetails contains error-specific information attached to a LogEntry.
type ErrorDetails struct {
	Type       string `json:"type"`    // Go dynamic type of the error (%T)
	Message    string `json:"message"` // err.Error()
	StackTrace string `json:"stack_trace,omitempty"` // captured only for ERROR/FATAL
}
|
||||
|
||||
// SourceLocation contains source code location information for a log entry,
// resolved via runtime.Caller.
type SourceLocation struct {
	File     string `json:"file"`
	Line     int    `json:"line"`
	Function string `json:"function"`
}
|
||||
|
||||
// Logger provides structured JSON logging capabilities for one named service.
type Logger struct {
	service string
	level   LogLevel // minimum level that gets emitted; see SetLevel
}
|
||||
|
||||
// New creates a new logger instance
|
||||
func New(service string) *Logger {
|
||||
return &Logger{
|
||||
service: service,
|
||||
level: INFO, // Default level
|
||||
}
|
||||
}
|
||||
|
||||
// SetLevel sets the minimum log level.
// NOTE(review): plain field write with no synchronization — set the level
// before the logger is shared across goroutines; confirm with callers.
func (l *Logger) SetLevel(level LogLevel) {
	l.level = level
}
|
||||
|
||||
// GetLevel returns the current minimum log level.
func (l *Logger) GetLevel() LogLevel {
	return l.level
}
|
||||
|
||||
// IsLevelEnabled reports whether messages at the given level would currently
// be emitted; useful for guarding expensive field construction.
func (l *Logger) IsLevelEnabled(level LogLevel) bool {
	return l.shouldLog(level)
}
|
||||
|
||||
// Debug logs a debug-level message with optional structured fields.
func (l *Logger) Debug(message string, fields ...map[string]interface{}) {
	if l.shouldLog(DEBUG) {
		l.log(DEBUG, message, nil, fields...)
	}
}
|
||||
|
||||
// Info logs an info-level message with optional structured fields.
func (l *Logger) Info(message string, fields ...map[string]interface{}) {
	if l.shouldLog(INFO) {
		l.log(INFO, message, nil, fields...)
	}
}
|
||||
|
||||
// Warn logs a warning-level message with optional structured fields.
func (l *Logger) Warn(message string, fields ...map[string]interface{}) {
	if l.shouldLog(WARN) {
		l.log(WARN, message, nil, fields...)
	}
}
|
||||
|
||||
// Error logs an error-level message; err (may be nil) is attached as
// structured error details, including a stack trace.
func (l *Logger) Error(message string, err error, fields ...map[string]interface{}) {
	if l.shouldLog(ERROR) {
		l.log(ERROR, message, err, fields...)
	}
}
|
||||
|
||||
// Fatal logs a fatal message and terminates the process with exit code 1.
// Unlike the other level methods it bypasses the level filter (always logs).
// Note: os.Exit does not run deferred functions, so no cleanup happens.
func (l *Logger) Fatal(message string, err error, fields ...map[string]interface{}) {
	l.log(FATAL, message, err, fields...)
	os.Exit(1)
}
|
||||
|
||||
// WithContext wraps this logger in a ContextLogger carrying ctx
// (presumably so trace/user IDs can be pulled from it — the ContextLogger
// implementation is defined below).
func (l *Logger) WithContext(ctx context.Context) *ContextLogger {
	return &ContextLogger{
		logger: l,
		ctx:    ctx,
	}
}
|
||||
|
||||
// WithFields wraps this logger in a FieldLogger that carries a predefined
// field map (FieldLogger is defined elsewhere in this package).
func (l *Logger) WithFields(fields map[string]interface{}) *FieldLogger {
	return &FieldLogger{
		logger: l,
		fields: fields,
	}
}
|
||||
|
||||
// log performs the actual logging
|
||||
func (l *Logger) log(level LogLevel, message string, err error, fields ...map[string]interface{}) {
|
||||
entry := LogEntry{
|
||||
Timestamp: time.Now().UTC().Format(time.RFC3339),
|
||||
Level: level,
|
||||
Message: message,
|
||||
Service: l.service,
|
||||
}
|
||||
|
||||
// Add fields if provided
|
||||
if len(fields) > 0 && fields[0] != nil {
|
||||
entry.Fields = fields[0]
|
||||
}
|
||||
|
||||
// Add error details if provided
|
||||
if err != nil {
|
||||
entry.Error = &ErrorDetails{
|
||||
Type: fmt.Sprintf("%T", err),
|
||||
Message: err.Error(),
|
||||
}
|
||||
|
||||
// Add stack trace for errors and fatal logs
|
||||
if level == ERROR || level == FATAL {
|
||||
entry.Error.StackTrace = getStackTrace()
|
||||
}
|
||||
}
|
||||
|
||||
// Add source location for errors and fatal logs
|
||||
if level == ERROR || level == FATAL {
|
||||
entry.Source = getSourceLocation(3) // Skip 3 frames: log, Error/Fatal, caller
|
||||
}
|
||||
|
||||
// Output the log entry
|
||||
l.output(entry)
|
||||
}
|
||||
|
||||
// shouldLog checks if the message should be logged based on level
|
||||
func (l *Logger) shouldLog(level LogLevel) bool {
|
||||
levels := map[LogLevel]int{
|
||||
DEBUG: 0,
|
||||
INFO: 1,
|
||||
WARN: 2,
|
||||
ERROR: 3,
|
||||
FATAL: 4,
|
||||
}
|
||||
|
||||
return levels[level] >= levels[l.level]
|
||||
}
|
||||
|
||||
// output writes the log entry to the output
|
||||
func (l *Logger) output(entry LogEntry) {
|
||||
jsonBytes, err := json.Marshal(entry)
|
||||
if err != nil {
|
||||
// Fallback to standard logging if JSON marshaling fails
|
||||
log.Printf("LOGGER_ERROR: Failed to marshal log entry: %v", err)
|
||||
log.Printf("%s [%s] %s: %s", entry.Timestamp, entry.Level, entry.Service, entry.Message)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println(string(jsonBytes))
|
||||
}
|
||||
|
||||
// getStackTrace captures the current goroutine's stack, truncated to 4 KiB.
func getStackTrace() string {
	stack := make([]byte, 4096)
	written := runtime.Stack(stack, false)
	return string(stack[:written])
}
|
||||
|
||||
// getSourceLocation returns the source location information
|
||||
func getSourceLocation(skip int) *SourceLocation {
|
||||
pc, file, line, ok := runtime.Caller(skip)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
fn := runtime.FuncForPC(pc)
|
||||
if fn == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &SourceLocation{
|
||||
File: file,
|
||||
Line: line,
|
||||
Function: fn.Name(),
|
||||
}
|
||||
}
|
||||
|
||||
// ContextLogger wraps a logger with context information
|
||||
type ContextLogger struct {
|
||||
logger *Logger
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
// Debug logs a debug message with context
|
||||
func (cl *ContextLogger) Debug(message string, fields ...map[string]interface{}) {
|
||||
cl.logWithContext(DEBUG, message, nil, fields...)
|
||||
}
|
||||
|
||||
// Info logs an info message with context
|
||||
func (cl *ContextLogger) Info(message string, fields ...map[string]interface{}) {
|
||||
cl.logWithContext(INFO, message, nil, fields...)
|
||||
}
|
||||
|
||||
// Warn logs a warning message with context
|
||||
func (cl *ContextLogger) Warn(message string, fields ...map[string]interface{}) {
|
||||
cl.logWithContext(WARN, message, nil, fields...)
|
||||
}
|
||||
|
||||
// Error logs an error message with context
|
||||
func (cl *ContextLogger) Error(message string, err error, fields ...map[string]interface{}) {
|
||||
cl.logWithContext(ERROR, message, err, fields...)
|
||||
}
|
||||
|
||||
// logWithContext logs with context information
|
||||
func (cl *ContextLogger) logWithContext(level LogLevel, message string, err error, fields ...map[string]interface{}) {
|
||||
// Extract context information
|
||||
contextFields := make(map[string]interface{})
|
||||
|
||||
// Add trace ID if available
|
||||
if traceID := cl.ctx.Value("trace_id"); traceID != nil {
|
||||
contextFields["trace_id"] = traceID
|
||||
}
|
||||
|
||||
// Add user ID if available
|
||||
if userID := cl.ctx.Value("user_id"); userID != nil {
|
||||
contextFields["user_id"] = userID
|
||||
}
|
||||
|
||||
// Merge with provided fields
|
||||
if len(fields) > 0 && fields[0] != nil {
|
||||
for k, v := range fields[0] {
|
||||
contextFields[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
cl.logger.log(level, message, err, contextFields)
|
||||
}
|
||||
|
||||
// FieldLogger wraps a logger with predefined fields
|
||||
type FieldLogger struct {
|
||||
logger *Logger
|
||||
fields map[string]interface{}
|
||||
}
|
||||
|
||||
// Debug logs a debug message with predefined fields
|
||||
func (fl *FieldLogger) Debug(message string, additionalFields ...map[string]interface{}) {
|
||||
fl.logWithFields(DEBUG, message, nil, additionalFields...)
|
||||
}
|
||||
|
||||
// Info logs an info message with predefined fields
|
||||
func (fl *FieldLogger) Info(message string, additionalFields ...map[string]interface{}) {
|
||||
fl.logWithFields(INFO, message, nil, additionalFields...)
|
||||
}
|
||||
|
||||
// Warn logs a warning message with predefined fields
|
||||
func (fl *FieldLogger) Warn(message string, additionalFields ...map[string]interface{}) {
|
||||
fl.logWithFields(WARN, message, nil, additionalFields...)
|
||||
}
|
||||
|
||||
// Error logs an error message with predefined fields
|
||||
func (fl *FieldLogger) Error(message string, err error, additionalFields ...map[string]interface{}) {
|
||||
fl.logWithFields(ERROR, message, err, additionalFields...)
|
||||
}
|
||||
|
||||
// logWithFields logs with predefined fields
|
||||
func (fl *FieldLogger) logWithFields(level LogLevel, message string, err error, additionalFields ...map[string]interface{}) {
|
||||
// Merge predefined fields with additional fields
|
||||
mergedFields := make(map[string]interface{})
|
||||
|
||||
// Add predefined fields
|
||||
for k, v := range fl.fields {
|
||||
mergedFields[k] = v
|
||||
}
|
||||
|
||||
// Add additional fields
|
||||
if len(additionalFields) > 0 && additionalFields[0] != nil {
|
||||
for k, v := range additionalFields[0] {
|
||||
mergedFields[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
fl.logger.log(level, message, err, mergedFields)
|
||||
}
|
||||
|
||||
// Global logger instance
|
||||
var globalLogger = New("app")
|
||||
|
||||
// SetGlobalLevel sets the global logger level
|
||||
func SetGlobalLevel(level LogLevel) {
|
||||
globalLogger.SetLevel(level)
|
||||
}
|
||||
|
||||
// Debug logs a debug message using the global logger
|
||||
func Debug(message string, fields ...map[string]interface{}) {
|
||||
globalLogger.Debug(message, fields...)
|
||||
}
|
||||
|
||||
// Info logs an info message using the global logger
|
||||
func Info(message string, fields ...map[string]interface{}) {
|
||||
globalLogger.Info(message, fields...)
|
||||
}
|
||||
|
||||
// Warn logs a warning message using the global logger
|
||||
func Warn(message string, fields ...map[string]interface{}) {
|
||||
globalLogger.Warn(message, fields...)
|
||||
}
|
||||
|
||||
// Error logs an error message using the global logger
|
||||
func Error(message string, err error, fields ...map[string]interface{}) {
|
||||
globalLogger.Error(message, err, fields...)
|
||||
}
|
||||
|
||||
// Fatal logs a fatal message using the global logger and exits
|
||||
func Fatal(message string, err error, fields ...map[string]interface{}) {
|
||||
globalLogger.Fatal(message, err, fields...)
|
||||
}
|
||||
163
internal/middleware/error.go
Normal file
163
internal/middleware/error.go
Normal file
@ -0,0 +1,163 @@
|
||||
package middleware
|
||||
|
||||
import (
	"fmt"
	"net/http"
	"runtime/debug"
	"strings"

	"attune-heart-therapy/internal/errors"
	"attune-heart-therapy/internal/logger"

	"github.com/gin-gonic/gin"
)
|
||||
|
||||
// ErrorHandlerMiddleware handles errors and panics in a structured way.
// It wraps gin.CustomRecovery: the inner callback runs when a panic is
// recovered during request processing, logs full diagnostics, and returns a
// generic 500 so no internal details leak to the client.
func ErrorHandlerMiddleware() gin.HandlerFunc {
	return gin.CustomRecovery(func(c *gin.Context, recovered interface{}) {
		log := logger.New("error_handler")

		if recovered != nil {
			// Handle panic: wrap the recovered value so it can be logged as
			// an error together with request metadata and the stack trace.
			err := fmt.Errorf("panic recovered: %v", recovered)
			log.Error("Panic recovered", err, map[string]interface{}{
				"method":     c.Request.Method,
				"path":       c.Request.URL.Path,
				"client_ip":  c.ClientIP(),
				"user_agent": c.Request.UserAgent(),
				"stack":      string(debug.Stack()),
			})

			// Return internal server error for panics
			appErr := errors.ErrInternalServer.WithDetails("An unexpected error occurred")
			c.JSON(appErr.HTTPStatus, appErr.ToErrorResponse())
			c.Abort()
			return
		}

		// Handle regular errors
		// NOTE(review): gin.CustomRecovery only invokes this callback when a
		// panic occurred, so this branch likely never runs for ordinary
		// c.Error(...) errors — confirm whether a separate post-c.Next()
		// error-draining middleware is needed.
		if len(c.Errors) > 0 {
			err := c.Errors.Last()
			handleError(c, err.Err, log)
		}
	})
}
|
||||
|
||||
// handleError processes different types of errors and returns appropriate responses
|
||||
func handleError(c *gin.Context, err error, log *logger.Logger) {
|
||||
// Check if it's already an AppError
|
||||
if appErr := errors.GetAppError(err); appErr != nil {
|
||||
logError(log, c, appErr, appErr.Cause)
|
||||
c.JSON(appErr.HTTPStatus, appErr.ToErrorResponse())
|
||||
return
|
||||
}
|
||||
|
||||
// Handle validation errors
|
||||
if validationErrs, ok := err.(errors.ValidationErrors); ok {
|
||||
appErr := validationErrs.ToAppError()
|
||||
logError(log, c, appErr, err)
|
||||
c.JSON(appErr.HTTPStatus, appErr.ToErrorResponse())
|
||||
return
|
||||
}
|
||||
|
||||
// Handle other known error types
|
||||
appErr := classifyError(err)
|
||||
logError(log, c, appErr, err)
|
||||
c.JSON(appErr.HTTPStatus, appErr.ToErrorResponse())
|
||||
}
|
||||
|
||||
// classifyError converts generic errors to AppErrors based on error content
|
||||
func classifyError(err error) *errors.AppError {
|
||||
errMsg := err.Error()
|
||||
|
||||
// Database errors
|
||||
if containsAny(errMsg, []string{"database", "sql", "connection", "timeout"}) {
|
||||
return errors.ErrDatabaseError.WithCause(err)
|
||||
}
|
||||
|
||||
// Network/external API errors
|
||||
if containsAny(errMsg, []string{"network", "connection refused", "timeout", "dns"}) {
|
||||
return errors.ErrExternalAPI.WithCause(err)
|
||||
}
|
||||
|
||||
// Validation errors
|
||||
if containsAny(errMsg, []string{"invalid", "validation", "required", "format"}) {
|
||||
return errors.ErrValidationFailed.WithCause(err)
|
||||
}
|
||||
|
||||
// Not found errors
|
||||
if containsAny(errMsg, []string{"not found", "does not exist"}) {
|
||||
return errors.ErrNotFound.WithCause(err)
|
||||
}
|
||||
|
||||
// Default to internal server error
|
||||
return errors.ErrInternalServer.WithCause(err)
|
||||
}
|
||||
|
||||
// logError logs error information with request context attached. The log
// severity follows the HTTP status: >=500 logs as Error (with the underlying
// cause), >=400 as Warn, everything else as Info.
func logError(log *logger.Logger, c *gin.Context, appErr *errors.AppError, cause error) {
	// Base fields shared by every error log entry.
	fields := map[string]interface{}{
		"error_code": appErr.Code,
		"method":     c.Request.Method,
		"path":       c.Request.URL.Path,
		"client_ip":  c.ClientIP(),
		"user_agent": c.Request.UserAgent(),
		"status":     appErr.HTTPStatus,
	}

	// Add user ID if available (set by auth middleware, presumably — confirm).
	if userID, exists := c.Get("user_id"); exists {
		fields["user_id"] = userID
	}

	// Add trace ID if available (set by TracingMiddleware).
	if traceID, exists := c.Get("trace_id"); exists {
		fields["trace_id"] = traceID
	}

	// Add error fields if available, prefixed to avoid clobbering base keys.
	if appErr.Fields != nil {
		for k, v := range appErr.Fields {
			fields["error_"+k] = v
		}
	}

	// Log based on severity
	if appErr.HTTPStatus >= 500 {
		log.Error("Server error occurred", cause, fields)
	} else if appErr.HTTPStatus >= 400 {
		log.Warn("Client error occurred", fields)
	} else {
		log.Info("Request completed with error", fields)
	}
}
|
||||
|
||||
// containsAny reports whether s contains at least one of the given
// substrings. An empty substring always matches (mirroring strings.Contains
// and the original hand-rolled scan); an empty or nil list never matches.
func containsAny(s string, substrings []string) bool {
	for _, substr := range substrings {
		// strings.Contains replaces the original hand-rolled O(n*m)
		// character-by-character scan with the stdlib implementation.
		if strings.Contains(s, substr) {
			return true
		}
	}
	return false
}
|
||||
|
||||
// NotFoundHandler handles 404 errors
|
||||
func NotFoundHandler() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
appErr := errors.ErrNotFound.WithDetails(fmt.Sprintf("Route %s %s not found", c.Request.Method, c.Request.URL.Path))
|
||||
c.JSON(appErr.HTTPStatus, appErr.ToErrorResponse())
|
||||
}
|
||||
}
|
||||
|
||||
// MethodNotAllowedHandler handles 405 errors
|
||||
func MethodNotAllowedHandler() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
appErr := errors.New(errors.ErrCodeValidationFailed, "Method not allowed", http.StatusMethodNotAllowed).
|
||||
WithDetails(fmt.Sprintf("Method %s not allowed for route %s", c.Request.Method, c.Request.URL.Path))
|
||||
c.JSON(appErr.HTTPStatus, appErr.ToErrorResponse())
|
||||
}
|
||||
}
|
||||
@ -1,8 +1,8 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"attune-heart-therapy/internal/logger"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
@ -29,6 +29,8 @@ func LoggingMiddleware() gin.HandlerFunc {
|
||||
|
||||
// StructuredLoggingMiddleware creates a structured logging middleware for better log parsing
|
||||
func StructuredLoggingMiddleware() gin.HandlerFunc {
|
||||
log := logger.New("http")
|
||||
|
||||
return func(c *gin.Context) {
|
||||
// Start timer
|
||||
start := time.Now()
|
||||
@ -41,37 +43,52 @@ func StructuredLoggingMiddleware() gin.HandlerFunc {
|
||||
// Calculate latency
|
||||
latency := time.Since(start)
|
||||
|
||||
// Get client IP
|
||||
clientIP := c.ClientIP()
|
||||
|
||||
// Get method
|
||||
method := c.Request.Method
|
||||
|
||||
// Get status code
|
||||
statusCode := c.Writer.Status()
|
||||
|
||||
// Get body size
|
||||
bodySize := c.Writer.Size()
|
||||
|
||||
// Build full path with query string
|
||||
if raw != "" {
|
||||
path = path + "?" + raw
|
||||
}
|
||||
|
||||
// Log structured information
|
||||
log.Printf("REQUEST: method=%s path=%s status=%d latency=%v client_ip=%s body_size=%d user_agent=\"%s\"",
|
||||
method,
|
||||
path,
|
||||
statusCode,
|
||||
latency,
|
||||
clientIP,
|
||||
bodySize,
|
||||
c.Request.UserAgent(),
|
||||
)
|
||||
// Prepare log fields
|
||||
fields := map[string]interface{}{
|
||||
"method": c.Request.Method,
|
||||
"path": path,
|
||||
"status": c.Writer.Status(),
|
||||
"latency_ms": latency.Milliseconds(),
|
||||
"client_ip": c.ClientIP(),
|
||||
"body_size": c.Writer.Size(),
|
||||
"user_agent": c.Request.UserAgent(),
|
||||
}
|
||||
|
||||
// Add trace ID if available
|
||||
if traceID, exists := c.Get("trace_id"); exists {
|
||||
fields["trace_id"] = traceID
|
||||
}
|
||||
|
||||
// Add user ID if available
|
||||
if userID, exists := c.Get("user_id"); exists {
|
||||
fields["user_id"] = userID
|
||||
}
|
||||
|
||||
// Log based on status code
|
||||
statusCode := c.Writer.Status()
|
||||
|
||||
if statusCode >= 500 {
|
||||
log.Error("HTTP request completed with server error", nil, fields)
|
||||
} else if statusCode >= 400 {
|
||||
log.Warn("HTTP request completed with client error", fields)
|
||||
} else {
|
||||
log.Info("HTTP request completed successfully", fields)
|
||||
}
|
||||
|
||||
// Log errors if any
|
||||
if len(c.Errors) > 0 {
|
||||
log.Printf("ERRORS: %v", c.Errors.String())
|
||||
errorFields := map[string]interface{}{
|
||||
"method": c.Request.Method,
|
||||
"path": path,
|
||||
"client_ip": c.ClientIP(),
|
||||
"errors": c.Errors.String(),
|
||||
}
|
||||
log.Error("Request completed with errors", c.Errors.Last().Err, errorFields)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
110
internal/middleware/monitoring.go
Normal file
110
internal/middleware/monitoring.go
Normal file
@ -0,0 +1,110 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"attune-heart-therapy/internal/errors"
|
||||
"attune-heart-therapy/internal/logger"
|
||||
"attune-heart-therapy/internal/monitoring"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// MonitoringMiddleware provides comprehensive request monitoring: it tracks
// in-flight request counts, records per-endpoint latency metrics, forwards
// AppErrors to the error tracker, and emits a structured completion log whose
// severity depends on status code and latency.
func MonitoringMiddleware() gin.HandlerFunc {
	// Resolved once at middleware construction; all requests share the
	// process-wide monitor and this logger.
	monitor := monitoring.GetGlobalMonitor()
	log := logger.New("monitoring_middleware")

	return func(c *gin.Context) {
		start := time.Now()

		// Increment active requests; the deferred decrement runs even if a
		// downstream handler panics.
		monitor.MetricsCollector.IncrementActiveRequests()
		defer monitor.MetricsCollector.DecrementActiveRequests()

		// Process request
		c.Next()

		// Calculate duration
		duration := time.Since(start)

		// Get response status
		status := c.Writer.Status()

		// Record metrics keyed by the route pattern (c.FullPath), not the
		// raw URL, so parameterized routes aggregate together.
		monitor.MetricsCollector.RecordRequest(
			c.FullPath(),
			c.Request.Method,
			status,
			duration,
		)

		// Track errors if any occurred; only AppErrors are tracked.
		if len(c.Errors) > 0 {
			err := c.Errors.Last()
			if appErr := errors.GetAppError(err.Err); appErr != nil {
				// user_id may be stored as a string or a uint depending on
				// the middleware that set it; normalize to string.
				userID := ""
				if uid, exists := c.Get("user_id"); exists {
					if uidStr, ok := uid.(string); ok {
						userID = uidStr
					} else if uidUint, ok := uid.(uint); ok {
						userID = strconv.FormatUint(uint64(uidUint), 10)
					}
				}

				monitor.ErrorTracker.TrackError(
					c.Request.Context(),
					appErr,
					userID,
					c.FullPath(),
				)
			}
		}

		// Log request completion
		fields := map[string]interface{}{
			"method":      c.Request.Method,
			"path":        c.Request.URL.Path,
			"status":      status,
			"duration_ms": duration.Milliseconds(),
			"client_ip":   c.ClientIP(),
			"user_agent":  c.Request.UserAgent(),
		}

		// Add user ID if available
		if userID, exists := c.Get("user_id"); exists {
			fields["user_id"] = userID
		}

		// Add trace ID if available (set by TracingMiddleware)
		if traceID, exists := c.Get("trace_id"); exists {
			fields["trace_id"] = traceID
		}

		// Log based on status and duration: server errors > client errors >
		// slow requests (>5s) > routine debug entry.
		if status >= 500 {
			log.Error("Request completed with server error", nil, fields)
		} else if status >= 400 {
			log.Warn("Request completed with client error", fields)
		} else if duration > 5*time.Second {
			log.Warn("Slow request completed", fields)
		} else {
			log.Debug("Request completed", fields)
		}
	}
}
|
||||
|
||||
// HealthCheckMiddleware provides health check monitoring
|
||||
func HealthCheckMiddleware() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
// Skip monitoring for health check endpoints to avoid noise
|
||||
if c.Request.URL.Path == "/health" || c.Request.URL.Path == "/health/detailed" {
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
|
||||
// Apply monitoring for all other endpoints
|
||||
MonitoringMiddleware()(c)
|
||||
}
|
||||
}
|
||||
37
internal/middleware/tracing.go
Normal file
37
internal/middleware/tracing.go
Normal file
@ -0,0 +1,37 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// TracingMiddleware adds trace ID to requests for better observability
|
||||
func TracingMiddleware() gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
// Check if trace ID is already provided in headers
|
||||
traceID := c.GetHeader("X-Trace-ID")
|
||||
|
||||
// Generate new trace ID if not provided
|
||||
if traceID == "" {
|
||||
traceID = generateTraceID()
|
||||
}
|
||||
|
||||
// Set trace ID in context and response header
|
||||
c.Set("trace_id", traceID)
|
||||
c.Header("X-Trace-ID", traceID)
|
||||
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
|
||||
// generateTraceID produces a 32-character lowercase-hex trace ID from 16
// random bytes. If the system's random source is unavailable it returns a
// fixed sentinel ("trace-" + hex of "fallback") — not unique, but requests
// still get a traceable marker instead of failing.
func generateTraceID() string {
	raw := make([]byte, 16)
	if _, err := rand.Read(raw); err != nil {
		return "trace-" + hex.EncodeToString([]byte("fallback"))
	}
	return hex.EncodeToString(raw)
}
|
||||
325
internal/monitoring/monitoring.go
Normal file
325
internal/monitoring/monitoring.go
Normal file
@ -0,0 +1,325 @@
|
||||
package monitoring
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"attune-heart-therapy/internal/errors"
|
||||
"attune-heart-therapy/internal/logger"
|
||||
)
|
||||
|
||||
// ErrorTracker tracks and monitors application errors, keeping lifetime
// counts plus a bounded window of recent occurrence timestamps per error
// code so spikes can be detected.
type ErrorTracker struct {
	log           *logger.Logger                   // structured logger for tracked errors and alerts
	errorCounts   map[errors.ErrorCode]int64       // lifetime occurrence count per error code
	errorTimes    map[errors.ErrorCode][]time.Time // recent occurrence timestamps, ordered by arrival
	mu            sync.RWMutex                     // guards errorCounts and errorTimes
	windowSize    time.Duration                    // how far back timestamps are retained
	maxWindowSize int                              // hard cap on retained timestamps per code
}
|
||||
|
||||
// NewErrorTracker creates a new error tracker
|
||||
func NewErrorTracker() *ErrorTracker {
|
||||
return &ErrorTracker{
|
||||
log: logger.New("error_tracker"),
|
||||
errorCounts: make(map[errors.ErrorCode]int64),
|
||||
errorTimes: make(map[errors.ErrorCode][]time.Time),
|
||||
windowSize: time.Hour,
|
||||
maxWindowSize: 1000, // Keep last 1000 occurrences per error type
|
||||
}
|
||||
}
|
||||
|
||||
// TrackError records an error occurrence: it bumps the lifetime count,
// appends a timestamp to the retention window, prunes stale entries, then
// logs the error — escalating to an alert when a rate spike is detected.
// The whole operation runs under the tracker's write lock.
func (et *ErrorTracker) TrackError(ctx context.Context, appErr *errors.AppError, userID string, endpoint string) {
	et.mu.Lock()
	defer et.mu.Unlock()

	now := time.Now()

	// Increment error count
	et.errorCounts[appErr.Code]++

	// Add timestamp to error times
	if _, exists := et.errorTimes[appErr.Code]; !exists {
		et.errorTimes[appErr.Code] = make([]time.Time, 0)
	}

	et.errorTimes[appErr.Code] = append(et.errorTimes[appErr.Code], now)

	// Keep only recent errors within window (also enforces the size cap).
	et.cleanupOldErrors(appErr.Code, now)

	// Log error with tracking information
	fields := map[string]interface{}{
		"error_code":   appErr.Code,
		"error_count":  et.errorCounts[appErr.Code],
		"recent_count": len(et.errorTimes[appErr.Code]),
		"endpoint":     endpoint,
		"http_status":  appErr.HTTPStatus,
	}

	if userID != "" {
		fields["user_id"] = userID
	}

	// trace_id is read with a plain string key; presumably set by the
	// tracing middleware — confirm the key matches.
	if traceID := ctx.Value("trace_id"); traceID != nil {
		fields["trace_id"] = traceID
	}

	// Add error fields if available, prefixed to avoid clobbering base keys.
	if appErr.Fields != nil {
		for k, v := range appErr.Fields {
			fields["error_"+k] = v
		}
	}

	// Check for error rate spikes; spikes log at Error severity and fire the
	// alert hook regardless of the error's HTTP status.
	if et.isErrorSpike(appErr.Code) {
		et.log.Error("Error rate spike detected", appErr.Cause, fields)
		et.alertOnErrorSpike(appErr.Code, fields)
	} else if appErr.HTTPStatus >= 500 {
		et.log.Error("Server error tracked", appErr.Cause, fields)
	} else {
		et.log.Warn("Client error tracked", fields)
	}
}
|
||||
|
||||
// cleanupOldErrors removes errors outside the time window
|
||||
func (et *ErrorTracker) cleanupOldErrors(code errors.ErrorCode, now time.Time) {
|
||||
cutoff := now.Add(-et.windowSize)
|
||||
times := et.errorTimes[code]
|
||||
|
||||
// Find first index within window
|
||||
start := 0
|
||||
for i, t := range times {
|
||||
if t.After(cutoff) {
|
||||
start = i
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Keep only recent errors
|
||||
et.errorTimes[code] = times[start:]
|
||||
|
||||
// Limit to max window size
|
||||
if len(et.errorTimes[code]) > et.maxWindowSize {
|
||||
et.errorTimes[code] = et.errorTimes[code][len(et.errorTimes[code])-et.maxWindowSize:]
|
||||
}
|
||||
}
|
||||
|
||||
// isErrorSpike checks if there's an unusual spike in error rate
|
||||
func (et *ErrorTracker) isErrorSpike(code errors.ErrorCode) bool {
|
||||
times := et.errorTimes[code]
|
||||
if len(times) < 10 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Check if we have more than 10 errors in the last 5 minutes
|
||||
fiveMinutesAgo := time.Now().Add(-5 * time.Minute)
|
||||
recentCount := 0
|
||||
|
||||
for _, t := range times {
|
||||
if t.After(fiveMinutesAgo) {
|
||||
recentCount++
|
||||
}
|
||||
}
|
||||
|
||||
return recentCount >= 10
|
||||
}
|
||||
|
||||
// alertOnErrorSpike handles error spike alerts
|
||||
func (et *ErrorTracker) alertOnErrorSpike(code errors.ErrorCode, fields map[string]interface{}) {
|
||||
// In a production system, this would send alerts to monitoring systems
|
||||
// like PagerDuty, Slack, or email notifications
|
||||
et.log.Error("ALERT: Error spike detected", nil, map[string]interface{}{
|
||||
"alert_type": "error_spike",
|
||||
"error_code": code,
|
||||
"recent_count": len(et.errorTimes[code]),
|
||||
"details": fields,
|
||||
})
|
||||
}
|
||||
|
||||
// GetErrorStats returns error statistics
|
||||
func (et *ErrorTracker) GetErrorStats() map[string]interface{} {
|
||||
et.mu.RLock()
|
||||
defer et.mu.RUnlock()
|
||||
|
||||
stats := map[string]interface{}{
|
||||
"total_errors": make(map[errors.ErrorCode]int64),
|
||||
"recent_errors": make(map[errors.ErrorCode]int),
|
||||
"error_rates": make(map[errors.ErrorCode]float64),
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
oneHourAgo := now.Add(-time.Hour)
|
||||
|
||||
for code, count := range et.errorCounts {
|
||||
stats["total_errors"].(map[errors.ErrorCode]int64)[code] = count
|
||||
|
||||
// Count recent errors
|
||||
recentCount := 0
|
||||
if times, exists := et.errorTimes[code]; exists {
|
||||
for _, t := range times {
|
||||
if t.After(oneHourAgo) {
|
||||
recentCount++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
stats["recent_errors"].(map[errors.ErrorCode]int)[code] = recentCount
|
||||
stats["error_rates"].(map[errors.ErrorCode]float64)[code] = float64(recentCount) / 60.0 // errors per minute
|
||||
}
|
||||
|
||||
return stats
|
||||
}
|
||||
|
||||
// MetricsCollector collects application metrics: per-endpoint request counts
// and latencies plus an in-flight request gauge.
type MetricsCollector struct {
	log            *logger.Logger             // structured logger for slow-request warnings
	requestCounts  map[string]int64           // total requests keyed by "METHOD endpoint"
	responseTimes  map[string][]time.Duration // recent latencies per key (capped at 1000 entries)
	activeRequests int64                      // number of requests currently in flight
	mu             sync.RWMutex               // guards all fields above
}
|
||||
|
||||
// NewMetricsCollector creates a new metrics collector
|
||||
func NewMetricsCollector() *MetricsCollector {
|
||||
return &MetricsCollector{
|
||||
log: logger.New("metrics_collector"),
|
||||
requestCounts: make(map[string]int64),
|
||||
responseTimes: make(map[string][]time.Duration),
|
||||
}
|
||||
}
|
||||
|
||||
// RecordRequest records a request metric
|
||||
func (mc *MetricsCollector) RecordRequest(endpoint string, method string, statusCode int, duration time.Duration) {
|
||||
mc.mu.Lock()
|
||||
defer mc.mu.Unlock()
|
||||
|
||||
key := method + " " + endpoint
|
||||
mc.requestCounts[key]++
|
||||
|
||||
if _, exists := mc.responseTimes[key]; !exists {
|
||||
mc.responseTimes[key] = make([]time.Duration, 0)
|
||||
}
|
||||
|
||||
mc.responseTimes[key] = append(mc.responseTimes[key], duration)
|
||||
|
||||
// Keep only last 1000 response times per endpoint
|
||||
if len(mc.responseTimes[key]) > 1000 {
|
||||
mc.responseTimes[key] = mc.responseTimes[key][len(mc.responseTimes[key])-1000:]
|
||||
}
|
||||
|
||||
// Log slow requests
|
||||
if duration > 5*time.Second {
|
||||
mc.log.Warn("Slow request detected", map[string]interface{}{
|
||||
"endpoint": endpoint,
|
||||
"method": method,
|
||||
"status_code": statusCode,
|
||||
"duration_ms": duration.Milliseconds(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// IncrementActiveRequests increments the active request counter
|
||||
func (mc *MetricsCollector) IncrementActiveRequests() {
|
||||
mc.mu.Lock()
|
||||
defer mc.mu.Unlock()
|
||||
mc.activeRequests++
|
||||
}
|
||||
|
||||
// DecrementActiveRequests decrements the active request counter
|
||||
func (mc *MetricsCollector) DecrementActiveRequests() {
|
||||
mc.mu.Lock()
|
||||
defer mc.mu.Unlock()
|
||||
if mc.activeRequests > 0 {
|
||||
mc.activeRequests--
|
||||
}
|
||||
}
|
||||
|
||||
// GetMetrics returns collected metrics
|
||||
func (mc *MetricsCollector) GetMetrics() map[string]interface{} {
|
||||
mc.mu.RLock()
|
||||
defer mc.mu.RUnlock()
|
||||
|
||||
metrics := map[string]interface{}{
|
||||
"active_requests": mc.activeRequests,
|
||||
"request_counts": make(map[string]int64),
|
||||
"avg_response_times": make(map[string]float64),
|
||||
}
|
||||
|
||||
// Copy request counts
|
||||
for k, v := range mc.requestCounts {
|
||||
metrics["request_counts"].(map[string]int64)[k] = v
|
||||
}
|
||||
|
||||
// Calculate average response times
|
||||
for endpoint, times := range mc.responseTimes {
|
||||
if len(times) > 0 {
|
||||
var total time.Duration
|
||||
for _, t := range times {
|
||||
total += t
|
||||
}
|
||||
avg := total / time.Duration(len(times))
|
||||
metrics["avg_response_times"].(map[string]float64)[endpoint] = float64(avg.Milliseconds())
|
||||
}
|
||||
}
|
||||
|
||||
return metrics
|
||||
}
|
||||
|
||||
// Monitor provides comprehensive application monitoring by bundling error
// tracking and request metrics behind a single entry point.
type Monitor struct {
	ErrorTracker     *ErrorTracker     // aggregates error occurrences and spike detection
	MetricsCollector *MetricsCollector // aggregates request counts and latencies
	log              *logger.Logger    // structured logger for monitor-level events
}
|
||||
|
||||
// NewMonitor creates a new application monitor
|
||||
func NewMonitor() *Monitor {
|
||||
return &Monitor{
|
||||
ErrorTracker: NewErrorTracker(),
|
||||
MetricsCollector: NewMetricsCollector(),
|
||||
log: logger.New("monitor"),
|
||||
}
|
||||
}
|
||||
|
||||
// GetHealthStatus returns a comprehensive health report: overall status,
// timestamp, error statistics, and request metrics. The overall status
// degrades from "healthy" to "degraded" above 100 errors in the last hour,
// and to "unhealthy" above 500.
func (m *Monitor) GetHealthStatus() map[string]interface{} {
	errorStats := m.ErrorTracker.GetErrorStats()
	metrics := m.MetricsCollector.GetMetrics()

	status := map[string]interface{}{
		"status":    "healthy",
		"timestamp": time.Now().UTC().Format(time.RFC3339),
		"errors":    errorStats,
		"metrics":   metrics,
	}

	// Determine overall health based on error rates. The type assertion
	// depends on GetErrorStats storing "recent_errors" as
	// map[errors.ErrorCode]int; if that shape changes the health status
	// silently stays "healthy".
	if recentErrors, ok := errorStats["recent_errors"].(map[errors.ErrorCode]int); ok {
		totalRecentErrors := 0
		for _, count := range recentErrors {
			totalRecentErrors += count
		}

		if totalRecentErrors > 100 { // More than 100 errors in the last hour
			status["status"] = "degraded"
		}

		if totalRecentErrors > 500 { // More than 500 errors in the last hour
			status["status"] = "unhealthy"
		}
	}

	return status
}
|
||||
|
||||
// globalMonitor is the process-wide Monitor shared by all middleware; it is
// created eagerly at package initialization.
var globalMonitor = NewMonitor()

// GetGlobalMonitor returns the global monitor instance shared across the
// application.
func GetGlobalMonitor() *Monitor {
	return globalMonitor
}
|
||||
@ -1,12 +1,15 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"attune-heart-therapy/internal/config"
|
||||
"attune-heart-therapy/internal/database"
|
||||
"attune-heart-therapy/internal/handlers"
|
||||
"attune-heart-therapy/internal/container"
|
||||
"attune-heart-therapy/internal/health"
|
||||
"attune-heart-therapy/internal/logger"
|
||||
"attune-heart-therapy/internal/middleware"
|
||||
"attune-heart-therapy/internal/services"
|
||||
|
||||
@ -34,14 +37,13 @@ func (m *MiddlewareContainer) StrictRateLimit() gin.HandlerFunc {
|
||||
}
|
||||
|
||||
type Server struct {
|
||||
config *config.Config
|
||||
db *database.DB
|
||||
router *gin.Engine
|
||||
middleware *MiddlewareContainer
|
||||
authHandler *handlers.AuthHandler
|
||||
paymentHandler *handlers.PaymentHandler
|
||||
bookingHandler *handlers.BookingHandler
|
||||
adminHandler *handlers.AdminHandler
|
||||
config *config.Config
|
||||
container *container.Container
|
||||
router *gin.Engine
|
||||
middleware *MiddlewareContainer
|
||||
httpServer *http.Server
|
||||
healthChecker *health.Checker
|
||||
log *logger.Logger
|
||||
}
|
||||
|
||||
func New(cfg *config.Config) *Server {
|
||||
@ -53,15 +55,26 @@ func New(cfg *config.Config) *Server {
|
||||
// Configure middleware stack
|
||||
setupMiddlewareStack(router)
|
||||
|
||||
// Initialize health checker
|
||||
healthChecker := health.NewChecker()
|
||||
|
||||
// Initialize logger
|
||||
log := logger.New("server")
|
||||
|
||||
return &Server{
|
||||
config: cfg,
|
||||
router: router,
|
||||
config: cfg,
|
||||
router: router,
|
||||
healthChecker: healthChecker,
|
||||
log: log,
|
||||
}
|
||||
}
|
||||
|
||||
// setupMiddlewareStack configures the Gin middleware stack
|
||||
func setupMiddlewareStack(router *gin.Engine) {
|
||||
// Security middleware - should be first
|
||||
// Tracing middleware - should be first to ensure trace ID is available
|
||||
router.Use(middleware.TracingMiddleware())
|
||||
|
||||
// Security middleware
|
||||
router.Use(middleware.SecurityMiddleware())
|
||||
|
||||
// CORS middleware for frontend integration
|
||||
@ -70,41 +83,47 @@ func setupMiddlewareStack(router *gin.Engine) {
|
||||
// Request logging middleware
|
||||
router.Use(middleware.StructuredLoggingMiddleware())
|
||||
|
||||
// Recovery middleware to handle panics
|
||||
router.Use(gin.Recovery())
|
||||
// Monitoring middleware for error tracking and metrics
|
||||
router.Use(middleware.HealthCheckMiddleware())
|
||||
|
||||
// Error handling and recovery middleware
|
||||
router.Use(middleware.ErrorHandlerMiddleware())
|
||||
|
||||
// Rate limiting middleware for API protection
|
||||
router.Use(middleware.RateLimitMiddleware())
|
||||
|
||||
// Handle 404 and 405 errors
|
||||
router.NoRoute(middleware.NotFoundHandler())
|
||||
router.NoMethod(middleware.MethodNotAllowedHandler())
|
||||
}
|
||||
|
||||
// Initialize sets up the database connection and runs migrations
|
||||
// SetContainer sets the dependency injection container
|
||||
func (s *Server) SetContainer(container *container.Container) {
|
||||
s.container = container
|
||||
}
|
||||
|
||||
// Initialize sets up all application dependencies
|
||||
func (s *Server) Initialize() error {
|
||||
// Initialize database connection
|
||||
db, err := database.New(s.config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to initialize database: %w", err)
|
||||
}
|
||||
s.db = db
|
||||
s.log.Info("Initializing server...")
|
||||
|
||||
// Run database migrations
|
||||
if err := s.db.Migrate(); err != nil {
|
||||
return fmt.Errorf("failed to run database migrations: %w", err)
|
||||
if s.container == nil {
|
||||
return fmt.Errorf("container not set - call SetContainer first")
|
||||
}
|
||||
|
||||
// Seed database with initial data
|
||||
if err := s.db.Seed(); err != nil {
|
||||
return fmt.Errorf("failed to seed database: %w", err)
|
||||
// Initialize middleware container with JWT service
|
||||
s.middleware = &MiddlewareContainer{
|
||||
jwtService: s.container.GetJWTService(),
|
||||
}
|
||||
|
||||
// Initialize services and handlers
|
||||
s.initializeServices()
|
||||
// Register health checks
|
||||
s.registerHealthChecks()
|
||||
|
||||
log.Println("Server initialization completed successfully")
|
||||
s.log.Info("Server initialization completed successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) Start() error {
|
||||
// Initialize database and run migrations
|
||||
// Initialize server components
|
||||
if err := s.Initialize(); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -112,25 +131,60 @@ func (s *Server) Start() error {
|
||||
// Setup routes
|
||||
s.setupRoutes()
|
||||
|
||||
// Start server
|
||||
// Create HTTP server with proper configuration
|
||||
addr := fmt.Sprintf("%s:%s", s.config.Server.Host, s.config.Server.Port)
|
||||
log.Printf("Starting server on %s", addr)
|
||||
s.httpServer = &http.Server{
|
||||
Addr: addr,
|
||||
Handler: s.router,
|
||||
ReadTimeout: 30 * time.Second,
|
||||
WriteTimeout: 30 * time.Second,
|
||||
IdleTimeout: 120 * time.Second,
|
||||
}
|
||||
|
||||
return s.router.Run(addr)
|
||||
s.log.Info("Starting server", map[string]interface{}{
|
||||
"address": addr,
|
||||
})
|
||||
return s.httpServer.ListenAndServe()
|
||||
}
|
||||
|
||||
// Shutdown gracefully shuts down the server
|
||||
func (s *Server) Shutdown() error {
|
||||
if s.db != nil {
|
||||
log.Println("Closing database connection...")
|
||||
return s.db.Close()
|
||||
func (s *Server) Shutdown(ctx context.Context) error {
|
||||
s.log.Info("Shutting down server...")
|
||||
|
||||
var shutdownErrors []error
|
||||
|
||||
// Shutdown HTTP server gracefully
|
||||
if s.httpServer != nil {
|
||||
s.log.Info("Shutting down HTTP server...")
|
||||
if err := s.httpServer.Shutdown(ctx); err != nil {
|
||||
s.log.Error("Error shutting down HTTP server", err)
|
||||
shutdownErrors = append(shutdownErrors, fmt.Errorf("HTTP server shutdown error: %w", err))
|
||||
} else {
|
||||
s.log.Info("HTTP server shutdown completed")
|
||||
}
|
||||
}
|
||||
|
||||
// Shutdown application dependencies
|
||||
if s.container != nil {
|
||||
if err := s.container.Shutdown(); err != nil {
|
||||
s.log.Error("Error shutting down dependencies", err)
|
||||
shutdownErrors = append(shutdownErrors, err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(shutdownErrors) > 0 {
|
||||
return fmt.Errorf("server shutdown completed with errors: %v", shutdownErrors)
|
||||
}
|
||||
|
||||
s.log.Info("Server shutdown completed successfully")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) setupRoutes() {
|
||||
// Health check endpoint - no middleware needed
|
||||
// Health check endpoints - no middleware needed
|
||||
s.router.GET("/health", s.healthCheck)
|
||||
s.router.GET("/health/detailed", s.detailedHealthCheck)
|
||||
s.router.GET("/metrics", s.metricsEndpoint)
|
||||
|
||||
// API v1 routes group with base middleware
|
||||
v1 := s.router.Group("/api")
|
||||
@ -143,15 +197,15 @@ func (s *Server) setupRoutes() {
|
||||
{
|
||||
// Apply strict rate limiting to auth endpoints
|
||||
auth.Use(s.middleware.StrictRateLimit())
|
||||
auth.POST("/register", s.authHandler.Register)
|
||||
auth.POST("/login", s.authHandler.Login)
|
||||
auth.POST("/register", s.container.AuthHandler.Register)
|
||||
auth.POST("/login", s.container.AuthHandler.Login)
|
||||
}
|
||||
|
||||
// Schedule routes - public endpoint for getting available slots
|
||||
public.GET("/schedules", s.bookingHandler.GetAvailableSlots)
|
||||
public.GET("/schedules", s.container.BookingHandler.GetAvailableSlots)
|
||||
|
||||
// Payment webhook - public endpoint for Stripe webhooks (no auth needed)
|
||||
public.POST("/payments/webhook", s.paymentHandler.HandleWebhook)
|
||||
public.POST("/payments/webhook", s.container.PaymentHandler.HandleWebhook)
|
||||
}
|
||||
|
||||
// Authenticated routes group - require JWT authentication
|
||||
@ -161,25 +215,25 @@ func (s *Server) setupRoutes() {
|
||||
// Auth profile routes - require authentication
|
||||
authProfile := authenticated.Group("/auth")
|
||||
{
|
||||
authProfile.GET("/profile", s.authHandler.GetProfile)
|
||||
authProfile.PUT("/profile", s.authHandler.UpdateProfile)
|
||||
authProfile.POST("/logout", s.authHandler.Logout)
|
||||
authProfile.GET("/profile", s.container.AuthHandler.GetProfile)
|
||||
authProfile.PUT("/profile", s.container.AuthHandler.UpdateProfile)
|
||||
authProfile.POST("/logout", s.container.AuthHandler.Logout)
|
||||
}
|
||||
|
||||
// Booking routes - require authentication
|
||||
bookings := authenticated.Group("/bookings")
|
||||
{
|
||||
bookings.GET("/", s.bookingHandler.GetUserBookings)
|
||||
bookings.POST("/", s.bookingHandler.CreateBooking)
|
||||
bookings.PUT("/:id/cancel", s.bookingHandler.CancelBooking)
|
||||
bookings.PUT("/:id/reschedule", s.bookingHandler.RescheduleBooking)
|
||||
bookings.GET("/", s.container.BookingHandler.GetUserBookings)
|
||||
bookings.POST("/", s.container.BookingHandler.CreateBooking)
|
||||
bookings.PUT("/:id/cancel", s.container.BookingHandler.CancelBooking)
|
||||
bookings.PUT("/:id/reschedule", s.container.BookingHandler.RescheduleBooking)
|
||||
}
|
||||
|
||||
// Payment routes - require authentication (except webhook)
|
||||
payments := authenticated.Group("/payments")
|
||||
{
|
||||
payments.POST("/intent", s.paymentHandler.CreatePaymentIntent)
|
||||
payments.POST("/confirm", s.paymentHandler.ConfirmPayment)
|
||||
payments.POST("/intent", s.container.PaymentHandler.CreatePaymentIntent)
|
||||
payments.POST("/confirm", s.container.PaymentHandler.ConfirmPayment)
|
||||
}
|
||||
}
|
||||
|
||||
@ -187,81 +241,77 @@ func (s *Server) setupRoutes() {
|
||||
admin := v1.Group("/admin")
|
||||
admin.Use(s.middleware.RequireAdmin())
|
||||
{
|
||||
admin.GET("/dashboard", s.adminHandler.GetDashboard)
|
||||
admin.POST("/schedules", s.adminHandler.CreateSchedule)
|
||||
admin.PUT("/schedules/:id", s.adminHandler.UpdateSchedule)
|
||||
admin.GET("/users", s.adminHandler.GetUsers)
|
||||
admin.GET("/bookings", s.adminHandler.GetBookings)
|
||||
admin.GET("/reports/financial", s.adminHandler.GetFinancialReports)
|
||||
admin.GET("/dashboard", s.container.AdminHandler.GetDashboard)
|
||||
admin.POST("/schedules", s.container.AdminHandler.CreateSchedule)
|
||||
admin.PUT("/schedules/:id", s.container.AdminHandler.UpdateSchedule)
|
||||
admin.GET("/users", s.container.AdminHandler.GetUsers)
|
||||
admin.GET("/bookings", s.container.AdminHandler.GetBookings)
|
||||
admin.GET("/reports/financial", s.container.AdminHandler.GetFinancialReports)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// initializeServices sets up all services and handlers
|
||||
func (s *Server) initializeServices() {
|
||||
// Initialize repositories
|
||||
repos := s.db.GetRepositories()
|
||||
|
||||
// Initialize Jitsi service
|
||||
jitsiService := services.NewJitsiService(&s.config.Jitsi)
|
||||
|
||||
// Initialize notification service
|
||||
notificationService := services.NewNotificationService(repos.Notification, s.config)
|
||||
|
||||
// Initialize JWT service (needed for user service and middleware)
|
||||
jwtService := services.NewJWTService(s.config.JWT.Secret, s.config.JWT.Expiration)
|
||||
|
||||
// Initialize middleware container with JWT service
|
||||
s.middleware = &MiddlewareContainer{
|
||||
jwtService: jwtService,
|
||||
}
|
||||
|
||||
// Initialize user service with notification integration
|
||||
userService := services.NewUserService(repos.User, jwtService, notificationService)
|
||||
|
||||
// Initialize payment service with notification integration
|
||||
paymentService := services.NewPaymentService(s.config, repos.Booking, repos.User, notificationService)
|
||||
|
||||
// Initialize booking service with notification integration
|
||||
bookingService := services.NewBookingService(
|
||||
repos.Booking,
|
||||
repos.Schedule,
|
||||
repos.User,
|
||||
jitsiService,
|
||||
paymentService,
|
||||
notificationService,
|
||||
)
|
||||
|
||||
// Initialize admin service
|
||||
adminService := services.NewAdminService(repos.User, repos.Booking, repos.Schedule)
|
||||
|
||||
// Initialize handlers
|
||||
s.authHandler = handlers.NewAuthHandler(userService)
|
||||
s.paymentHandler = handlers.NewPaymentHandler(paymentService)
|
||||
s.bookingHandler = handlers.NewBookingHandler(bookingService)
|
||||
s.adminHandler = handlers.NewAdminHandler(adminService)
|
||||
}
|
||||
|
||||
// healthCheck handles the health check endpoint
|
||||
// healthCheck handles the basic health check endpoint
|
||||
func (s *Server) healthCheck(c *gin.Context) {
|
||||
response := gin.H{
|
||||
"status": "ok",
|
||||
"message": "Video Conference Booking System API",
|
||||
// Simple health check - just return OK if server is running
|
||||
c.JSON(200, gin.H{
|
||||
"status": "ok",
|
||||
"message": "Video Conference Booking System API",
|
||||
"timestamp": time.Now().UTC().Format(time.RFC3339),
|
||||
})
|
||||
}
|
||||
|
||||
// detailedHealthCheck handles the detailed health check endpoint
|
||||
func (s *Server) detailedHealthCheck(c *gin.Context) {
|
||||
ctx := c.Request.Context()
|
||||
|
||||
// Perform comprehensive health checks
|
||||
response := s.healthChecker.BuildResponse(ctx)
|
||||
|
||||
// Determine HTTP status code based on health status
|
||||
statusCode := 200
|
||||
switch response.Status {
|
||||
case health.StatusUnhealthy:
|
||||
statusCode = 503
|
||||
case health.StatusDegraded:
|
||||
statusCode = 503
|
||||
}
|
||||
|
||||
// Check database connectivity
|
||||
if s.db != nil {
|
||||
if err := s.db.Health(); err != nil {
|
||||
response["status"] = "error"
|
||||
response["database"] = "disconnected"
|
||||
response["error"] = err.Error()
|
||||
c.JSON(500, response)
|
||||
return
|
||||
}
|
||||
response["database"] = "connected"
|
||||
} else {
|
||||
response["database"] = "not initialized"
|
||||
c.JSON(statusCode, response)
|
||||
}
|
||||
|
||||
// metricsEndpoint handles the metrics endpoint
|
||||
func (s *Server) metricsEndpoint(c *gin.Context) {
|
||||
// Import monitoring package to get metrics
|
||||
// Note: In a real implementation, you might want to use a proper metrics format like Prometheus
|
||||
response := map[string]interface{}{
|
||||
"status": "ok",
|
||||
"timestamp": time.Now().UTC().Format(time.RFC3339),
|
||||
"service": "Video Conference Booking System",
|
||||
"message": "Metrics endpoint - monitoring data would be available here",
|
||||
}
|
||||
|
||||
c.JSON(200, response)
|
||||
}
|
||||
|
||||
// registerHealthChecks registers all health checks
|
||||
func (s *Server) registerHealthChecks() {
|
||||
s.log.Info("Registering health checks...")
|
||||
|
||||
// Database health check
|
||||
s.healthChecker.RegisterCheck("database", health.DatabaseHealthCheck(s.container.Database))
|
||||
|
||||
// Job manager health check
|
||||
s.healthChecker.RegisterCheck("job_manager", health.JobManagerHealthCheck(s.container.JobManagerService))
|
||||
|
||||
// Monitoring system health check
|
||||
s.healthChecker.RegisterCheck("monitoring", health.MonitoringHealthCheck())
|
||||
|
||||
// Memory health check (example with 512MB limit)
|
||||
s.healthChecker.RegisterCheck("memory", health.MemoryHealthCheck(512))
|
||||
|
||||
// Disk space health check (example with 1GB minimum)
|
||||
s.healthChecker.RegisterCheck("disk_space", health.DiskSpaceHealthCheck("/", 1))
|
||||
|
||||
s.log.Info("Health checks registered successfully")
|
||||
}
|
||||
|
||||
@ -1,283 +0,0 @@
|
||||
package services
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"attune-heart-therapy/internal/config"
|
||||
"attune-heart-therapy/internal/models"
|
||||
)
|
||||
|
||||
// MockBookingRepository for testing
|
||||
type MockBookingRepository struct {
|
||||
bookings map[uint]*models.Booking
|
||||
nextID uint
|
||||
}
|
||||
|
||||
func NewMockBookingRepository() *MockBookingRepository {
|
||||
return &MockBookingRepository{
|
||||
bookings: make(map[uint]*models.Booking),
|
||||
nextID: 1,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MockBookingRepository) Create(booking *models.Booking) error {
|
||||
booking.ID = m.nextID
|
||||
m.nextID++
|
||||
m.bookings[booking.ID] = booking
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockBookingRepository) GetByID(id uint) (*models.Booking, error) {
|
||||
if booking, exists := m.bookings[id]; exists {
|
||||
return booking, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *MockBookingRepository) GetByUserID(userID uint) ([]models.Booking, error) {
|
||||
var result []models.Booking
|
||||
for _, booking := range m.bookings {
|
||||
if booking.UserID == userID {
|
||||
result = append(result, *booking)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (m *MockBookingRepository) Update(booking *models.Booking) error {
|
||||
m.bookings[booking.ID] = booking
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockBookingRepository) Delete(id uint) error {
|
||||
delete(m.bookings, id)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockBookingRepository) GetUpcomingBookings() ([]models.Booking, error) {
|
||||
var result []models.Booking
|
||||
now := time.Now()
|
||||
for _, booking := range m.bookings {
|
||||
if booking.Status == models.BookingStatusScheduled && booking.ScheduledAt.After(now) {
|
||||
result = append(result, *booking)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// MockScheduleRepository for testing
|
||||
type MockScheduleRepository struct {
|
||||
schedules map[uint]*models.Schedule
|
||||
}
|
||||
|
||||
func NewMockScheduleRepository() *MockScheduleRepository {
|
||||
return &MockScheduleRepository{
|
||||
schedules: make(map[uint]*models.Schedule),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MockScheduleRepository) Create(schedule *models.Schedule) error {
|
||||
m.schedules[schedule.ID] = schedule
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockScheduleRepository) GetAvailable(date time.Time) ([]models.Schedule, error) {
|
||||
var result []models.Schedule
|
||||
for _, schedule := range m.schedules {
|
||||
if schedule.IsAvailable && schedule.BookedCount < schedule.MaxBookings {
|
||||
result = append(result, *schedule)
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (m *MockScheduleRepository) Update(schedule *models.Schedule) error {
|
||||
m.schedules[schedule.ID] = schedule
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockScheduleRepository) GetByID(id uint) (*models.Schedule, error) {
|
||||
if schedule, exists := m.schedules[id]; exists {
|
||||
return schedule, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *MockScheduleRepository) IncrementBookedCount(scheduleID uint) error {
|
||||
if schedule, exists := m.schedules[scheduleID]; exists {
|
||||
schedule.BookedCount++
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockScheduleRepository) DecrementBookedCount(scheduleID uint) error {
|
||||
if schedule, exists := m.schedules[scheduleID]; exists {
|
||||
schedule.BookedCount--
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockBookingRepository) GetByPaymentID(paymentID string) (*models.Booking, error) {
|
||||
for _, booking := range m.bookings {
|
||||
if booking.PaymentID == paymentID {
|
||||
return booking, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// MockUserRepository for testing
|
||||
type MockUserRepository struct {
|
||||
users map[uint]*models.User
|
||||
}
|
||||
|
||||
func NewMockUserRepository() *MockUserRepository {
|
||||
return &MockUserRepository{
|
||||
users: make(map[uint]*models.User),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MockUserRepository) Create(user *models.User) error {
|
||||
m.users[user.ID] = user
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockUserRepository) GetByID(id uint) (*models.User, error) {
|
||||
if user, exists := m.users[id]; exists {
|
||||
return user, nil
|
||||
}
|
||||
user := &models.User{
|
||||
Email: "test@example.com",
|
||||
FirstName: "Test",
|
||||
}
|
||||
user.ID = id
|
||||
return user, nil
|
||||
}
|
||||
|
||||
func (m *MockUserRepository) GetByEmail(email string) (*models.User, error) {
|
||||
for _, user := range m.users {
|
||||
if user.Email == email {
|
||||
return user, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (m *MockUserRepository) Update(user *models.User) error {
|
||||
m.users[user.ID] = user
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockUserRepository) GetActiveUsersCount() (int64, error) {
|
||||
return int64(len(m.users)), nil
|
||||
}
|
||||
|
||||
// MockNotificationService for testing
|
||||
type MockNotificationService struct{}
|
||||
|
||||
func (m *MockNotificationService) SendWelcomeEmail(user *models.User) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockNotificationService) SendPaymentNotification(user *models.User, booking *models.Booking, success bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockNotificationService) SendMeetingInfo(user *models.User, booking *models.Booking) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockNotificationService) SendReminder(user *models.User, booking *models.Booking) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockNotificationService) ScheduleReminder(bookingID uint, reminderTime time.Time) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *MockNotificationService) ProcessPendingNotifications() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestBookingService_CreateBookingWithJitsiIntegration(t *testing.T) {
|
||||
// Setup mock repositories
|
||||
bookingRepo := NewMockBookingRepository()
|
||||
scheduleRepo := NewMockScheduleRepository()
|
||||
userRepo := NewMockUserRepository()
|
||||
|
||||
// Setup Jitsi service
|
||||
jitsiConfig := &config.JitsiConfig{
|
||||
BaseURL: "https://meet.jit.si",
|
||||
}
|
||||
jitsiService := NewJitsiService(jitsiConfig)
|
||||
|
||||
// Setup mock services
|
||||
var paymentService PaymentService
|
||||
notificationService := &MockNotificationService{}
|
||||
|
||||
// Create booking service
|
||||
bookingService := NewBookingService(bookingRepo, scheduleRepo, userRepo, jitsiService, paymentService, notificationService)
|
||||
|
||||
// Create a test schedule
|
||||
schedule := &models.Schedule{
|
||||
StartTime: time.Now().Add(24 * time.Hour),
|
||||
EndTime: time.Now().Add(25 * time.Hour),
|
||||
IsAvailable: true,
|
||||
MaxBookings: 1,
|
||||
BookedCount: 0,
|
||||
}
|
||||
schedule.ID = 1
|
||||
scheduleRepo.schedules[1] = schedule
|
||||
|
||||
// Create booking request
|
||||
req := BookingRequest{
|
||||
ScheduleID: 1,
|
||||
Amount: 100.0,
|
||||
Notes: "Test booking with Jitsi integration",
|
||||
}
|
||||
|
||||
// Create booking
|
||||
booking, err := bookingService.CreateBooking(123, req)
|
||||
if err != nil {
|
||||
t.Fatalf("Expected no error, got %v", err)
|
||||
}
|
||||
|
||||
if booking == nil {
|
||||
t.Fatal("Expected booking to be created, got nil")
|
||||
}
|
||||
|
||||
// Verify booking details
|
||||
if booking.UserID != 123 {
|
||||
t.Errorf("Expected user ID 123, got %d", booking.UserID)
|
||||
}
|
||||
|
||||
if booking.Amount != 100.0 {
|
||||
t.Errorf("Expected amount 100.0, got %f", booking.Amount)
|
||||
}
|
||||
|
||||
// Verify Jitsi integration
|
||||
if booking.JitsiRoomID == "" {
|
||||
t.Error("Expected Jitsi room ID to be set")
|
||||
}
|
||||
|
||||
if booking.JitsiRoomURL == "" {
|
||||
t.Error("Expected Jitsi room URL to be set")
|
||||
}
|
||||
|
||||
// Verify URL format
|
||||
expectedPrefix := "https://meet.jit.si/"
|
||||
if len(booking.JitsiRoomURL) <= len(expectedPrefix) {
|
||||
t.Error("Expected room URL to contain room ID")
|
||||
}
|
||||
|
||||
if booking.JitsiRoomURL[:len(expectedPrefix)] != expectedPrefix {
|
||||
t.Errorf("Expected room URL to start with %s, got %s", expectedPrefix, booking.JitsiRoomURL)
|
||||
}
|
||||
|
||||
// Verify schedule booking count was incremented
|
||||
updatedSchedule, _ := scheduleRepo.GetByID(1)
|
||||
if updatedSchedule.BookedCount != 1 {
|
||||
t.Errorf("Expected booked count to be 1, got %d", updatedSchedule.BookedCount)
|
||||
}
|
||||
}
|
||||
@ -17,6 +17,7 @@ type bookingService struct {
|
||||
jitsiService JitsiService
|
||||
paymentService PaymentService
|
||||
notificationService NotificationService
|
||||
jobManagerService JobManagerService
|
||||
}
|
||||
|
||||
// NewBookingService creates a new instance of BookingService
|
||||
@ -27,6 +28,7 @@ func NewBookingService(
|
||||
jitsiService JitsiService,
|
||||
paymentService PaymentService,
|
||||
notificationService NotificationService,
|
||||
jobManagerService JobManagerService,
|
||||
) BookingService {
|
||||
return &bookingService{
|
||||
bookingRepo: bookingRepo,
|
||||
@ -35,6 +37,7 @@ func NewBookingService(
|
||||
jitsiService: jitsiService,
|
||||
paymentService: paymentService,
|
||||
notificationService: notificationService,
|
||||
jobManagerService: jobManagerService,
|
||||
}
|
||||
}
|
||||
|
||||
@ -118,6 +121,16 @@ func (s *bookingService) CreateBooking(userID uint, req BookingRequest) (*models
|
||||
}
|
||||
}
|
||||
|
||||
// Schedule reminder notifications for the booking
|
||||
if s.jobManagerService != nil && s.jobManagerService.IsRunning() {
|
||||
if err := s.jobManagerService.ScheduleRemindersForBooking(booking.ID, userID, booking.ScheduledAt); err != nil {
|
||||
log.Printf("Failed to schedule reminders for booking %d: %v", booking.ID, err)
|
||||
// Don't fail the booking creation if reminder scheduling fails
|
||||
} else {
|
||||
log.Printf("Successfully scheduled reminders for booking %d", booking.ID)
|
||||
}
|
||||
}
|
||||
|
||||
return booking, nil
|
||||
}
|
||||
|
||||
@ -168,6 +181,16 @@ func (s *bookingService) CancelBooking(userID, bookingID uint) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Cancel scheduled reminders for the booking
|
||||
if s.jobManagerService != nil && s.jobManagerService.IsRunning() {
|
||||
if err := s.jobManagerService.CancelRemindersForBooking(bookingID); err != nil {
|
||||
log.Printf("Failed to cancel reminders for booking %d: %v", bookingID, err)
|
||||
// Don't fail the cancellation if reminder cleanup fails
|
||||
} else {
|
||||
log.Printf("Successfully cancelled reminders for booking %d", bookingID)
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("Successfully cancelled booking %d for user %d", bookingID, userID)
|
||||
return nil
|
||||
}
|
||||
@ -235,6 +258,21 @@ func (s *bookingService) RescheduleBooking(userID, bookingID uint, newScheduleID
|
||||
log.Printf("Failed to increment booked count for new schedule %d: %v", newScheduleID, err)
|
||||
}
|
||||
|
||||
// Cancel old reminders and schedule new ones
|
||||
if s.jobManagerService != nil && s.jobManagerService.IsRunning() {
|
||||
// Cancel existing reminders
|
||||
if err := s.jobManagerService.CancelRemindersForBooking(bookingID); err != nil {
|
||||
log.Printf("Failed to cancel old reminders for rescheduled booking %d: %v", bookingID, err)
|
||||
}
|
||||
|
||||
// Schedule new reminders with the new meeting time
|
||||
if err := s.jobManagerService.ScheduleRemindersForBooking(bookingID, userID, booking.ScheduledAt); err != nil {
|
||||
log.Printf("Failed to schedule new reminders for rescheduled booking %d: %v", bookingID, err)
|
||||
} else {
|
||||
log.Printf("Successfully rescheduled reminders for booking %d", bookingID)
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("Successfully rescheduled booking %d for user %d to schedule %d", bookingID, userID, newScheduleID)
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -60,6 +60,15 @@ type AdminService interface {
|
||||
GetAllBookings(limit, offset int) ([]models.Booking, int64, error)
|
||||
}
|
||||
|
||||
// JobManagerService handles background job operations
|
||||
type JobManagerService interface {
|
||||
Start() error
|
||||
Stop() error
|
||||
IsRunning() bool
|
||||
ScheduleRemindersForBooking(bookingID uint, userID uint, meetingTime time.Time) error
|
||||
CancelRemindersForBooking(bookingID uint) error
|
||||
}
|
||||
|
||||
// JitsiMeeting represents a Jitsi meeting
|
||||
type JitsiMeeting struct {
|
||||
RoomID string `json:"room_id"`
|
||||
|
||||
54
internal/services/job_manager_service.go
Normal file
54
internal/services/job_manager_service.go
Normal file
@ -0,0 +1,54 @@
|
||||
package services
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"attune-heart-therapy/internal/jobs"
|
||||
"attune-heart-therapy/internal/repositories"
|
||||
)
|
||||
|
||||
// jobManagerService implements the JobManagerService interface
|
||||
type jobManagerService struct {
|
||||
manager *jobs.Manager
|
||||
}
|
||||
|
||||
// NewJobManagerService creates a new job manager service
|
||||
func NewJobManagerService(
|
||||
notificationService NotificationService,
|
||||
bookingRepo repositories.BookingRepository,
|
||||
userRepo repositories.UserRepository,
|
||||
) JobManagerService {
|
||||
// Use default config from jobs package
|
||||
manager := jobs.NewManager(notificationService, bookingRepo, userRepo, nil)
|
||||
|
||||
return &jobManagerService{
|
||||
manager: manager,
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the job manager
|
||||
func (s *jobManagerService) Start() error {
|
||||
return s.manager.Start()
|
||||
}
|
||||
|
||||
// Stop stops the job manager
|
||||
func (s *jobManagerService) Stop() error {
|
||||
return s.manager.Stop()
|
||||
}
|
||||
|
||||
// IsRunning returns whether the job manager is running
|
||||
func (s *jobManagerService) IsRunning() bool {
|
||||
return s.manager.IsRunning()
|
||||
}
|
||||
|
||||
// ScheduleRemindersForBooking schedules reminders for a booking
|
||||
func (s *jobManagerService) ScheduleRemindersForBooking(bookingID uint, userID uint, meetingTime time.Time) error {
|
||||
reminderScheduler := s.manager.GetReminderScheduler()
|
||||
return reminderScheduler.ScheduleRemindersForBooking(bookingID, userID, meetingTime)
|
||||
}
|
||||
|
||||
// CancelRemindersForBooking cancels reminders for a booking
|
||||
func (s *jobManagerService) CancelRemindersForBooking(bookingID uint) error {
|
||||
reminderScheduler := s.manager.GetReminderScheduler()
|
||||
return reminderScheduler.CancelRemindersForBooking(bookingID)
|
||||
}
|
||||
Loading…
Reference in New Issue
Block a user