package moderation

import (
	"errors"
	"log"
	"strings"
	"time"

	"gorm.io/gorm"
)

// ModerationService handles business logic for content moderation
type ModerationService struct {
	db *gorm.DB
}

// NewModerationService creates a new moderation service
func NewModerationService(db *gorm.DB) *ModerationService {
	// Auto-migrate the schema
	if err := db.AutoMigrate(&ContentFlag{}, &ModerationQueueItem{}); err != nil {
		log.Printf("Error auto-migrating moderation schema: %v", err)
	}
	return &ModerationService{
		db: db,
	}
}

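// Example usage (illustrative sketch, not part of the original source): the
// service only needs an opened *gorm.DB handle. The sqlite driver
// (gorm.io/driver/sqlite) and the DSN below are assumptions made for the example.
//
//	db, err := gorm.Open(sqlite.Open("moderation.db"), &gorm.Config{})
//	if err != nil {
//		log.Fatalf("failed to open database: %v", err)
//	}
//	svc := NewModerationService(db)
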
// FlagContent flags content for moderation
func (s *ModerationService) FlagContent(contentID, reason, flaggerAddress string) (*ContentFlag, error) {
	// Create a new flag
	flag := &ContentFlag{
		ContentID:      contentID,
		Reason:         reason,
		FlaggerAddress: flaggerAddress,
		Timestamp:      time.Now(),
	}
	// Save the flag to the database
	if err := s.db.Create(flag).Error; err != nil {
		log.Printf("Error creating flag: %v", err)
		return nil, err
	}
	// Count how many flags this content has
	var flagCount int64
	if err := s.db.Model(&ContentFlag{}).Where("content_id = ?", contentID).Count(&flagCount).Error; err != nil {
		log.Printf("Error counting flags: %v", err)
		return nil, err
	}
	// If the content is flagged 3 or more times, add it to the moderation queue
	if flagCount >= 3 {
		// Check whether the content is already queued
		var existingItem ModerationQueueItem
		result := s.db.Where("content_id = ?", contentID).First(&existingItem)
		if result.Error != nil {
			if errors.Is(result.Error, gorm.ErrRecordNotFound) {
				// Not queued yet: add it
				queueItem := &ModerationQueueItem{
					ContentID: contentID,
					FlagCount: int(flagCount),
					Reason:    "User flagged",
					Status:    "pending",
					AIFlagged: false,
				}
				if err := s.db.Create(queueItem).Error; err != nil {
					log.Printf("Error adding to moderation queue: %v", err)
					return nil, err
				}
			} else {
				log.Printf("Error checking moderation queue: %v", result.Error)
				return nil, result.Error
			}
		} else {
			// Already queued: update the flag count
			existingItem.FlagCount = int(flagCount)
			if err := s.db.Save(&existingItem).Error; err != nil {
				log.Printf("Error updating moderation queue item: %v", err)
				return nil, err
			}
		}
	}
	return flag, nil
}

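// Example usage (illustrative sketch, not part of the original source): each
// call records one flag, and the third flag for the same content ID pushes it
// into the moderation queue. The content ID and addresses are hypothetical.
//
//	for _, addr := range []string{"0xAAA", "0xBBB", "0xCCC"} {
//		if _, err := svc.FlagContent("post-42", "spam", addr); err != nil {
//			log.Printf("flagging failed: %v", err)
//		}
//	}
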
// ModerateContent approves or rejects content in the moderation queue
func (s *ModerationService) ModerateContent(contentID, decision string) error {
	var queueItem ModerationQueueItem
	if err := s.db.Where("content_id = ?", contentID).First(&queueItem).Error; err != nil {
		log.Printf("Error finding queue item: %v", err)
		return err
	}
	// Update status based on the decision
	switch decision {
	case "approve":
		queueItem.Status = "approved"
	case "reject":
		queueItem.Status = "rejected"
	default:
		return errors.New("invalid decision: must be 'approve' or 'reject'")
	}
	// Save the updated queue item
	if err := s.db.Save(&queueItem).Error; err != nil {
		log.Printf("Error updating queue item: %v", err)
		return err
	}
	return nil
}

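// Example usage (illustrative sketch, not part of the original source):
//
//	if err := svc.ModerateContent("post-42", "approve"); err != nil {
//		log.Printf("moderation decision failed: %v", err)
//	}
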
// GetModerationQueue returns all pending items in the moderation queue
func (s *ModerationService) GetModerationQueue() ([]ModerationQueueItem, error) {
	var queueItems []ModerationQueueItem
	if err := s.db.Where("status = ?", "pending").Find(&queueItems).Error; err != nil {
		log.Printf("Error getting moderation queue: %v", err)
		return nil, err
	}
	return queueItems, nil
}

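// Example usage (illustrative sketch, not part of the original source):
// drain the pending queue and approve every item.
//
//	items, err := svc.GetModerationQueue()
//	if err != nil {
//		log.Printf("loading queue failed: %v", err)
//	}
//	for _, item := range items {
//		if err := svc.ModerateContent(item.ContentID, "approve"); err != nil {
//			log.Printf("approving %s failed: %v", item.ContentID, err)
//		}
//	}
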
// RunAIModeration runs AI moderation on content (mock implementation)
func (s *ModerationService) RunAIModeration(content *ContentToModerate) (bool, string, error) {
	if content.IsVerified {
		// Skip AI moderation for verified users
		return false, "", nil
	}
	// Mock implementation of content moderation.
	// In a real implementation, this would call an AI moderation service.
	text := strings.ToLower(content.Text)

	// Simple keyword check for demonstration
	spamKeywords := []string{"spam", "viagra", "casino", "xxx", "lottery", "prize", "winner", "free money"}
	for _, keyword := range spamKeywords {
		if strings.Contains(text, keyword) {
			reason := "AI-flagged: potential spam"
			err := s.queueAIFlaggedContent(content.ContentID, reason)
			return true, reason, err
		}
	}

	// Check for harmful content
	harmfulKeywords := []string{"kill", "die", "hate", "attack", "bomb", "threat", "hack"}
	for _, keyword := range harmfulKeywords {
		if strings.Contains(text, keyword) {
			reason := "AI-flagged: potential harmful content"
			err := s.queueAIFlaggedContent(content.ContentID, reason)
			return true, reason, err
		}
	}
	return false, "", nil
}

// queueAIFlaggedContent adds AI-flagged content to the moderation queue, or
// updates the existing queue item if the content is already queued.
func (s *ModerationService) queueAIFlaggedContent(contentID, reason string) error {
	var existingItem ModerationQueueItem
	result := s.db.Where("content_id = ?", contentID).First(&existingItem)
	if result.Error != nil {
		if !errors.Is(result.Error, gorm.ErrRecordNotFound) {
			log.Printf("Error checking moderation queue: %v", result.Error)
			return result.Error
		}
		// Not queued yet: add it
		queueItem := &ModerationQueueItem{
			ContentID: contentID,
			FlagCount: 1, // AI counts as one flag
			Reason:    reason,
			Status:    "pending",
			AIFlagged: true,
		}
		if err := s.db.Create(queueItem).Error; err != nil {
			log.Printf("Error adding AI-flagged content to moderation queue: %v", err)
			return err
		}
		return nil
	}
	// Already queued: update the reason and AI flag
	existingItem.Reason = reason
	existingItem.AIFlagged = true
	if err := s.db.Save(&existingItem).Error; err != nil {
		log.Printf("Error updating moderation queue item: %v", err)
		return err
	}
	return nil
}

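// Example usage (illustrative sketch, not part of the original source): the
// ContentToModerate value below is hypothetical.
//
//	flagged, reason, err := svc.RunAIModeration(&ContentToModerate{
//		ContentID:  "post-43",
//		Text:       "Win free money at our casino!",
//		IsVerified: false,
//	})
//	if err != nil {
//		log.Printf("AI moderation failed: %v", err)
//	} else if flagged {
//		log.Printf("content flagged: %s", reason)
//	}
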
// CheckEconomicBarrier checks if the user has paid the required fee (mock implementation)
func (s *ModerationService) CheckEconomicBarrier(userAddress string, isVerified bool) (bool, error) {
	if isVerified {
		// Verified users don't need to pay a fee
		return true, nil
	}
	// Mock implementation - always return true.
	// In a real implementation, this would check the blockchain.
	return true, nil
}

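// The ContentFlag, ModerationQueueItem, and ContentToModerate types are
// defined elsewhere in this package. A minimal sketch consistent with the
// fields used above (an assumption, not the actual definitions) might be:
//
//	type ContentFlag struct {
//		gorm.Model
//		ContentID      string
//		Reason         string
//		FlaggerAddress string
//		Timestamp      time.Time
//	}
//
//	type ModerationQueueItem struct {
//		gorm.Model
//		ContentID string
//		FlagCount int
//		Reason    string
//		Status    string
//		AIFlagged bool
//	}
//
//	type ContentToModerate struct {
//		ContentID  string
//		Text       string
//		IsVerified bool
//	}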