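// Package suggestions turns social content trends into product ideas by
// querying an LLM through a LiteLLM-compatible proxy.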
package suggestions

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
	"strings"
	"time"

	"git.linuxforward.com/byom/byom-trends/common"
	"git.linuxforward.com/byom/byom-trends/config"
	"git.linuxforward.com/byom/byom-trends/logger"
)
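// Analyzer generates product suggestions from content trends via an LLM.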
type Analyzer struct {
	httpClient *http.Client
	config     *config.LiteLLMConfig
	logger     *logger.Entry
}
// llmRequest is the OpenAI-style chat-completion request body accepted by
// the LiteLLM proxy.
type llmRequest struct {
	Model       string    `json:"model"`
	Messages    []message `json:"messages"`
	Temperature float32   `json:"temperature"`
}

type message struct {
	Role    string `json:"role"`
	Content string `json:"content"`
}

// llmResponse mirrors the subset of the completion response we consume.
type llmResponse struct {
	Choices []struct {
		Message struct {
			Content string `json:"content"`
		} `json:"message"`
	} `json:"choices"`
}
func NewAnalyzer(cfg *config.LiteLLMConfig) (*Analyzer, error) {
	if cfg.ProxyURL == "" {
		return nil, fmt.Errorf("LiteLLM proxy URL not configured")
	}
	return &Analyzer{
		httpClient: &http.Client{
			Timeout: time.Duration(cfg.TimeoutSecs) * time.Second,
		},
		config: cfg,
		logger: logger.NewLogger("suggestions-analyzer"),
	}, nil
}
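// AnalyzeTrendsForProducts prompts the LLM with the given trends and parses
// its JSON answer into product suggestions.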
func (a *Analyzer) AnalyzeTrendsForProducts(trends []common.ContentTrend) ([]common.ProductSuggestion, error) {
	a.logger.Info("Analyzing trends for product suggestions")

	// Build prompt for LLM
	prompt := a.buildTrendAnalysisPrompt(trends)

	// Query LLM
	suggestions, err := a.queryLLM(prompt)
	if err != nil {
		return nil, fmt.Errorf("query LLM: %w", err)
	}

	// Parse response
	return a.parseLLMResponse(suggestions)
}
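// buildTrendAnalysisPrompt renders the trends as plain text and wraps them in
// instructions that pin the model to a strict JSON output format.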
func (a *Analyzer) buildTrendAnalysisPrompt(trends []common.ContentTrend) string {
	var sb strings.Builder
	for _, trend := range trends {
		fmt.Fprintf(&sb, "\nPlatform: %s\nType: %s\n", trend.Platform, trend.TrendType)
		for _, item := range trend.Items {
			fmt.Fprintf(&sb, "- %s (frequency: %d, growth rate: %.2f%%)\n",
				item.Value, item.Frequency, item.GrowthRate)
		}
	}

	return fmt.Sprintf(`As a market analysis expert, analyze the following social trends:
%s
Suggest 5 product or service ideas that could capitalize on these trends.
For each suggestion, provide:
1. Product/service name
2. Short description
3. Target audience
4. Relevance score (1-10)
5. Success factors

Expected JSON format:
{
    "suggestions": [
        {
            "name": "string",
            "description": "string",
            "target_audience": "string",
            "relevance_score": number,
            "success_factors": ["string"]
        }
    ]
}`, sb.String())
}
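// queryLLM sends the prompt to the proxy's /chat/completions endpoint,
// retrying with exponential backoff, and returns the JSON object embedded in
// the first choice's message content.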
func (a *Analyzer) queryLLM(prompt string) (string, error) {
	a.logger.WithField("prompt", prompt).Debug("Querying LiteLLM")

	request := llmRequest{
		Model: a.config.Model,
		Messages: []message{
			{
				Role:    "system",
				Content: "You are a market analysis expert specialized in identifying business opportunities based on social trends.",
			},
			{
				Role:    "user",
				Content: prompt,
			},
		},
		Temperature: 0.7,
	}

	reqBody, err := json.Marshal(request)
	if err != nil {
		return "", fmt.Errorf("marshal request: %w", err)
	}

	var resp *http.Response
	var lastErr error

	// Retry with exponential backoff. The request is rebuilt on every attempt
	// because its body reader is consumed by each send.
	for attempt := 0; attempt <= a.config.MaxRetries; attempt++ {
		req, err := http.NewRequest(http.MethodPost, a.config.ProxyURL+"/chat/completions", bytes.NewReader(reqBody))
		if err != nil {
			return "", fmt.Errorf("create request: %w", err)
		}

		// Add headers
		req.Header.Set("Content-Type", "application/json")
		if a.config.APIKey != "" {
			req.Header.Set("Authorization", "Bearer "+a.config.APIKey)
		}
		for key, value := range a.config.Headers {
			req.Header.Set(key, value)
		}

		resp, err = a.httpClient.Do(req)
		if err == nil && resp.StatusCode == http.StatusOK {
			lastErr = nil
			break
		}
		if err != nil {
			lastErr = err
		} else {
			// Treat a non-200 status as a retryable failure and release the body.
			lastErr = fmt.Errorf("unexpected status: %s", resp.Status)
			resp.Body.Close()
		}

		// Exponential backoff: 1s, 2s, 4s, ...
		if attempt < a.config.MaxRetries {
			time.Sleep(time.Duration(1<<attempt) * time.Second)
		}
	}
	if lastErr != nil {
		return "", fmt.Errorf("request failed after %d attempts: %w", a.config.MaxRetries+1, lastErr)
	}
	defer resp.Body.Close()

	var llmResp llmResponse
	if err := json.NewDecoder(resp.Body).Decode(&llmResp); err != nil {
		return "", fmt.Errorf("decode response: %w", err)
	}
	if len(llmResp.Choices) == 0 {
		return "", fmt.Errorf("no response content received")
	}

	content := llmResp.Choices[0].Message.Content

	// Extract the JSON object from the content; models often wrap it in prose.
	start := strings.Index(content, "{")
	end := strings.LastIndex(content, "}")
	if start == -1 || end == -1 || end < start {
		return "", fmt.Errorf("no valid JSON found in response")
	}
	return content[start : end+1], nil
}
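// parseLLMResponse unmarshals the extracted JSON into product suggestions.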
func (a *Analyzer) parseLLMResponse(response string) ([]common.ProductSuggestion, error) {
	var parsed struct {
		Suggestions []common.ProductSuggestion `json:"suggestions"`
	}
	if err := json.Unmarshal([]byte(response), &parsed); err != nil {
		return nil, fmt.Errorf("parse LLM response: %w", err)
	}
	return parsed.Suggestions, nil
}
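// Example usage (a minimal sketch; the literal config values below are
// illustrative assumptions, and the trends slice is assumed to come from the
// caller's trend-collection pipeline):
//
//	cfg := &config.LiteLLMConfig{
//		ProxyURL:    "http://localhost:4000", // assumed local LiteLLM proxy
//		Model:       "gpt-4o",                // any model name the proxy routes
//		TimeoutSecs: 30,
//		MaxRetries:  3,
//	}
//	analyzer, err := NewAnalyzer(cfg)
//	if err != nil {
//		// handle configuration error
//	}
//	ideas, err := analyzer.AnalyzeTrendsForProducts(trends)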
|