package clients

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"git.linuxforward.com/byop/byop-engine/models"
	"github.com/docker/cli/cli/config/configfile"
	clitypes "github.com/docker/cli/cli/config/types"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/auth/authprovider"
	"github.com/sirupsen/logrus"
	"github.com/tonistiigi/fsutil"
	"golang.org/x/sync/errgroup"
)

// DockerfileBuilder implements BuildMachineClient using Dockerfile-based
// builds executed by a remote BuildKit daemon (dockerfile.v0 frontend).
// Inspired by the buildkit example provided.
type DockerfileBuilder struct {
	buildkitHost string
	entry        *logrus.Entry
}

// NewDockerfileBuilder creates a new DockerfileBuilder that talks to the
// BuildKit daemon reachable at buildkitHost.
func NewDockerfileBuilder(buildkitHost string) BuildMachineClient {
	return &DockerfileBuilder{
		buildkitHost: buildkitHost,
		entry:        logrus.WithField("component", "DockerfileBuilder"),
	}
}

// writeGeneratedDockerfile persists job.DockerfileContent as "Dockerfile"
// inside the build context and returns the path it was written to.
// Callers must only invoke this when job.DockerfileContent is non-empty.
func (db *DockerfileBuilder) writeGeneratedDockerfile(job models.BuildJob, contextPath string) (string, error) {
	dockerfilePath := filepath.Join(contextPath, "Dockerfile")
	if err := os.WriteFile(dockerfilePath, []byte(job.DockerfileContent), 0644); err != nil {
		return "", fmt.Errorf("job %d: failed to write generated Dockerfile: %w", job.ID, err)
	}
	db.entry.Infof("Job %d: Wrote generated Dockerfile to %s", job.ID, dockerfilePath)
	return dockerfilePath, nil
}

// dockerfileFrontendAttrs builds the dockerfile.v0 frontend attribute map:
// the Dockerfile name, the optional no-cache flag, and any build args.
func dockerfileFrontendAttrs(dockerfilePath string, noCache bool, buildArgs map[string]string) map[string]string {
	attrs := map[string]string{
		"filename": filepath.Base(dockerfilePath),
	}
	if noCache {
		// BuildKit treats the mere presence of the key as "disable cache".
		attrs["no-cache"] = ""
	}
	for key, value := range buildArgs {
		attrs["build-arg:"+key] = value
	}
	return attrs
}

// registryAuthSession builds the BuildKit session attachables carrying
// registry credentials for the given registry, for pull and/or push auth.
func registryAuthSession(registryURL, username, password string) []session.Attachable {
	authConfig := authprovider.DockerAuthProviderConfig{
		ConfigFile: &configfile.ConfigFile{
			AuthConfigs: map[string]clitypes.AuthConfig{
				registryURL: {
					Username: username,
					Password: password,
				},
			},
		},
	}
	return []session.Attachable{authprovider.NewDockerAuthProvider(authConfig)}
}

// solveWithOutput runs a BuildKit solve while draining the progress channel,
// capturing vertex errors and raw log data. It returns the solve response
// together with everything captured from the progress stream.
func (db *DockerfileBuilder) solveWithOutput(ctx context.Context, c *client.Client, solveOpt *client.SolveOpt) (*client.SolveResponse, string, error) {
	ch := make(chan *client.SolveStatus)
	eg, gctx := errgroup.WithContext(ctx)

	var buildOutput strings.Builder
	var solveResp *client.SolveResponse

	// Start the build.
	eg.Go(func() error {
		var err error
		solveResp, err = c.Solve(gctx, nil, *solveOpt, ch)
		if err != nil {
			return fmt.Errorf("BuildKit solve failed: %w", err)
		}
		return nil
	})

	// Collect build output; the channel is closed by Solve when it returns.
	eg.Go(func() error {
		for status := range ch {
			for _, v := range status.Vertexes {
				if v.Error != "" {
					buildOutput.WriteString(fmt.Sprintf("Vertex Error: %s: %s\n", v.Name, v.Error))
				}
			}
			for _, l := range status.Logs {
				buildOutput.Write(l.Data)
			}
		}
		return nil
	})

	err := eg.Wait()
	return solveResp, buildOutput.String(), err
}

// BuildImage builds a Docker image using BuildKit with the Dockerfile
// frontend. On success it returns the image digest when the exporter reports
// one, otherwise the captured build output. On failure the captured build
// output is returned alongside the error for diagnostics.
func (db *DockerfileBuilder) BuildImage(ctx context.Context, job models.BuildJob, dockerfilePath string, contextPath string, imageName string, imageTag string, noCache bool, buildArgs map[string]string) (string, error) {
	db.entry.Infof("Job %d: Building image %s:%s using Dockerfile approach", job.ID, imageName, imageTag)

	c, err := client.New(ctx, db.buildkitHost)
	if err != nil {
		return "", fmt.Errorf("job %d: failed to create BuildKit client: %w", job.ID, err)
	}
	defer c.Close()

	// If we have generated Dockerfile content, write it to the build context.
	if job.DockerfileContent != "" {
		dockerfilePath, err = db.writeGeneratedDockerfile(job, contextPath)
		if err != nil {
			return "", err
		}

		// Debug: log the first few lines of the Dockerfile to verify content.
		lines := strings.Split(job.DockerfileContent, "\n")
		if len(lines) > 10 {
			lines = lines[:10]
		}
		db.entry.Infof("Job %d: Generated Dockerfile first 10 lines:\n%s", job.ID, strings.Join(lines, "\n"))

		// Debug: check whether go.sum exists in the build context.
		goSumPath := filepath.Join(contextPath, "go.sum")
		if _, err := os.Stat(goSumPath); err == nil {
			db.entry.Infof("Job %d: go.sum EXISTS in build context %s", job.ID, contextPath)
		} else {
			db.entry.Infof("Job %d: go.sum DOES NOT EXIST in build context %s", job.ID, contextPath)
		}
	}

	solveOpt, err := db.newSolveOpt(ctx, job, contextPath, dockerfilePath, imageName, imageTag, noCache, buildArgs)
	if err != nil {
		return "", fmt.Errorf("job %d: failed to create solve options: %w", job.ID, err)
	}

	solveResp, buildOutput, err := db.solveWithOutput(ctx, c, solveOpt)
	if err != nil {
		db.entry.Errorf("Job %d: Build failed: %v. Output:\n%s", job.ID, err, buildOutput)
		return buildOutput, fmt.Errorf("build failed: %w", err)
	}

	db.entry.Infof("Job %d: Image %s:%s built successfully", job.ID, imageName, imageTag)

	// Return the digest if the exporter provided one.
	if solveResp != nil && solveResp.ExporterResponse != nil {
		if digest, ok := solveResp.ExporterResponse["containerimage.digest"]; ok {
			return digest, nil
		}
	}
	return buildOutput, nil
}

// newSolveOpt creates solve options for Dockerfile builds (no push), similar
// to the provided example. The image is named registryURL/imageName:imageTag
// when job.RegistryURL is set, otherwise imageName:imageTag.
func (db *DockerfileBuilder) newSolveOpt(ctx context.Context, job models.BuildJob, buildContext, dockerfilePath, imageName, imageTag string, noCache bool, buildArgs map[string]string) (*client.SolveOpt, error) {
	if buildContext == "" {
		return nil, fmt.Errorf("build context cannot be empty")
	}
	if dockerfilePath == "" {
		dockerfilePath = filepath.Join(buildContext, "Dockerfile")
	}

	// Create filesystem mounts for the build context and Dockerfile directory.
	contextFS, err := fsutil.NewFS(buildContext)
	if err != nil {
		return nil, fmt.Errorf("invalid build context: %w", err)
	}
	dockerfileFS, err := fsutil.NewFS(filepath.Dir(dockerfilePath))
	if err != nil {
		return nil, fmt.Errorf("invalid dockerfile directory: %w", err)
	}

	fullImageName := fmt.Sprintf("%s:%s", imageName, imageTag)
	if job.RegistryURL != "" {
		fullImageName = fmt.Sprintf("%s/%s:%s", job.RegistryURL, imageName, imageTag)
	}

	solveOpt := &client.SolveOpt{
		Exports: []client.ExportEntry{
			{
				Type: client.ExporterImage,
				Attrs: map[string]string{
					"name": fullImageName,
				},
			},
		},
		LocalMounts: map[string]fsutil.FS{
			"context":    contextFS,
			"dockerfile": dockerfileFS,
		},
		Frontend:      "dockerfile.v0", // Use dockerfile frontend
		FrontendAttrs: dockerfileFrontendAttrs(dockerfilePath, noCache, buildArgs),
	}

	// Setup authentication if registry credentials are provided.
	if job.RegistryURL != "" && job.RegistryUser != "" && job.RegistryPassword != "" {
		solveOpt.Session = registryAuthSession(job.RegistryURL, job.RegistryUser, job.RegistryPassword)
	}
	return solveOpt, nil
}

// PushImage pushes the built image to the registry. For Dockerfile-based
// builds this re-runs the solve with a push export (BuildKit caching makes
// the rebuild cheap), similar to the approach in the provided example.
func (db *DockerfileBuilder) PushImage(ctx context.Context, job models.BuildJob, fullImageURI string, registryURL string, username string, password string) error {
	db.entry.Infof("Job %d: Pushing image %s to registry", job.ID, fullImageURI)

	c, err := client.New(ctx, db.buildkitHost)
	if err != nil {
		return fmt.Errorf("job %d: failed to create BuildKit client for push: %w", job.ID, err)
	}
	defer c.Close()

	contextFS, err := fsutil.NewFS(job.BuildContext)
	if err != nil {
		return fmt.Errorf("job %d: failed to create context FS for push: %w", job.ID, err)
	}

	dockerfilePath := job.Dockerfile
	if job.DockerfileContent != "" {
		// Write the generated Dockerfile content into the build context.
		dockerfilePath = filepath.Join(job.BuildContext, "Dockerfile")
		if err := os.WriteFile(dockerfilePath, []byte(job.DockerfileContent), 0644); err != nil {
			return fmt.Errorf("job %d: failed to write Dockerfile for push: %w", job.ID, err)
		}
	}

	dockerfileFS, err := fsutil.NewFS(filepath.Dir(dockerfilePath))
	if err != nil {
		return fmt.Errorf("job %d: failed to create dockerfile FS for push: %w", job.ID, err)
	}

	// Parse JSON-encoded build args. A malformed value is logged and skipped
	// so a bad BuildArgs string does not abort the push (best-effort).
	buildArgs := make(map[string]string)
	if job.BuildArgs != "" {
		if err := json.Unmarshal([]byte(job.BuildArgs), &buildArgs); err != nil {
			db.entry.Warnf("Job %d: could not parse build args %q: %v", job.ID, job.BuildArgs, err)
		}
	}

	solveOpt := &client.SolveOpt{
		Exports: []client.ExportEntry{
			{
				Type: client.ExporterImage,
				Attrs: map[string]string{
					"name": fullImageURI,
					"push": "true",
				},
			},
		},
		LocalMounts: map[string]fsutil.FS{
			"context":    contextFS,
			"dockerfile": dockerfileFS,
		},
		Frontend:      "dockerfile.v0",
		FrontendAttrs: dockerfileFrontendAttrs(dockerfilePath, job.NoCache, buildArgs),
	}

	// Setup authentication for push.
	if username != "" && password != "" {
		solveOpt.Session = registryAuthSession(registryURL, username, password)
	}

	ch := make(chan *client.SolveStatus)
	eg, gctx := errgroup.WithContext(ctx)

	// Process solve status updates.
	eg.Go(func() error {
		for status := range ch {
			// Log progress if needed.
			for _, vertex := range status.Vertexes {
				if vertex.Completed != nil {
					db.entry.Debugf("Job %d: Vertex %s completed", job.ID, vertex.Name)
				}
			}
		}
		db.entry.Infof("Job %d: Solve status channel closed", job.ID)
		return nil
	})

	// Execute the solve.
	eg.Go(func() error {
		db.entry.Infof("Job %d: Starting BuildKit solve for push", job.ID)
		_, err := c.Solve(gctx, nil, *solveOpt, ch)
		if err != nil {
			db.entry.Errorf("Job %d: BuildKit solve failed: %v", job.ID, err)
		} else {
			db.entry.Infof("Job %d: BuildKit solve completed successfully", job.ID)
		}
		return err
	})

	if err := eg.Wait(); err != nil {
		return fmt.Errorf("job %d: failed to push image: %w", job.ID, err)
	}

	db.entry.Infof("Job %d: Successfully pushed image %s", job.ID, fullImageURI)
	return nil
}

// CheckImageExists checks if an image exists in the registry.
// Not implemented for DockerfileBuilder: it would require registry API calls.
func (db *DockerfileBuilder) CheckImageExists(ctx context.Context, fullImageURI string, registryURL string, username string, password string) (bool, error) {
	db.entry.Infof("CheckImageExists called for %s (not implemented)", fullImageURI)
	return false, fmt.Errorf("CheckImageExists not implemented for DockerfileBuilder")
}

// Prune cleans up build resources. This is a no-op for DockerfileBuilder;
// cache management is left to the BuildKit daemon itself.
func (db *DockerfileBuilder) Prune(ctx context.Context, job models.BuildJob) error {
	db.entry.Infof("Job %d: Prune called (no-op for DockerfileBuilder)", job.ID)
	return nil
}

// Close releases any resources held by the client. BuildKit clients are
// created per operation, so there is nothing to release here.
func (db *DockerfileBuilder) Close() error {
	db.entry.Info("DockerfileBuilder closed")
	return nil
}

// BuildImageWithPush builds and pushes an image in one solve for efficiency.
// On success it returns the image digest when the exporter reports one,
// otherwise the captured build output.
func (db *DockerfileBuilder) BuildImageWithPush(ctx context.Context, job models.BuildJob, dockerfilePath string, contextPath string, imageName string, imageTag string, noCache bool, buildArgs map[string]string, fullImageURI string, registryURL string, username string, password string) (string, error) {
	db.entry.Infof("Job %d: Building and pushing image %s:%s using combined operation", job.ID, imageName, imageTag)

	c, err := client.New(ctx, db.buildkitHost)
	if err != nil {
		return "", fmt.Errorf("job %d: failed to create BuildKit client: %w", job.ID, err)
	}
	defer c.Close()

	// If we have generated Dockerfile content, write it to the build context.
	if job.DockerfileContent != "" {
		dockerfilePath, err = db.writeGeneratedDockerfile(job, contextPath)
		if err != nil {
			return "", err
		}
	}

	solveOpt, err := db.newSolveOptWithPush(ctx, job, contextPath, dockerfilePath, fullImageURI, noCache, buildArgs, registryURL, username, password)
	if err != nil {
		return "", fmt.Errorf("job %d: failed to create solve options: %w", job.ID, err)
	}

	solveResp, buildOutput, err := db.solveWithOutput(ctx, c, solveOpt)
	if err != nil {
		db.entry.Errorf("Job %d: Build and push failed: %v. Output:\n%s", job.ID, err, buildOutput)
		return buildOutput, fmt.Errorf("build and push failed: %w", err)
	}

	db.entry.Infof("Job %d: Image %s built and pushed successfully", job.ID, fullImageURI)

	// Return the digest if the exporter provided one.
	if solveResp != nil && solveResp.ExporterResponse != nil {
		if digest, ok := solveResp.ExporterResponse["containerimage.digest"]; ok {
			return digest, nil
		}
	}
	return buildOutput, nil
}

// newSolveOptWithPush creates solve options for a combined build-and-push:
// identical to newSolveOpt except the image export carries push=true and the
// image name/credentials are passed in explicitly.
func (db *DockerfileBuilder) newSolveOptWithPush(ctx context.Context, job models.BuildJob, buildContext, dockerfilePath, fullImageURI string, noCache bool, buildArgs map[string]string, registryURL, username, password string) (*client.SolveOpt, error) {
	if buildContext == "" {
		return nil, fmt.Errorf("build context cannot be empty")
	}
	if dockerfilePath == "" {
		dockerfilePath = filepath.Join(buildContext, "Dockerfile")
	}

	// Create filesystem mounts for the build context and Dockerfile directory.
	contextFS, err := fsutil.NewFS(buildContext)
	if err != nil {
		return nil, fmt.Errorf("invalid build context: %w", err)
	}
	dockerfileFS, err := fsutil.NewFS(filepath.Dir(dockerfilePath))
	if err != nil {
		return nil, fmt.Errorf("invalid dockerfile directory: %w", err)
	}

	solveOpt := &client.SolveOpt{
		Exports: []client.ExportEntry{
			{
				Type: client.ExporterImage,
				Attrs: map[string]string{
					"name": fullImageURI,
					"push": "true", // Enable push to registry
				},
			},
		},
		LocalMounts: map[string]fsutil.FS{
			"context":    contextFS,
			"dockerfile": dockerfileFS,
		},
		Frontend:      "dockerfile.v0", // Use dockerfile frontend
		FrontendAttrs: dockerfileFrontendAttrs(dockerfilePath, noCache, buildArgs),
	}

	// Setup authentication if registry credentials are provided.
	if username != "" && password != "" {
		solveOpt.Session = registryAuthSession(registryURL, username, password)
	}
	return solveOpt, nil
}