diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 255476053..ab91cead9 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -11,18 +11,30 @@ updates:
     package-ecosystem: gomod
     schedule:
       interval: weekly
+    groups:
+      dagger-dependencies:
+        patterns:
+          - "*"
 
   - directory: /acceptance-tests
     open-pull-requests-limit: 5
     package-ecosystem: gomod
    schedule:
       interval: weekly
+    groups:
+      acceptance-tests-dependencies:
+        patterns:
+          - "*"
 
   - directory: /docs
     open-pull-requests-limit: 5
     package-ecosystem: npm
     schedule:
       interval: weekly
+    groups:
+      docs-dependencies:
+        patterns:
+          - "*"
 
   - directory: /
     open-pull-requests-limit: 5
diff --git a/.github/workflows/acceptance-tests.yml b/.github/workflows/acceptance-tests.yml
index 65f069da7..701a98b34 100644
--- a/.github/workflows/acceptance-tests.yml
+++ b/.github/workflows/acceptance-tests.yml
@@ -15,6 +15,7 @@ jobs:
       - name: Checkout
         uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
       - name: Call Dagger Function
+        id: dagger
         uses: dagger/dagger-for-github@11048419d80c283890d0dd68187d44541f63dd89 # v5.11.0
         with:
           version: "0.11.9"
diff --git a/.github/workflows/check-dagger-drift.yml b/.github/workflows/check-dagger-drift.yml
new file mode 100644
index 000000000..02cb891f8
--- /dev/null
+++ b/.github/workflows/check-dagger-drift.yml
@@ -0,0 +1,47 @@
+name: "Check for drift in Dagger files"
+
+on:
+  pull_request:
+    branches:
+      - main
+
+jobs:
+  check-dagger-drift:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout
+        uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
+
+      - name: Determine Dagger version
+        id: dagger_version
+        run: |
+          sudo wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq
+          sudo chmod +x /usr/bin/yq
+          cat .github/workflows/acceptance-tests.yml| yq -r '.jobs.build.steps[] | select(.id == "dagger") | .with.version' > .version
+          echo "version=$(<.version)" > $GITHUB_OUTPUT
+          rm -rf .version
+
+      - uses: actions/cache@v4
+        id: cache_daggercli
+        with:
+          path: bin
+          key: daggercli-download-${{ steps.dagger_version.outputs.version }}
+
+      - name: Install Dagger CLI
+        if: steps.cache_daggercli.outputs.cache-hit != 'true'
+        shell: bash
+        run: |
+          curl -L https://dl.dagger.io/dagger/install.sh | DAGGER_VERSION=${{ steps.dagger_version.outputs.version }} sh
+
+      - name: Check drift
+        run: |
+          set -e
+          export PATH=$PATH:$PWD/bin
+          make dagger-develop
+          if [[ -z "$(git status --porcelain ./dagger)" ]]; then
+            echo "No drift detected"
+          else
+            echo "Drift detected. Run 'make dagger-develop' and commit the changed files."
+            git diff
+            exit 1
+          fi
diff --git a/Makefile b/Makefile
index 6f308ce72..a04fd8d61 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: lint test static install uninstall cross acceptance-tests
+.PHONY: lint test static install uninstall cross acceptance-tests dagger-develop
 GOPATH := $(shell go env GOPATH)
 VERSION := $(shell git describe --tags --dirty --always)
 BIN_DIR := $(GOPATH)/bin
@@ -41,3 +41,8 @@ cross: $(GOX)
 # Docker container
 container: static
 	docker build -t grafana/tanka .
+
+dagger-develop:
+	@cp dagger/.gitignore dagger/.gitignore.bak
+	@dagger develop --silent
+	@mv dagger/.gitignore.bak dagger/.gitignore
diff --git a/dagger.json b/dagger.json
index 45f5b9aae..c081617fd 100644
--- a/dagger.json
+++ b/dagger.json
@@ -8,7 +8,7 @@
     }
   ],
   "source": "dagger",
-  "engineVersion": "v0.11.7",
+  "engineVersion": "v0.11.9",
   "views": [
     {
       "name": "source-files",
diff --git a/dagger/.gitignore b/dagger/.gitignore
index 7ebabcc14..e69de29bb 100644
--- a/dagger/.gitignore
+++ b/dagger/.gitignore
@@ -1,4 +0,0 @@
-/dagger.gen.go
-/internal/dagger
-/internal/querybuilder
-/internal/telemetry
diff --git a/dagger/README.md b/dagger/README.md
index 01384f5ac..ae42c48ce 100644
--- a/dagger/README.md
+++ b/dagger/README.md
@@ -1,5 +1,6 @@
 # Dagger setup for Tanka development
 
 This module includes dagger functions to be used during development of Tanka.
-To work on these functions, please run `dagger develop` in the root directory
-of the project, which generates the required libraries.
+It also contains auto-generated files created using `dagger develop`. When
+updating Dagger, you might need to run this command through
+`make dagger-develop` to update these files.
diff --git a/dagger/dagger.gen.go b/dagger/dagger.gen.go
new file mode 100644
index 000000000..a8b6dd7c2
--- /dev/null
+++ b/dagger/dagger.gen.go
@@ -0,0 +1,726 @@
+// Code generated by dagger. DO NOT EDIT.
+
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"log/slog"
+	"os"
+
+	"github.com/grafana/tanka/dagger/internal/dagger"
+	"github.com/grafana/tanka/dagger/internal/telemetry"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/sdk/resource"
+	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
+	"go.opentelemetry.io/otel/trace"
+)
+
+var dag = dagger.Connect()
+
+func Tracer() trace.Tracer {
+	return otel.Tracer("dagger.io/sdk.go")
+}
+
+// used for local MarshalJSON implementations
+var marshalCtx = context.Background()
+
+// called by main()
+func setMarshalContext(ctx context.Context) {
+	marshalCtx = ctx
+	dagger.SetMarshalContext(ctx)
+}
+
+type DaggerObject = dagger.DaggerObject
+
+type ExecError = dagger.ExecError
+
+// The `CacheVolumeID` scalar type represents an identifier for an object of type CacheVolume.
+type CacheVolumeID = dagger.CacheVolumeID
+
+// The `ContainerID` scalar type represents an identifier for an object of type Container.
+type ContainerID = dagger.ContainerID
+
+// The `CurrentModuleID` scalar type represents an identifier for an object of type CurrentModule.
+type CurrentModuleID = dagger.CurrentModuleID
+
+// The `DirectoryID` scalar type represents an identifier for an object of type Directory.
+type DirectoryID = dagger.DirectoryID
+
+// The `EnvVariableID` scalar type represents an identifier for an object of type EnvVariable.
+type EnvVariableID = dagger.EnvVariableID
+
+// The `FieldTypeDefID` scalar type represents an identifier for an object of type FieldTypeDef.
+type FieldTypeDefID = dagger.FieldTypeDefID
+
+// The `FileID` scalar type represents an identifier for an object of type File.
+type FileID = dagger.FileID
+
+// The `FunctionArgID` scalar type represents an identifier for an object of type FunctionArg.
+type FunctionArgID = dagger.FunctionArgID
+
+// The `FunctionCallArgValueID` scalar type represents an identifier for an object of type FunctionCallArgValue.
+type FunctionCallArgValueID = dagger.FunctionCallArgValueID
+
+// The `FunctionCallID` scalar type represents an identifier for an object of type FunctionCall.
+type FunctionCallID = dagger.FunctionCallID + +// The `FunctionID` scalar type represents an identifier for an object of type Function. +type FunctionID = dagger.FunctionID + +// The `GeneratedCodeID` scalar type represents an identifier for an object of type GeneratedCode. +type GeneratedCodeID = dagger.GeneratedCodeID + +// The `GitModuleSourceID` scalar type represents an identifier for an object of type GitModuleSource. +type GitModuleSourceID = dagger.GitModuleSourceID + +// The `GitRefID` scalar type represents an identifier for an object of type GitRef. +type GitRefID = dagger.GitRefID + +// The `GitRepositoryID` scalar type represents an identifier for an object of type GitRepository. +type GitRepositoryID = dagger.GitRepositoryID + +// The `InputTypeDefID` scalar type represents an identifier for an object of type InputTypeDef. +type InputTypeDefID = dagger.InputTypeDefID + +// The `InterfaceTypeDefID` scalar type represents an identifier for an object of type InterfaceTypeDef. +type InterfaceTypeDefID = dagger.InterfaceTypeDefID + +// An arbitrary JSON-encoded value. +type JSON = dagger.JSON + +// The `K3SID` scalar type represents an identifier for an object of type K3S. +type K3SID = dagger.K3SID + +// The `LabelID` scalar type represents an identifier for an object of type Label. +type LabelID = dagger.LabelID + +// The `ListTypeDefID` scalar type represents an identifier for an object of type ListTypeDef. +type ListTypeDefID = dagger.ListTypeDefID + +// The `LocalModuleSourceID` scalar type represents an identifier for an object of type LocalModuleSource. +type LocalModuleSourceID = dagger.LocalModuleSourceID + +// The `ModuleDependencyID` scalar type represents an identifier for an object of type ModuleDependency. +type ModuleDependencyID = dagger.ModuleDependencyID + +// The `ModuleID` scalar type represents an identifier for an object of type Module. +type ModuleID = dagger.ModuleID + +// The `ModuleSourceID` scalar type represents an identifier for an object of type ModuleSource. +type ModuleSourceID = dagger.ModuleSourceID + +// The `ModuleSourceViewID` scalar type represents an identifier for an object of type ModuleSourceView. +type ModuleSourceViewID = dagger.ModuleSourceViewID + +// The `ObjectTypeDefID` scalar type represents an identifier for an object of type ObjectTypeDef. +type ObjectTypeDefID = dagger.ObjectTypeDefID + +// The platform config OS and architecture in a Container. +// +// The format is [os]/[platform]/[version] (e.g., "darwin/arm64/v7", "windows/amd64", "linux/arm64"). +type Platform = dagger.Platform + +// The `PortID` scalar type represents an identifier for an object of type Port. +type PortID = dagger.PortID + +// The `ScalarTypeDefID` scalar type represents an identifier for an object of type ScalarTypeDef. +type ScalarTypeDefID = dagger.ScalarTypeDefID + +// The `SecretID` scalar type represents an identifier for an object of type Secret. +type SecretID = dagger.SecretID + +// The `ServiceID` scalar type represents an identifier for an object of type Service. +type ServiceID = dagger.ServiceID + +// The `SocketID` scalar type represents an identifier for an object of type Socket. +type SocketID = dagger.SocketID + +// The `TerminalID` scalar type represents an identifier for an object of type Terminal. +type TerminalID = dagger.TerminalID + +// The `TypeDefID` scalar type represents an identifier for an object of type TypeDef. +type TypeDefID = dagger.TypeDefID + +// The absence of a value. 
+// +// A Null Void is used as a placeholder for resolvers that do not return anything. +type Void = dagger.Void + +// Key value object that represents a build argument. +type BuildArg = dagger.BuildArg + +// Key value object that represents a pipeline label. +type PipelineLabel = dagger.PipelineLabel + +// Port forwarding rules for tunneling network traffic. +type PortForward = dagger.PortForward + +// A directory whose contents persist across runs. +type CacheVolume = dagger.CacheVolume + +// An OCI-compatible container, also known as a Docker container. +type Container = dagger.Container + +type WithContainerFunc = dagger.WithContainerFunc + +// ContainerAsTarballOpts contains options for Container.AsTarball +type ContainerAsTarballOpts = dagger.ContainerAsTarballOpts + +// ContainerBuildOpts contains options for Container.Build +type ContainerBuildOpts = dagger.ContainerBuildOpts + +// ContainerExportOpts contains options for Container.Export +type ContainerExportOpts = dagger.ContainerExportOpts + +// ContainerImportOpts contains options for Container.Import +type ContainerImportOpts = dagger.ContainerImportOpts + +// ContainerPipelineOpts contains options for Container.Pipeline +type ContainerPipelineOpts = dagger.ContainerPipelineOpts + +// ContainerPublishOpts contains options for Container.Publish +type ContainerPublishOpts = dagger.ContainerPublishOpts + +// ContainerTerminalOpts contains options for Container.Terminal +type ContainerTerminalOpts = dagger.ContainerTerminalOpts + +// ContainerWithDefaultTerminalCmdOpts contains options for Container.WithDefaultTerminalCmd +type ContainerWithDefaultTerminalCmdOpts = dagger.ContainerWithDefaultTerminalCmdOpts + +// ContainerWithDirectoryOpts contains options for Container.WithDirectory +type ContainerWithDirectoryOpts = dagger.ContainerWithDirectoryOpts + +// ContainerWithEntrypointOpts contains options for Container.WithEntrypoint +type ContainerWithEntrypointOpts = dagger.ContainerWithEntrypointOpts + +// ContainerWithEnvVariableOpts contains options for Container.WithEnvVariable +type ContainerWithEnvVariableOpts = dagger.ContainerWithEnvVariableOpts + +// ContainerWithExecOpts contains options for Container.WithExec +type ContainerWithExecOpts = dagger.ContainerWithExecOpts + +// ContainerWithExposedPortOpts contains options for Container.WithExposedPort +type ContainerWithExposedPortOpts = dagger.ContainerWithExposedPortOpts + +// ContainerWithFileOpts contains options for Container.WithFile +type ContainerWithFileOpts = dagger.ContainerWithFileOpts + +// ContainerWithFilesOpts contains options for Container.WithFiles +type ContainerWithFilesOpts = dagger.ContainerWithFilesOpts + +// ContainerWithMountedCacheOpts contains options for Container.WithMountedCache +type ContainerWithMountedCacheOpts = dagger.ContainerWithMountedCacheOpts + +// ContainerWithMountedDirectoryOpts contains options for Container.WithMountedDirectory +type ContainerWithMountedDirectoryOpts = dagger.ContainerWithMountedDirectoryOpts + +// ContainerWithMountedFileOpts contains options for Container.WithMountedFile +type ContainerWithMountedFileOpts = dagger.ContainerWithMountedFileOpts + +// ContainerWithMountedSecretOpts contains options for Container.WithMountedSecret +type ContainerWithMountedSecretOpts = dagger.ContainerWithMountedSecretOpts + +// ContainerWithNewFileOpts contains options for Container.WithNewFile +type ContainerWithNewFileOpts = dagger.ContainerWithNewFileOpts + +// ContainerWithUnixSocketOpts contains options for 
Container.WithUnixSocket +type ContainerWithUnixSocketOpts = dagger.ContainerWithUnixSocketOpts + +// ContainerWithoutEntrypointOpts contains options for Container.WithoutEntrypoint +type ContainerWithoutEntrypointOpts = dagger.ContainerWithoutEntrypointOpts + +// ContainerWithoutExposedPortOpts contains options for Container.WithoutExposedPort +type ContainerWithoutExposedPortOpts = dagger.ContainerWithoutExposedPortOpts + +// Reflective module API provided to functions at runtime. +type CurrentModule = dagger.CurrentModule + +// CurrentModuleWorkdirOpts contains options for CurrentModule.Workdir +type CurrentModuleWorkdirOpts = dagger.CurrentModuleWorkdirOpts + +// A directory. +type Directory = dagger.Directory + +type WithDirectoryFunc = dagger.WithDirectoryFunc + +// DirectoryAsModuleOpts contains options for Directory.AsModule +type DirectoryAsModuleOpts = dagger.DirectoryAsModuleOpts + +// DirectoryDockerBuildOpts contains options for Directory.DockerBuild +type DirectoryDockerBuildOpts = dagger.DirectoryDockerBuildOpts + +// DirectoryEntriesOpts contains options for Directory.Entries +type DirectoryEntriesOpts = dagger.DirectoryEntriesOpts + +// DirectoryExportOpts contains options for Directory.Export +type DirectoryExportOpts = dagger.DirectoryExportOpts + +// DirectoryPipelineOpts contains options for Directory.Pipeline +type DirectoryPipelineOpts = dagger.DirectoryPipelineOpts + +// DirectoryWithDirectoryOpts contains options for Directory.WithDirectory +type DirectoryWithDirectoryOpts = dagger.DirectoryWithDirectoryOpts + +// DirectoryWithFileOpts contains options for Directory.WithFile +type DirectoryWithFileOpts = dagger.DirectoryWithFileOpts + +// DirectoryWithFilesOpts contains options for Directory.WithFiles +type DirectoryWithFilesOpts = dagger.DirectoryWithFilesOpts + +// DirectoryWithNewDirectoryOpts contains options for Directory.WithNewDirectory +type DirectoryWithNewDirectoryOpts = dagger.DirectoryWithNewDirectoryOpts + +// DirectoryWithNewFileOpts contains options for Directory.WithNewFile +type DirectoryWithNewFileOpts = dagger.DirectoryWithNewFileOpts + +// An environment variable name and value. +type EnvVariable = dagger.EnvVariable + +// A definition of a field on a custom object defined in a Module. +// +// A field on an object has a static value, as opposed to a function on an object whose value is computed by invoking code (and can accept arguments). +type FieldTypeDef = dagger.FieldTypeDef + +// A file. +type File = dagger.File + +type WithFileFunc = dagger.WithFileFunc + +// FileExportOpts contains options for File.Export +type FileExportOpts = dagger.FileExportOpts + +// Function represents a resolver provided by a Module. +// +// A function always evaluates against a parent object and is given a set of named arguments. +type Function = dagger.Function + +type WithFunctionFunc = dagger.WithFunctionFunc + +// FunctionWithArgOpts contains options for Function.WithArg +type FunctionWithArgOpts = dagger.FunctionWithArgOpts + +// An argument accepted by a function. +// +// This is a specification for an argument at function definition time, not an argument passed at function call time. +type FunctionArg = dagger.FunctionArg + +// An active function call. +type FunctionCall = dagger.FunctionCall + +// A value passed as a named argument to a function call. +type FunctionCallArgValue = dagger.FunctionCallArgValue + +// The result of running an SDK's codegen. 
+type GeneratedCode = dagger.GeneratedCode + +type WithGeneratedCodeFunc = dagger.WithGeneratedCodeFunc + +// Module source originating from a git repo. +type GitModuleSource = dagger.GitModuleSource + +// A git ref (tag, branch, or commit). +type GitRef = dagger.GitRef + +// GitRefTreeOpts contains options for GitRef.Tree +type GitRefTreeOpts = dagger.GitRefTreeOpts + +// A git repository. +type GitRepository = dagger.GitRepository + +type WithGitRepositoryFunc = dagger.WithGitRepositoryFunc + +// A graphql input type, which is essentially just a group of named args. +// This is currently only used to represent pre-existing usage of graphql input types +// in the core API. It is not used by user modules and shouldn't ever be as user +// module accept input objects via their id rather than graphql input types. +type InputTypeDef = dagger.InputTypeDef + +// A definition of a custom interface defined in a Module. +type InterfaceTypeDef = dagger.InterfaceTypeDef + +type K3S = dagger.K3S + +type WithK3SFunc = dagger.WithK3SFunc + +// A simple key value object that represents a label. +type Label = dagger.Label + +// A definition of a list type in a Module. +type ListTypeDef = dagger.ListTypeDef + +// Module source that that originates from a path locally relative to an arbitrary directory. +type LocalModuleSource = dagger.LocalModuleSource + +// A Dagger module. +type Module = dagger.Module + +type WithModuleFunc = dagger.WithModuleFunc + +// The configuration of dependency of a module. +type ModuleDependency = dagger.ModuleDependency + +// The source needed to load and run a module, along with any metadata about the source such as versions/urls/etc. +type ModuleSource = dagger.ModuleSource + +type WithModuleSourceFunc = dagger.WithModuleSourceFunc + +// ModuleSourceResolveDirectoryFromCallerOpts contains options for ModuleSource.ResolveDirectoryFromCaller +type ModuleSourceResolveDirectoryFromCallerOpts = dagger.ModuleSourceResolveDirectoryFromCallerOpts + +// A named set of path filters that can be applied to directory arguments provided to functions. +type ModuleSourceView = dagger.ModuleSourceView + +// A definition of a custom object defined in a Module. +type ObjectTypeDef = dagger.ObjectTypeDef + +// A port exposed by a container. +type Port = dagger.Port + +// The root of the DAG. +type Client = dagger.Client + +type WithClientFunc = dagger.WithClientFunc + +// ContainerOpts contains options for Client.Container +type ContainerOpts = dagger.ContainerOpts + +// DirectoryOpts contains options for Client.Directory +type DirectoryOpts = dagger.DirectoryOpts + +// GitOpts contains options for Client.Git +type GitOpts = dagger.GitOpts + +// HTTPOpts contains options for Client.HTTP +type HTTPOpts = dagger.HTTPOpts + +// ModuleDependencyOpts contains options for Client.ModuleDependency +type ModuleDependencyOpts = dagger.ModuleDependencyOpts + +// ModuleSourceOpts contains options for Client.ModuleSource +type ModuleSourceOpts = dagger.ModuleSourceOpts + +// PipelineOpts contains options for Client.Pipeline +type PipelineOpts = dagger.PipelineOpts + +// SecretOpts contains options for Client.Secret +type SecretOpts = dagger.SecretOpts + +// A definition of a custom scalar defined in a Module. +type ScalarTypeDef = dagger.ScalarTypeDef + +// A reference to a secret value, which can be handled more safely than the value itself. +type Secret = dagger.Secret + +// A content-addressed service providing TCP connectivity. 
+type Service = dagger.Service + +// ServiceEndpointOpts contains options for Service.Endpoint +type ServiceEndpointOpts = dagger.ServiceEndpointOpts + +// ServiceStopOpts contains options for Service.Stop +type ServiceStopOpts = dagger.ServiceStopOpts + +// ServiceUpOpts contains options for Service.Up +type ServiceUpOpts = dagger.ServiceUpOpts + +// A Unix or TCP/IP socket that can be mounted into a container. +type Socket = dagger.Socket + +// An interactive terminal that clients can connect to. +type Terminal = dagger.Terminal + +// A definition of a parameter or return type in a Module. +type TypeDef = dagger.TypeDef + +type WithTypeDefFunc = dagger.WithTypeDefFunc + +// TypeDefWithFieldOpts contains options for TypeDef.WithField +type TypeDefWithFieldOpts = dagger.TypeDefWithFieldOpts + +// TypeDefWithInterfaceOpts contains options for TypeDef.WithInterface +type TypeDefWithInterfaceOpts = dagger.TypeDefWithInterfaceOpts + +// TypeDefWithObjectOpts contains options for TypeDef.WithObject +type TypeDefWithObjectOpts = dagger.TypeDefWithObjectOpts + +// TypeDefWithScalarOpts contains options for TypeDef.WithScalar +type TypeDefWithScalarOpts = dagger.TypeDefWithScalarOpts + +// Sharing mode of the cache volume. +type CacheSharingMode = dagger.CacheSharingMode + +const ( + // Shares the cache volume amongst many build pipelines, but will serialize the writes + Locked CacheSharingMode = dagger.Locked + + // Keeps a cache volume for a single build pipeline + Private CacheSharingMode = dagger.Private + + // Shares the cache volume amongst many build pipelines + Shared CacheSharingMode = dagger.Shared +) + +// Compression algorithm to use for image layers. +type ImageLayerCompression = dagger.ImageLayerCompression + +const ( + Estargz ImageLayerCompression = dagger.Estargz + + Gzip ImageLayerCompression = dagger.Gzip + + Uncompressed ImageLayerCompression = dagger.Uncompressed + + Zstd ImageLayerCompression = dagger.Zstd +) + +// Mediatypes to use in published or exported image metadata. +type ImageMediaTypes = dagger.ImageMediaTypes + +const ( + Dockermediatypes ImageMediaTypes = dagger.Dockermediatypes + + Ocimediatypes ImageMediaTypes = dagger.Ocimediatypes +) + +// The kind of module source. +type ModuleSourceKind = dagger.ModuleSourceKind + +const ( + GitSource ModuleSourceKind = dagger.GitSource + + LocalSource ModuleSourceKind = dagger.LocalSource +) + +// Transport layer network protocol associated to a port. +type NetworkProtocol = dagger.NetworkProtocol + +const ( + Tcp NetworkProtocol = dagger.Tcp + + Udp NetworkProtocol = dagger.Udp +) + +// Distinguishes the different kinds of TypeDefs. +type TypeDefKind = dagger.TypeDefKind + +const ( + // A boolean value. + BooleanKind TypeDefKind = dagger.BooleanKind + + // A graphql input type, used only when representing the core API via TypeDefs. + InputKind TypeDefKind = dagger.InputKind + + // An integer value. + IntegerKind TypeDefKind = dagger.IntegerKind + + // A named type of functions that can be matched+implemented by other objects+interfaces. + // + // Always paired with an InterfaceTypeDef. + InterfaceKind TypeDefKind = dagger.InterfaceKind + + // A list of values all having the same type. + // + // Always paired with a ListTypeDef. + ListKind TypeDefKind = dagger.ListKind + + // A named type defined in the GraphQL schema, with fields and functions. + // + // Always paired with an ObjectTypeDef. + ObjectKind TypeDefKind = dagger.ObjectKind + + // A scalar value of any basic kind. 
+ ScalarKind TypeDefKind = dagger.ScalarKind + + // A string value. + StringKind TypeDefKind = dagger.StringKind + + // A special kind used to signify that no value is returned. + // + // This is used for functions that have no return value. The outer TypeDef specifying this Kind is always Optional, as the Void is never actually represented. + VoidKind TypeDefKind = dagger.VoidKind +) + +// ptr returns a pointer to the given value. +func ptr[T any](v T) *T { + return &v +} + +// convertSlice converts a slice of one type to a slice of another type using a +// converter function +func convertSlice[I any, O any](in []I, f func(I) O) []O { + out := make([]O, len(in)) + for i, v := range in { + out[i] = f(v) + } + return out +} + +func (r Tanka) MarshalJSON() ([]byte, error) { + var concrete struct{} + return json.Marshal(&concrete) +} + +func (r *Tanka) UnmarshalJSON(bs []byte) error { + var concrete struct{} + err := json.Unmarshal(bs, &concrete) + if err != nil { + return err + } + return nil +} + +func main() { + ctx := context.Background() + + // Direct slog to the new stderr. This is only for dev time debugging, and + // runtime errors/warnings. + slog.SetDefault(slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{ + Level: slog.LevelWarn, + }))) + + if err := dispatch(ctx); err != nil { + fmt.Println(err.Error()) + os.Exit(2) + } +} + +func dispatch(ctx context.Context) error { + ctx = telemetry.InitEmbedded(ctx, resource.NewWithAttributes( + semconv.SchemaURL, + semconv.ServiceNameKey.String("dagger-go-sdk"), + // TODO version? + )) + defer telemetry.Close() + + // A lot of the "work" actually happens when we're marshalling the return + // value, which entails getting object IDs, which happens in MarshalJSON, + // which has no ctx argument, so we use this lovely global variable. 
+ setMarshalContext(ctx) + + fnCall := dag.CurrentFunctionCall() + parentName, err := fnCall.ParentName(ctx) + if err != nil { + return fmt.Errorf("get parent name: %w", err) + } + fnName, err := fnCall.Name(ctx) + if err != nil { + return fmt.Errorf("get fn name: %w", err) + } + parentJson, err := fnCall.Parent(ctx) + if err != nil { + return fmt.Errorf("get fn parent: %w", err) + } + fnArgs, err := fnCall.InputArgs(ctx) + if err != nil { + return fmt.Errorf("get fn args: %w", err) + } + + inputArgs := map[string][]byte{} + for _, fnArg := range fnArgs { + argName, err := fnArg.Name(ctx) + if err != nil { + return fmt.Errorf("get fn arg name: %w", err) + } + argValue, err := fnArg.Value(ctx) + if err != nil { + return fmt.Errorf("get fn arg value: %w", err) + } + inputArgs[argName] = []byte(argValue) + } + + result, err := invoke(ctx, []byte(parentJson), parentName, fnName, inputArgs) + if err != nil { + return fmt.Errorf("invoke: %w", err) + } + resultBytes, err := json.Marshal(result) + if err != nil { + return fmt.Errorf("marshal: %w", err) + } + _, err = fnCall.ReturnValue(ctx, JSON(resultBytes)) + if err != nil { + return fmt.Errorf("store return value: %w", err) + } + return nil +} + +func invoke(ctx context.Context, parentJSON []byte, parentName string, fnName string, inputArgs map[string][]byte) (_ any, err error) { + _ = inputArgs + switch parentName { + case "Tanka": + switch fnName { + case "Build": + var parent Tanka + err = json.Unmarshal(parentJSON, &parent) + if err != nil { + panic(fmt.Errorf("%s: %w", "failed to unmarshal parent object", err)) + } + var rootDir *Directory + if inputArgs["rootDir"] != nil { + err = json.Unmarshal([]byte(inputArgs["rootDir"]), &rootDir) + if err != nil { + panic(fmt.Errorf("%s: %w", "failed to unmarshal input arg rootDir", err)) + } + } + return (*Tanka).Build(&parent, ctx, rootDir), nil + case "GetGoVersion": + var parent Tanka + err = json.Unmarshal(parentJSON, &parent) + if err != nil { + panic(fmt.Errorf("%s: %w", "failed to unmarshal parent object", err)) + } + var file *File + if inputArgs["file"] != nil { + err = json.Unmarshal([]byte(inputArgs["file"]), &file) + if err != nil { + panic(fmt.Errorf("%s: %w", "failed to unmarshal input arg file", err)) + } + } + return (*Tanka).GetGoVersion(&parent, ctx, file) + case "AcceptanceTests": + var parent Tanka + err = json.Unmarshal(parentJSON, &parent) + if err != nil { + panic(fmt.Errorf("%s: %w", "failed to unmarshal parent object", err)) + } + var rootDir *Directory + if inputArgs["rootDir"] != nil { + err = json.Unmarshal([]byte(inputArgs["rootDir"]), &rootDir) + if err != nil { + panic(fmt.Errorf("%s: %w", "failed to unmarshal input arg rootDir", err)) + } + } + var acceptanceTestsDir *Directory + if inputArgs["acceptanceTestsDir"] != nil { + err = json.Unmarshal([]byte(inputArgs["acceptanceTestsDir"]), &acceptanceTestsDir) + if err != nil { + panic(fmt.Errorf("%s: %w", "failed to unmarshal input arg acceptanceTestsDir", err)) + } + } + return (*Tanka).AcceptanceTests(&parent, ctx, rootDir, acceptanceTestsDir) + default: + return nil, fmt.Errorf("unknown function %s", fnName) + } + case "": + return dag.Module(). + WithObject( + dag.TypeDef().WithObject("Tanka"). + WithFunction( + dag.Function("Build", + dag.TypeDef().WithObject("Container")). + WithArg("rootDir", dag.TypeDef().WithObject("Directory"))). + WithFunction( + dag.Function("GetGoVersion", + dag.TypeDef().WithKind(StringKind)). + WithArg("file", dag.TypeDef().WithObject("File"))). 
+ WithFunction( + dag.Function("AcceptanceTests", + dag.TypeDef().WithKind(StringKind)). + WithArg("rootDir", dag.TypeDef().WithObject("Directory")). + WithArg("acceptanceTestsDir", dag.TypeDef().WithObject("Directory")))), nil + default: + return nil, fmt.Errorf("unknown object %s", parentName) + } +} diff --git a/dagger/go.mod b/dagger/go.mod index e00f0ea6b..cee5f0035 100644 --- a/dagger/go.mod +++ b/dagger/go.mod @@ -6,11 +6,11 @@ require ( github.com/99designs/gqlgen v0.17.44 github.com/Khan/genqlient v0.7.0 github.com/vektah/gqlparser/v2 v2.5.11 - go.opentelemetry.io/otel v1.26.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 - go.opentelemetry.io/otel/sdk v1.26.0 - go.opentelemetry.io/otel/trace v1.26.0 + go.opentelemetry.io/otel v1.27.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 + go.opentelemetry.io/otel/sdk v1.27.0 + go.opentelemetry.io/otel/trace v1.27.0 golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa golang.org/x/sync v0.7.0 google.golang.org/grpc v1.64.0 @@ -25,16 +25,16 @@ require ( github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect github.com/sosodev/duration v1.2.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.0.0-20240518090000-14441aefdf88 - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.2.0-alpha - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 // indirect - go.opentelemetry.io/otel/log v0.2.0-alpha - go.opentelemetry.io/otel/metric v1.26.0 // indirect - go.opentelemetry.io/otel/sdk/log v0.2.0-alpha - go.opentelemetry.io/proto/otlp v1.2.0 + go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.3.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 // indirect + go.opentelemetry.io/otel/log v0.3.0 + go.opentelemetry.io/otel/metric v1.27.0 // indirect + go.opentelemetry.io/otel/sdk/log v0.3.0 + go.opentelemetry.io/proto/otlp v1.3.1 golang.org/x/net v0.25.0 // indirect golang.org/x/sys v0.20.0 // indirect golang.org/x/text v0.15.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240515191416-fc5f0ca64291 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 // indirect google.golang.org/protobuf v1.34.1 // indirect ) diff --git a/dagger/go.sum b/dagger/go.sum index 3c614bc00..554948621 100644 --- a/dagger/go.sum +++ b/dagger/go.sum @@ -35,30 +35,30 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/vektah/gqlparser/v2 v2.5.11 h1:JJxLtXIoN7+3x6MBdtIP59TP1RANnY7pXOaDnADQSf8= github.com/vektah/gqlparser/v2 v2.5.11/go.mod h1:1rCcfwB2ekJofmluGWXMSEnPMZgbxzwj6FaZ/4OT8Cc= -go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs= -go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4= +go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg= +go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.0.0-20240518090000-14441aefdf88 h1:oM0GTNKGlc5qHctWeIGTVyda4iFFalOzMZ3Ehj5rwB4= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc 
v0.0.0-20240518090000-14441aefdf88/go.mod h1:JGG8ebaMO5nXOPnvKEl+DiA4MGwFjCbjsxT1WHIEBPY= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.2.0-alpha h1:z2s6Zba+OUyayRv5m1AXWNUTGh57K1iMhy6emU5QT5Y= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.2.0-alpha/go.mod h1:paOXXyUgPW6jYxYkP0pB47H2zHE1fPvMJ4E4G9LHOi0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 h1:1u/AyyOqAWzy+SkPxDpahCNZParHV8Vid1RnI2clyDE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0/go.mod h1:z46paqbJ9l7c9fIPCXTqTGwhQZ5XoTIsfeFYWboizjs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0 h1:Waw9Wfpo/IXzOI8bCB7DIk+0JZcqqsyn1JFnAc+iam8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.26.0/go.mod h1:wnJIG4fOqyynOnnQF/eQb4/16VlX2EJAHhHgqIqWfAo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0 h1:1wp/gyxsuYtuE/JFxsQRtcCDtMrO2qMvlfXALU5wkzI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.26.0/go.mod h1:gbTHmghkGgqxMomVQQMur1Nba4M0MQ8AYThXDUjsJ38= -go.opentelemetry.io/otel/log v0.2.0-alpha h1:ixOPvMzserpqA07SENHvRzkZOsnG0XbPr74hv1AQ+n0= -go.opentelemetry.io/otel/log v0.2.0-alpha/go.mod h1:vbFZc65yq4c4ssvXY43y/nIqkNJLxORrqw0L85P59LA= -go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30= -go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4= -go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8= -go.opentelemetry.io/otel/sdk v1.26.0/go.mod h1:0p8MXpqLeJ0pzcszQQN4F0S5FVjBLgypeGSngLsmirs= -go.opentelemetry.io/otel/sdk/log v0.2.0-alpha h1:jGTkL/jroJ31jnP6jDl34N/mDOfRGGYZHcHsCM+5kWA= -go.opentelemetry.io/otel/sdk/log v0.2.0-alpha/go.mod h1:Hd8Lw9FPGUM3pfY7iGMRvFaC2Nyau4Ajb5WnQ9OdIho= -go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA= -go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0= -go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= -go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.3.0 h1:ccBrA8nCY5mM0y5uO7FT0ze4S0TuFcWdDB2FxGMTjkI= +go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.3.0/go.mod h1:/9pb6634zi2Lk8LYg9Q0X8Ar6jka4dkFOylBLbVQPCE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0 h1:R9DE4kQ4k+YtfLI2ULwX82VtNQ2J8yZmA7ZIF/D+7Mc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.27.0/go.mod h1:OQFyQVrDlbe+R7xrEyDr/2Wr67Ol0hRUgsfA+V5A95s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0 h1:QY7/0NeRPKlzusf40ZE4t1VlMKbqSNT7cJRYzWuja0s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.27.0/go.mod h1:HVkSiDhTM9BoUJU8qE6j2eSWLLXvi1USXjyd2BXT8PY= +go.opentelemetry.io/otel/log v0.3.0 h1:kJRFkpUFYtny37NQzL386WbznUByZx186DpEMKhEGZs= +go.opentelemetry.io/otel/log v0.3.0/go.mod h1:ziCwqZr9soYDwGNbIL+6kAvQC+ANvjgG367HVcyR/ys= +go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik= +go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak= +go.opentelemetry.io/otel/sdk v1.27.0 
h1:mlk+/Y1gLPLn84U4tI8d3GNJmGT/eXe3ZuOXN9kTWmI= +go.opentelemetry.io/otel/sdk v1.27.0/go.mod h1:Ha9vbLwJE6W86YstIywK2xFfPjbWlCuwPtMkKdz/Y4A= +go.opentelemetry.io/otel/sdk/log v0.3.0 h1:GEjJ8iftz2l+XO1GF2856r7yYVh74URiF9JMcAacr5U= +go.opentelemetry.io/otel/sdk/log v0.3.0/go.mod h1:BwCxtmux6ACLuys1wlbc0+vGBd+xytjmjajwqqIul2g= +go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw= +go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= @@ -71,8 +71,8 @@ golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -google.golang.org/genproto/googleapis/api v0.0.0-20240515191416-fc5f0ca64291 h1:4HZJ3Xv1cmrJ+0aFo304Zn79ur1HMxptAE7aCPNLSqc= -google.golang.org/genproto/googleapis/api v0.0.0-20240515191416-fc5f0ca64291/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= +google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5 h1:P8OJ/WCl/Xo4E4zoe4/bifHpSmmKwARqyqE4nW6J2GQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240520151616-dc85e6b867a5/go.mod h1:RGnPtTG7r4i8sPlNyDeikXF99hMM+hN6QMm4ooG9g2g= google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291 h1:AgADTJarZTBqgjiUzRgfaBchgYB3/WFTC80GPwsMcRI= google.golang.org/genproto/googleapis/rpc v0.0.0-20240515191416-fc5f0ca64291/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= diff --git a/dagger/internal/dagger/dagger.gen.go b/dagger/internal/dagger/dagger.gen.go new file mode 100644 index 000000000..17a40f6d4 --- /dev/null +++ b/dagger/internal/dagger/dagger.gen.go @@ -0,0 +1,7334 @@ +// Code generated by dagger. DO NOT EDIT. + +package dagger + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net" + "net/http" + "os" + "reflect" + "strconv" + "strings" + + "github.com/Khan/genqlient/graphql" + "github.com/vektah/gqlparser/v2/gqlerror" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" + + "github.com/grafana/tanka/dagger/internal/querybuilder" + "github.com/grafana/tanka/dagger/internal/telemetry" +) + +func Tracer() trace.Tracer { + return otel.Tracer("dagger.io/sdk.go") +} + +// reassigned at runtime after the span is initialized +var marshalCtx = context.Background() + +// SetMarshalContext is a hack that lets us set the ctx to use for +// MarshalJSON implementations that get an object's ID. +func SetMarshalContext(ctx context.Context) { + marshalCtx = ctx +} + +// assertNotNil panic if the given value is nil. +// This function is used to validate that input with pointer type are not nil. +// See https://github.com/dagger/dagger/issues/5696 for more context. 
+func assertNotNil(argName string, value any) { + // We use reflect because just comparing value to nil is not working since + // the value is wrapped into a type when passed as parameter. + // E.g., nil become (*dagger.File)(nil). + if reflect.ValueOf(value).IsNil() { + panic(fmt.Sprintf("unexpected nil pointer for argument %q", argName)) + } +} + +type DaggerObject querybuilder.GraphQLMarshaller + +// getCustomError parses a GraphQL error into a more specific error type. +func getCustomError(err error) error { + var gqlErr *gqlerror.Error + + if !errors.As(err, &gqlErr) { + return nil + } + + ext := gqlErr.Extensions + + typ, ok := ext["_type"].(string) + if !ok { + return nil + } + + if typ == "EXEC_ERROR" { + e := &ExecError{ + original: err, + } + if code, ok := ext["exitCode"].(float64); ok { + e.ExitCode = int(code) + } + if args, ok := ext["cmd"].([]interface{}); ok { + cmd := make([]string, len(args)) + for i, v := range args { + cmd[i] = v.(string) + } + e.Cmd = cmd + } + if stdout, ok := ext["stdout"].(string); ok { + e.Stdout = stdout + } + if stderr, ok := ext["stderr"].(string); ok { + e.Stderr = stderr + } + return e + } + + return nil +} + +// ExecError is an API error from an exec operation. +type ExecError struct { + original error + Cmd []string + ExitCode int + Stdout string + Stderr string +} + +func (e *ExecError) Error() string { + // As a default when just printing the error, include the stdout + // and stderr for visibility + msg := e.Message() + if strings.TrimSpace(e.Stdout) != "" { + msg += "\nStdout:\n" + e.Stdout + } + if strings.TrimSpace(e.Stderr) != "" { + msg += "\nStderr:\n" + e.Stderr + } + return msg +} + +func (e *ExecError) Message() string { + return e.original.Error() +} + +func (e *ExecError) Unwrap() error { + return e.original +} + +// The `CacheVolumeID` scalar type represents an identifier for an object of type CacheVolume. +type CacheVolumeID string + +// The `ContainerID` scalar type represents an identifier for an object of type Container. +type ContainerID string + +// The `CurrentModuleID` scalar type represents an identifier for an object of type CurrentModule. +type CurrentModuleID string + +// The `DirectoryID` scalar type represents an identifier for an object of type Directory. +type DirectoryID string + +// The `EnvVariableID` scalar type represents an identifier for an object of type EnvVariable. +type EnvVariableID string + +// The `FieldTypeDefID` scalar type represents an identifier for an object of type FieldTypeDef. +type FieldTypeDefID string + +// The `FileID` scalar type represents an identifier for an object of type File. +type FileID string + +// The `FunctionArgID` scalar type represents an identifier for an object of type FunctionArg. +type FunctionArgID string + +// The `FunctionCallArgValueID` scalar type represents an identifier for an object of type FunctionCallArgValue. +type FunctionCallArgValueID string + +// The `FunctionCallID` scalar type represents an identifier for an object of type FunctionCall. +type FunctionCallID string + +// The `FunctionID` scalar type represents an identifier for an object of type Function. +type FunctionID string + +// The `GeneratedCodeID` scalar type represents an identifier for an object of type GeneratedCode. +type GeneratedCodeID string + +// The `GitModuleSourceID` scalar type represents an identifier for an object of type GitModuleSource. +type GitModuleSourceID string + +// The `GitRefID` scalar type represents an identifier for an object of type GitRef. 
+type GitRefID string + +// The `GitRepositoryID` scalar type represents an identifier for an object of type GitRepository. +type GitRepositoryID string + +// The `InputTypeDefID` scalar type represents an identifier for an object of type InputTypeDef. +type InputTypeDefID string + +// The `InterfaceTypeDefID` scalar type represents an identifier for an object of type InterfaceTypeDef. +type InterfaceTypeDefID string + +// An arbitrary JSON-encoded value. +type JSON string + +// The `K3SID` scalar type represents an identifier for an object of type K3S. +type K3SID string + +// The `LabelID` scalar type represents an identifier for an object of type Label. +type LabelID string + +// The `ListTypeDefID` scalar type represents an identifier for an object of type ListTypeDef. +type ListTypeDefID string + +// The `LocalModuleSourceID` scalar type represents an identifier for an object of type LocalModuleSource. +type LocalModuleSourceID string + +// The `ModuleDependencyID` scalar type represents an identifier for an object of type ModuleDependency. +type ModuleDependencyID string + +// The `ModuleID` scalar type represents an identifier for an object of type Module. +type ModuleID string + +// The `ModuleSourceID` scalar type represents an identifier for an object of type ModuleSource. +type ModuleSourceID string + +// The `ModuleSourceViewID` scalar type represents an identifier for an object of type ModuleSourceView. +type ModuleSourceViewID string + +// The `ObjectTypeDefID` scalar type represents an identifier for an object of type ObjectTypeDef. +type ObjectTypeDefID string + +// The platform config OS and architecture in a Container. +// +// The format is [os]/[platform]/[version] (e.g., "darwin/arm64/v7", "windows/amd64", "linux/arm64"). +type Platform string + +// The `PortID` scalar type represents an identifier for an object of type Port. +type PortID string + +// The `ScalarTypeDefID` scalar type represents an identifier for an object of type ScalarTypeDef. +type ScalarTypeDefID string + +// The `SecretID` scalar type represents an identifier for an object of type Secret. +type SecretID string + +// The `ServiceID` scalar type represents an identifier for an object of type Service. +type ServiceID string + +// The `SocketID` scalar type represents an identifier for an object of type Socket. +type SocketID string + +// The `TerminalID` scalar type represents an identifier for an object of type Terminal. +type TerminalID string + +// The `TypeDefID` scalar type represents an identifier for an object of type TypeDef. +type TypeDefID string + +// The absence of a value. +// +// A Null Void is used as a placeholder for resolvers that do not return anything. +type Void string + +// Key value object that represents a build argument. +type BuildArg struct { + // The build argument name. + Name string `json:"name"` + + // The build argument value. + Value string `json:"value"` +} + +// Key value object that represents a pipeline label. +type PipelineLabel struct { + // Label name. + Name string `json:"name"` + + // Label value. + Value string `json:"value"` +} + +// Port forwarding rules for tunneling network traffic. +type PortForward struct { + // Destination port for traffic. + Backend int `json:"backend"` + + // Port to expose to clients. If unspecified, a default will be chosen. + Frontend int `json:"frontend"` + + // Transport layer protocol to use for traffic. + Protocol NetworkProtocol `json:"protocol,omitempty"` +} + +// A directory whose contents persist across runs. 
+type CacheVolume struct { + query *querybuilder.Selection + + id *CacheVolumeID +} + +func (r *CacheVolume) WithGraphQLQuery(q *querybuilder.Selection) *CacheVolume { + return &CacheVolume{ + query: q, + } +} + +// A unique identifier for this CacheVolume. +func (r *CacheVolume) ID(ctx context.Context) (CacheVolumeID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response CacheVolumeID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *CacheVolume) XXX_GraphQLType() string { + return "CacheVolume" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *CacheVolume) XXX_GraphQLIDType() string { + return "CacheVolumeID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *CacheVolume) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *CacheVolume) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *CacheVolume) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadCacheVolumeFromID(CacheVolumeID(id)) + return nil +} + +// An OCI-compatible container, also known as a Docker container. +type Container struct { + query *querybuilder.Selection + + envVariable *string + export *bool + id *ContainerID + imageRef *string + label *string + platform *Platform + publish *string + stderr *string + stdout *string + sync *ContainerID + user *string + workdir *string +} +type WithContainerFunc func(r *Container) *Container + +// With calls the provided function with current Container. +// +// This is useful for reusability and readability by not breaking the calling chain. +func (r *Container) With(f WithContainerFunc) *Container { + return f(r) +} + +func (r *Container) WithGraphQLQuery(q *querybuilder.Selection) *Container { + return &Container{ + query: q, + } +} + +// Turn the container into a Service. +// +// Be sure to set any exposed ports before this conversion. +func (r *Container) AsService() *Service { + q := r.query.Select("asService") + + return &Service{ + query: q, + } +} + +// ContainerAsTarballOpts contains options for Container.AsTarball +type ContainerAsTarballOpts struct { + // Identifiers for other platform specific containers. + // + // Used for multi-platform images. + PlatformVariants []*Container + // Force each layer of the image to use the specified compression algorithm. + // + // If this is unset, then if a layer already has a compressed blob in the engine's cache, that will be used (this can result in a mix of compression algorithms for different layers). If this is unset and a layer has no compressed blob in the engine's cache, then it will be compressed using Gzip. + ForcedCompression ImageLayerCompression + // Use the specified media types for the image's layers. + // + // Defaults to OCI, which is largely compatible with most recent container runtimes, but Docker may be needed for older runtimes without OCI support. + MediaTypes ImageMediaTypes +} + +// Returns a File representing the container serialized to a tarball. 
+func (r *Container) AsTarball(opts ...ContainerAsTarballOpts) *File { + q := r.query.Select("asTarball") + for i := len(opts) - 1; i >= 0; i-- { + // `platformVariants` optional argument + if !querybuilder.IsZeroValue(opts[i].PlatformVariants) { + q = q.Arg("platformVariants", opts[i].PlatformVariants) + } + // `forcedCompression` optional argument + if !querybuilder.IsZeroValue(opts[i].ForcedCompression) { + q = q.Arg("forcedCompression", opts[i].ForcedCompression) + } + // `mediaTypes` optional argument + if !querybuilder.IsZeroValue(opts[i].MediaTypes) { + q = q.Arg("mediaTypes", opts[i].MediaTypes) + } + } + + return &File{ + query: q, + } +} + +// ContainerBuildOpts contains options for Container.Build +type ContainerBuildOpts struct { + // Path to the Dockerfile to use. + Dockerfile string + // Target build stage to build. + Target string + // Additional build arguments. + BuildArgs []BuildArg + // Secrets to pass to the build. + // + // They will be mounted at /run/secrets/[secret-name] in the build container + // + // They can be accessed in the Dockerfile using the "secret" mount type and mount path /run/secrets/[secret-name], e.g. RUN --mount=type=secret,id=my-secret curl [http://example.com?token=$(cat /run/secrets/my-secret)](http://example.com?token=$(cat /run/secrets/my-secret)) + Secrets []*Secret +} + +// Initializes this container from a Dockerfile build. +func (r *Container) Build(context *Directory, opts ...ContainerBuildOpts) *Container { + assertNotNil("context", context) + q := r.query.Select("build") + for i := len(opts) - 1; i >= 0; i-- { + // `dockerfile` optional argument + if !querybuilder.IsZeroValue(opts[i].Dockerfile) { + q = q.Arg("dockerfile", opts[i].Dockerfile) + } + // `target` optional argument + if !querybuilder.IsZeroValue(opts[i].Target) { + q = q.Arg("target", opts[i].Target) + } + // `buildArgs` optional argument + if !querybuilder.IsZeroValue(opts[i].BuildArgs) { + q = q.Arg("buildArgs", opts[i].BuildArgs) + } + // `secrets` optional argument + if !querybuilder.IsZeroValue(opts[i].Secrets) { + q = q.Arg("secrets", opts[i].Secrets) + } + } + q = q.Arg("context", context) + + return &Container{ + query: q, + } +} + +// Retrieves default arguments for future commands. +func (r *Container) DefaultArgs(ctx context.Context) ([]string, error) { + q := r.query.Select("defaultArgs") + + var response []string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Retrieves a directory at the given path. +// +// Mounts are included. +func (r *Container) Directory(path string) *Directory { + q := r.query.Select("directory") + q = q.Arg("path", path) + + return &Directory{ + query: q, + } +} + +// Retrieves entrypoint to be prepended to the arguments of all commands. +func (r *Container) Entrypoint(ctx context.Context) ([]string, error) { + q := r.query.Select("entrypoint") + + var response []string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Retrieves the value of the specified environment variable. +func (r *Container) EnvVariable(ctx context.Context, name string) (string, error) { + if r.envVariable != nil { + return *r.envVariable, nil + } + q := r.query.Select("envVariable") + q = q.Arg("name", name) + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Retrieves the list of environment variables passed to commands. 
+func (r *Container) EnvVariables(ctx context.Context) ([]EnvVariable, error) { + q := r.query.Select("envVariables") + + q = q.Select("id") + + type envVariables struct { + Id EnvVariableID + } + + convert := func(fields []envVariables) []EnvVariable { + out := []EnvVariable{} + + for i := range fields { + val := EnvVariable{id: &fields[i].Id} + val.query = q.Root().Select("loadEnvVariableFromID").Arg("id", fields[i].Id) + out = append(out, val) + } + + return out + } + var response []envVariables + + q = q.Bind(&response) + + err := q.Execute(ctx) + if err != nil { + return nil, err + } + + return convert(response), nil +} + +// EXPERIMENTAL API! Subject to change/removal at any time. +// +// Configures all available GPUs on the host to be accessible to this container. +// +// This currently works for Nvidia devices only. +func (r *Container) ExperimentalWithAllGPUs() *Container { + q := r.query.Select("experimentalWithAllGPUs") + + return &Container{ + query: q, + } +} + +// EXPERIMENTAL API! Subject to change/removal at any time. +// +// Configures the provided list of devices to be accessible to this container. +// +// This currently works for Nvidia devices only. +func (r *Container) ExperimentalWithGPU(devices []string) *Container { + q := r.query.Select("experimentalWithGPU") + q = q.Arg("devices", devices) + + return &Container{ + query: q, + } +} + +// ContainerExportOpts contains options for Container.Export +type ContainerExportOpts struct { + // Identifiers for other platform specific containers. + // + // Used for multi-platform image. + PlatformVariants []*Container + // Force each layer of the exported image to use the specified compression algorithm. + // + // If this is unset, then if a layer already has a compressed blob in the engine's cache, that will be used (this can result in a mix of compression algorithms for different layers). If this is unset and a layer has no compressed blob in the engine's cache, then it will be compressed using Gzip. + ForcedCompression ImageLayerCompression + // Use the specified media types for the exported image's layers. + // + // Defaults to OCI, which is largely compatible with most recent container runtimes, but Docker may be needed for older runtimes without OCI support. + MediaTypes ImageMediaTypes +} + +// Writes the container as an OCI tarball to the destination file path on the host. +// +// Return true on success. +// +// It can also export platform variants. +func (r *Container) Export(ctx context.Context, path string, opts ...ContainerExportOpts) (bool, error) { + if r.export != nil { + return *r.export, nil + } + q := r.query.Select("export") + for i := len(opts) - 1; i >= 0; i-- { + // `platformVariants` optional argument + if !querybuilder.IsZeroValue(opts[i].PlatformVariants) { + q = q.Arg("platformVariants", opts[i].PlatformVariants) + } + // `forcedCompression` optional argument + if !querybuilder.IsZeroValue(opts[i].ForcedCompression) { + q = q.Arg("forcedCompression", opts[i].ForcedCompression) + } + // `mediaTypes` optional argument + if !querybuilder.IsZeroValue(opts[i].MediaTypes) { + q = q.Arg("mediaTypes", opts[i].MediaTypes) + } + } + q = q.Arg("path", path) + + var response bool + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Retrieves the list of exposed ports. +// +// This includes ports already exposed by the image, even if not explicitly added with dagger. 
+func (r *Container) ExposedPorts(ctx context.Context) ([]Port, error) { + q := r.query.Select("exposedPorts") + + q = q.Select("id") + + type exposedPorts struct { + Id PortID + } + + convert := func(fields []exposedPorts) []Port { + out := []Port{} + + for i := range fields { + val := Port{id: &fields[i].Id} + val.query = q.Root().Select("loadPortFromID").Arg("id", fields[i].Id) + out = append(out, val) + } + + return out + } + var response []exposedPorts + + q = q.Bind(&response) + + err := q.Execute(ctx) + if err != nil { + return nil, err + } + + return convert(response), nil +} + +// Retrieves a file at the given path. +// +// Mounts are included. +func (r *Container) File(path string) *File { + q := r.query.Select("file") + q = q.Arg("path", path) + + return &File{ + query: q, + } +} + +// Initializes this container from a pulled base image. +func (r *Container) From(address string) *Container { + q := r.query.Select("from") + q = q.Arg("address", address) + + return &Container{ + query: q, + } +} + +// A unique identifier for this Container. +func (r *Container) ID(ctx context.Context) (ContainerID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response ContainerID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *Container) XXX_GraphQLType() string { + return "Container" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *Container) XXX_GraphQLIDType() string { + return "ContainerID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *Container) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *Container) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *Container) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadContainerFromID(ContainerID(id)) + return nil +} + +// The unique image reference which can only be retrieved immediately after the 'Container.From' call. +func (r *Container) ImageRef(ctx context.Context) (string, error) { + if r.imageRef != nil { + return *r.imageRef, nil + } + q := r.query.Select("imageRef") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// ContainerImportOpts contains options for Container.Import +type ContainerImportOpts struct { + // Identifies the tag to import from the archive, if the archive bundles multiple tags. + Tag string +} + +// Reads the container from an OCI tarball. +func (r *Container) Import(source *File, opts ...ContainerImportOpts) *Container { + assertNotNil("source", source) + q := r.query.Select("import") + for i := len(opts) - 1; i >= 0; i-- { + // `tag` optional argument + if !querybuilder.IsZeroValue(opts[i].Tag) { + q = q.Arg("tag", opts[i].Tag) + } + } + q = q.Arg("source", source) + + return &Container{ + query: q, + } +} + +// Retrieves the value of the specified label. 
+func (r *Container) Label(ctx context.Context, name string) (string, error) { + if r.label != nil { + return *r.label, nil + } + q := r.query.Select("label") + q = q.Arg("name", name) + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Retrieves the list of labels passed to container. +func (r *Container) Labels(ctx context.Context) ([]Label, error) { + q := r.query.Select("labels") + + q = q.Select("id") + + type labels struct { + Id LabelID + } + + convert := func(fields []labels) []Label { + out := []Label{} + + for i := range fields { + val := Label{id: &fields[i].Id} + val.query = q.Root().Select("loadLabelFromID").Arg("id", fields[i].Id) + out = append(out, val) + } + + return out + } + var response []labels + + q = q.Bind(&response) + + err := q.Execute(ctx) + if err != nil { + return nil, err + } + + return convert(response), nil +} + +// Retrieves the list of paths where a directory is mounted. +func (r *Container) Mounts(ctx context.Context) ([]string, error) { + q := r.query.Select("mounts") + + var response []string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// ContainerPipelineOpts contains options for Container.Pipeline +type ContainerPipelineOpts struct { + // Description of the sub-pipeline. + Description string + // Labels to apply to the sub-pipeline. + Labels []PipelineLabel +} + +// Creates a named sub-pipeline. +func (r *Container) Pipeline(name string, opts ...ContainerPipelineOpts) *Container { + q := r.query.Select("pipeline") + for i := len(opts) - 1; i >= 0; i-- { + // `description` optional argument + if !querybuilder.IsZeroValue(opts[i].Description) { + q = q.Arg("description", opts[i].Description) + } + // `labels` optional argument + if !querybuilder.IsZeroValue(opts[i].Labels) { + q = q.Arg("labels", opts[i].Labels) + } + } + q = q.Arg("name", name) + + return &Container{ + query: q, + } +} + +// The platform this container executes and publishes as. +func (r *Container) Platform(ctx context.Context) (Platform, error) { + if r.platform != nil { + return *r.platform, nil + } + q := r.query.Select("platform") + + var response Platform + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// ContainerPublishOpts contains options for Container.Publish +type ContainerPublishOpts struct { + // Identifiers for other platform specific containers. + // + // Used for multi-platform image. + PlatformVariants []*Container + // Force each layer of the published image to use the specified compression algorithm. + // + // If this is unset, then if a layer already has a compressed blob in the engine's cache, that will be used (this can result in a mix of compression algorithms for different layers). If this is unset and a layer has no compressed blob in the engine's cache, then it will be compressed using Gzip. + ForcedCompression ImageLayerCompression + // Use the specified media types for the published image's layers. + // + // Defaults to OCI, which is largely compatible with most recent registries, but Docker may be needed for older registries without OCI support. + MediaTypes ImageMediaTypes +} + +// Publishes this container as a new image to the specified address. +// +// Publish returns a fully qualified ref. +// +// It can also publish platform variants. 
+func (r *Container) Publish(ctx context.Context, address string, opts ...ContainerPublishOpts) (string, error) { + if r.publish != nil { + return *r.publish, nil + } + q := r.query.Select("publish") + for i := len(opts) - 1; i >= 0; i-- { + // `platformVariants` optional argument + if !querybuilder.IsZeroValue(opts[i].PlatformVariants) { + q = q.Arg("platformVariants", opts[i].PlatformVariants) + } + // `forcedCompression` optional argument + if !querybuilder.IsZeroValue(opts[i].ForcedCompression) { + q = q.Arg("forcedCompression", opts[i].ForcedCompression) + } + // `mediaTypes` optional argument + if !querybuilder.IsZeroValue(opts[i].MediaTypes) { + q = q.Arg("mediaTypes", opts[i].MediaTypes) + } + } + q = q.Arg("address", address) + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Retrieves this container's root filesystem. Mounts are not included. +func (r *Container) Rootfs() *Directory { + q := r.query.Select("rootfs") + + return &Directory{ + query: q, + } +} + +// The error stream of the last executed command. +// +// Will execute default command if none is set, or error if there's no default. +func (r *Container) Stderr(ctx context.Context) (string, error) { + if r.stderr != nil { + return *r.stderr, nil + } + q := r.query.Select("stderr") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The output stream of the last executed command. +// +// Will execute default command if none is set, or error if there's no default. +func (r *Container) Stdout(ctx context.Context) (string, error) { + if r.stdout != nil { + return *r.stdout, nil + } + q := r.query.Select("stdout") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Forces evaluation of the pipeline in the engine. +// +// It doesn't run the default command if no exec has been set. +func (r *Container) Sync(ctx context.Context) (*Container, error) { + q := r.query.Select("sync") + + return r, q.Execute(ctx) +} + +// ContainerTerminalOpts contains options for Container.Terminal +type ContainerTerminalOpts struct { + // If set, override the container's default terminal command and invoke these command arguments instead. + Cmd []string + // Provides Dagger access to the executed command. + // + // Do not use this option unless you trust the command being executed; the command being executed WILL BE GRANTED FULL ACCESS TO YOUR HOST FILESYSTEM. + ExperimentalPrivilegedNesting bool + // Execute the command with all root capabilities. This is similar to running a command with "sudo" or executing "docker run" with the "--privileged" flag. Containerization does not provide any security guarantees when using this option. It should only be used when absolutely necessary and only with trusted commands. + InsecureRootCapabilities bool +} + +// Return an interactive terminal for this container using its configured default terminal command if not overridden by args (or sh as a fallback default). 
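// A sketch of the publish API above, assuming a built *Container; the registry
// address and function name are illustrative and error handling is minimal.
func publishImage(ctx context.Context, ctr *Container) (string, error) {
	// Evaluate the pipeline first so build failures surface before the push.
	if _, err := ctr.Sync(ctx); err != nil {
		return "", err
	}
	// Publish returns the fully qualified reference of the pushed image.
	return ctr.Publish(ctx, "registry.example.com/tanka:dev")
}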
+func (r *Container) Terminal(opts ...ContainerTerminalOpts) *Terminal { + q := r.query.Select("terminal") + for i := len(opts) - 1; i >= 0; i-- { + // `cmd` optional argument + if !querybuilder.IsZeroValue(opts[i].Cmd) { + q = q.Arg("cmd", opts[i].Cmd) + } + // `experimentalPrivilegedNesting` optional argument + if !querybuilder.IsZeroValue(opts[i].ExperimentalPrivilegedNesting) { + q = q.Arg("experimentalPrivilegedNesting", opts[i].ExperimentalPrivilegedNesting) + } + // `insecureRootCapabilities` optional argument + if !querybuilder.IsZeroValue(opts[i].InsecureRootCapabilities) { + q = q.Arg("insecureRootCapabilities", opts[i].InsecureRootCapabilities) + } + } + + return &Terminal{ + query: q, + } +} + +// Retrieves the user to be set for all commands. +func (r *Container) User(ctx context.Context) (string, error) { + if r.user != nil { + return *r.user, nil + } + q := r.query.Select("user") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Configures default arguments for future commands. +func (r *Container) WithDefaultArgs(args []string) *Container { + q := r.query.Select("withDefaultArgs") + q = q.Arg("args", args) + + return &Container{ + query: q, + } +} + +// ContainerWithDefaultTerminalCmdOpts contains options for Container.WithDefaultTerminalCmd +type ContainerWithDefaultTerminalCmdOpts struct { + // Provides Dagger access to the executed command. + // + // Do not use this option unless you trust the command being executed; the command being executed WILL BE GRANTED FULL ACCESS TO YOUR HOST FILESYSTEM. + ExperimentalPrivilegedNesting bool + // Execute the command with all root capabilities. This is similar to running a command with "sudo" or executing "docker run" with the "--privileged" flag. Containerization does not provide any security guarantees when using this option. It should only be used when absolutely necessary and only with trusted commands. + InsecureRootCapabilities bool +} + +// Set the default command to invoke for the container's terminal API. +func (r *Container) WithDefaultTerminalCmd(args []string, opts ...ContainerWithDefaultTerminalCmdOpts) *Container { + q := r.query.Select("withDefaultTerminalCmd") + for i := len(opts) - 1; i >= 0; i-- { + // `experimentalPrivilegedNesting` optional argument + if !querybuilder.IsZeroValue(opts[i].ExperimentalPrivilegedNesting) { + q = q.Arg("experimentalPrivilegedNesting", opts[i].ExperimentalPrivilegedNesting) + } + // `insecureRootCapabilities` optional argument + if !querybuilder.IsZeroValue(opts[i].InsecureRootCapabilities) { + q = q.Arg("insecureRootCapabilities", opts[i].InsecureRootCapabilities) + } + } + q = q.Arg("args", args) + + return &Container{ + query: q, + } +} + +// ContainerWithDirectoryOpts contains options for Container.WithDirectory +type ContainerWithDirectoryOpts struct { + // Patterns to exclude in the written directory (e.g. ["node_modules/**", ".gitignore", ".git/"]). + Exclude []string + // Patterns to include in the written directory (e.g. ["*.go", "go.mod", "go.sum"]). + Include []string + // A user:group to set for the directory and its contents. + // + // The user and group can either be an ID (1000:1000) or a name (foo:bar). + // + // If the group is omitted, it defaults to the same as the user. + Owner string +} + +// Retrieves this container plus a directory written at the given path. 
+func (r *Container) WithDirectory(path string, directory *Directory, opts ...ContainerWithDirectoryOpts) *Container { + assertNotNil("directory", directory) + q := r.query.Select("withDirectory") + for i := len(opts) - 1; i >= 0; i-- { + // `exclude` optional argument + if !querybuilder.IsZeroValue(opts[i].Exclude) { + q = q.Arg("exclude", opts[i].Exclude) + } + // `include` optional argument + if !querybuilder.IsZeroValue(opts[i].Include) { + q = q.Arg("include", opts[i].Include) + } + // `owner` optional argument + if !querybuilder.IsZeroValue(opts[i].Owner) { + q = q.Arg("owner", opts[i].Owner) + } + } + q = q.Arg("path", path) + q = q.Arg("directory", directory) + + return &Container{ + query: q, + } +} + +// ContainerWithEntrypointOpts contains options for Container.WithEntrypoint +type ContainerWithEntrypointOpts struct { + // Don't remove the default arguments when setting the entrypoint. + KeepDefaultArgs bool +} + +// Retrieves this container but with a different command entrypoint. +func (r *Container) WithEntrypoint(args []string, opts ...ContainerWithEntrypointOpts) *Container { + q := r.query.Select("withEntrypoint") + for i := len(opts) - 1; i >= 0; i-- { + // `keepDefaultArgs` optional argument + if !querybuilder.IsZeroValue(opts[i].KeepDefaultArgs) { + q = q.Arg("keepDefaultArgs", opts[i].KeepDefaultArgs) + } + } + q = q.Arg("args", args) + + return &Container{ + query: q, + } +} + +// ContainerWithEnvVariableOpts contains options for Container.WithEnvVariable +type ContainerWithEnvVariableOpts struct { + // Replace `${VAR}` or `$VAR` in the value according to the current environment variables defined in the container (e.g., "/opt/bin:$PATH"). + Expand bool +} + +// Retrieves this container plus the given environment variable. +func (r *Container) WithEnvVariable(name string, value string, opts ...ContainerWithEnvVariableOpts) *Container { + q := r.query.Select("withEnvVariable") + for i := len(opts) - 1; i >= 0; i-- { + // `expand` optional argument + if !querybuilder.IsZeroValue(opts[i].Expand) { + q = q.Arg("expand", opts[i].Expand) + } + } + q = q.Arg("name", name) + q = q.Arg("value", value) + + return &Container{ + query: q, + } +} + +// ContainerWithExecOpts contains options for Container.WithExec +type ContainerWithExecOpts struct { + // If the container has an entrypoint, ignore it for args rather than using it to wrap them. + SkipEntrypoint bool + // Content to write to the command's standard input before closing (e.g., "Hello world"). + Stdin string + // Redirect the command's standard output to a file in the container (e.g., "/tmp/stdout"). + RedirectStdout string + // Redirect the command's standard error to a file in the container (e.g., "/tmp/stderr"). + RedirectStderr string + // Provides Dagger access to the executed command. + // + // Do not use this option unless you trust the command being executed; the command being executed WILL BE GRANTED FULL ACCESS TO YOUR HOST FILESYSTEM. + ExperimentalPrivilegedNesting bool + // Execute the command with all root capabilities. This is similar to running a command with "sudo" or executing "docker run" with the "--privileged" flag. Containerization does not provide any security guarantees when using this option. It should only be used when absolutely necessary and only with trusted commands. + InsecureRootCapabilities bool +} + +// Retrieves this container after executing the specified command inside it. 
+func (r *Container) WithExec(args []string, opts ...ContainerWithExecOpts) *Container { + q := r.query.Select("withExec") + for i := len(opts) - 1; i >= 0; i-- { + // `skipEntrypoint` optional argument + if !querybuilder.IsZeroValue(opts[i].SkipEntrypoint) { + q = q.Arg("skipEntrypoint", opts[i].SkipEntrypoint) + } + // `stdin` optional argument + if !querybuilder.IsZeroValue(opts[i].Stdin) { + q = q.Arg("stdin", opts[i].Stdin) + } + // `redirectStdout` optional argument + if !querybuilder.IsZeroValue(opts[i].RedirectStdout) { + q = q.Arg("redirectStdout", opts[i].RedirectStdout) + } + // `redirectStderr` optional argument + if !querybuilder.IsZeroValue(opts[i].RedirectStderr) { + q = q.Arg("redirectStderr", opts[i].RedirectStderr) + } + // `experimentalPrivilegedNesting` optional argument + if !querybuilder.IsZeroValue(opts[i].ExperimentalPrivilegedNesting) { + q = q.Arg("experimentalPrivilegedNesting", opts[i].ExperimentalPrivilegedNesting) + } + // `insecureRootCapabilities` optional argument + if !querybuilder.IsZeroValue(opts[i].InsecureRootCapabilities) { + q = q.Arg("insecureRootCapabilities", opts[i].InsecureRootCapabilities) + } + } + q = q.Arg("args", args) + + return &Container{ + query: q, + } +} + +// ContainerWithExposedPortOpts contains options for Container.WithExposedPort +type ContainerWithExposedPortOpts struct { + // Transport layer network protocol + Protocol NetworkProtocol + // Optional port description + Description string + // Skip the health check when run as a service. + ExperimentalSkipHealthcheck bool +} + +// Expose a network port. +// +// Exposed ports serve two purposes: +// +// - For health checks and introspection, when running services +// +// - For setting the EXPOSE OCI field when publishing the container +func (r *Container) WithExposedPort(port int, opts ...ContainerWithExposedPortOpts) *Container { + q := r.query.Select("withExposedPort") + for i := len(opts) - 1; i >= 0; i-- { + // `protocol` optional argument + if !querybuilder.IsZeroValue(opts[i].Protocol) { + q = q.Arg("protocol", opts[i].Protocol) + } + // `description` optional argument + if !querybuilder.IsZeroValue(opts[i].Description) { + q = q.Arg("description", opts[i].Description) + } + // `experimentalSkipHealthcheck` optional argument + if !querybuilder.IsZeroValue(opts[i].ExperimentalSkipHealthcheck) { + q = q.Arg("experimentalSkipHealthcheck", opts[i].ExperimentalSkipHealthcheck) + } + } + q = q.Arg("port", port) + + return &Container{ + query: q, + } +} + +// ContainerWithFileOpts contains options for Container.WithFile +type ContainerWithFileOpts struct { + // Permission given to the copied file (e.g., 0600). + Permissions int + // A user:group to set for the file. + // + // The user and group can either be an ID (1000:1000) or a name (foo:bar). + // + // If the group is omitted, it defaults to the same as the user. + Owner string +} + +// Retrieves this container plus the contents of the given file copied to the given path. 
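// A sketch of WithExec and Stdout above: run a command in a provided base
// image and capture its output; the command and function name are illustrative.
func toolVersion(ctx context.Context, base *Container) (string, error) {
	return base.
		WithExec([]string{"go", "version"}).
		Stdout(ctx)
}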
+func (r *Container) WithFile(path string, source *File, opts ...ContainerWithFileOpts) *Container { + assertNotNil("source", source) + q := r.query.Select("withFile") + for i := len(opts) - 1; i >= 0; i-- { + // `permissions` optional argument + if !querybuilder.IsZeroValue(opts[i].Permissions) { + q = q.Arg("permissions", opts[i].Permissions) + } + // `owner` optional argument + if !querybuilder.IsZeroValue(opts[i].Owner) { + q = q.Arg("owner", opts[i].Owner) + } + } + q = q.Arg("path", path) + q = q.Arg("source", source) + + return &Container{ + query: q, + } +} + +// ContainerWithFilesOpts contains options for Container.WithFiles +type ContainerWithFilesOpts struct { + // Permission given to the copied files (e.g., 0600). + Permissions int + // A user:group to set for the files. + // + // The user and group can either be an ID (1000:1000) or a name (foo:bar). + // + // If the group is omitted, it defaults to the same as the user. + Owner string +} + +// Retrieves this container plus the contents of the given files copied to the given path. +func (r *Container) WithFiles(path string, sources []*File, opts ...ContainerWithFilesOpts) *Container { + q := r.query.Select("withFiles") + for i := len(opts) - 1; i >= 0; i-- { + // `permissions` optional argument + if !querybuilder.IsZeroValue(opts[i].Permissions) { + q = q.Arg("permissions", opts[i].Permissions) + } + // `owner` optional argument + if !querybuilder.IsZeroValue(opts[i].Owner) { + q = q.Arg("owner", opts[i].Owner) + } + } + q = q.Arg("path", path) + q = q.Arg("sources", sources) + + return &Container{ + query: q, + } +} + +// Indicate that subsequent operations should be featured more prominently in the UI. +func (r *Container) WithFocus() *Container { + q := r.query.Select("withFocus") + + return &Container{ + query: q, + } +} + +// Retrieves this container plus the given label. +func (r *Container) WithLabel(name string, value string) *Container { + q := r.query.Select("withLabel") + q = q.Arg("name", name) + q = q.Arg("value", value) + + return &Container{ + query: q, + } +} + +// ContainerWithMountedCacheOpts contains options for Container.WithMountedCache +type ContainerWithMountedCacheOpts struct { + // Identifier of the directory to use as the cache volume's root. + Source *Directory + // Sharing mode of the cache volume. + Sharing CacheSharingMode + // A user:group to set for the mounted cache directory. + // + // Note that this changes the ownership of the specified mount along with the initial filesystem provided by source (if any). It does not have any effect if/when the cache has already been created. + // + // The user and group can either be an ID (1000:1000) or a name (foo:bar). + // + // If the group is omitted, it defaults to the same as the user. + Owner string +} + +// Retrieves this container plus a cache volume mounted at the given path. 
+func (r *Container) WithMountedCache(path string, cache *CacheVolume, opts ...ContainerWithMountedCacheOpts) *Container { + assertNotNil("cache", cache) + q := r.query.Select("withMountedCache") + for i := len(opts) - 1; i >= 0; i-- { + // `source` optional argument + if !querybuilder.IsZeroValue(opts[i].Source) { + q = q.Arg("source", opts[i].Source) + } + // `sharing` optional argument + if !querybuilder.IsZeroValue(opts[i].Sharing) { + q = q.Arg("sharing", opts[i].Sharing) + } + // `owner` optional argument + if !querybuilder.IsZeroValue(opts[i].Owner) { + q = q.Arg("owner", opts[i].Owner) + } + } + q = q.Arg("path", path) + q = q.Arg("cache", cache) + + return &Container{ + query: q, + } +} + +// ContainerWithMountedDirectoryOpts contains options for Container.WithMountedDirectory +type ContainerWithMountedDirectoryOpts struct { + // A user:group to set for the mounted directory and its contents. + // + // The user and group can either be an ID (1000:1000) or a name (foo:bar). + // + // If the group is omitted, it defaults to the same as the user. + Owner string +} + +// Retrieves this container plus a directory mounted at the given path. +func (r *Container) WithMountedDirectory(path string, source *Directory, opts ...ContainerWithMountedDirectoryOpts) *Container { + assertNotNil("source", source) + q := r.query.Select("withMountedDirectory") + for i := len(opts) - 1; i >= 0; i-- { + // `owner` optional argument + if !querybuilder.IsZeroValue(opts[i].Owner) { + q = q.Arg("owner", opts[i].Owner) + } + } + q = q.Arg("path", path) + q = q.Arg("source", source) + + return &Container{ + query: q, + } +} + +// ContainerWithMountedFileOpts contains options for Container.WithMountedFile +type ContainerWithMountedFileOpts struct { + // A user or user:group to set for the mounted file. + // + // The user and group can either be an ID (1000:1000) or a name (foo:bar). + // + // If the group is omitted, it defaults to the same as the user. + Owner string +} + +// Retrieves this container plus a file mounted at the given path. +func (r *Container) WithMountedFile(path string, source *File, opts ...ContainerWithMountedFileOpts) *Container { + assertNotNil("source", source) + q := r.query.Select("withMountedFile") + for i := len(opts) - 1; i >= 0; i-- { + // `owner` optional argument + if !querybuilder.IsZeroValue(opts[i].Owner) { + q = q.Arg("owner", opts[i].Owner) + } + } + q = q.Arg("path", path) + q = q.Arg("source", source) + + return &Container{ + query: q, + } +} + +// ContainerWithMountedSecretOpts contains options for Container.WithMountedSecret +type ContainerWithMountedSecretOpts struct { + // A user:group to set for the mounted secret. + // + // The user and group can either be an ID (1000:1000) or a name (foo:bar). + // + // If the group is omitted, it defaults to the same as the user. + Owner string + // Permission given to the mounted secret (e.g., 0600). + // + // This option requires an owner to be set to be active. + Mode int +} + +// Retrieves this container plus a secret mounted into a file at the given path. 
+func (r *Container) WithMountedSecret(path string, source *Secret, opts ...ContainerWithMountedSecretOpts) *Container { + assertNotNil("source", source) + q := r.query.Select("withMountedSecret") + for i := len(opts) - 1; i >= 0; i-- { + // `owner` optional argument + if !querybuilder.IsZeroValue(opts[i].Owner) { + q = q.Arg("owner", opts[i].Owner) + } + // `mode` optional argument + if !querybuilder.IsZeroValue(opts[i].Mode) { + q = q.Arg("mode", opts[i].Mode) + } + } + q = q.Arg("path", path) + q = q.Arg("source", source) + + return &Container{ + query: q, + } +} + +// Retrieves this container plus a temporary directory mounted at the given path. Any writes will be ephemeral to a single withExec call; they will not be persisted to subsequent withExecs. +func (r *Container) WithMountedTemp(path string) *Container { + q := r.query.Select("withMountedTemp") + q = q.Arg("path", path) + + return &Container{ + query: q, + } +} + +// ContainerWithNewFileOpts contains options for Container.WithNewFile +type ContainerWithNewFileOpts struct { + // Content of the file to write (e.g., "Hello world!"). + Contents string + // Permission given to the written file (e.g., 0600). + Permissions int + // A user:group to set for the file. + // + // The user and group can either be an ID (1000:1000) or a name (foo:bar). + // + // If the group is omitted, it defaults to the same as the user. + Owner string +} + +// Retrieves this container plus a new file written at the given path. +func (r *Container) WithNewFile(path string, opts ...ContainerWithNewFileOpts) *Container { + q := r.query.Select("withNewFile") + for i := len(opts) - 1; i >= 0; i-- { + // `contents` optional argument + if !querybuilder.IsZeroValue(opts[i].Contents) { + q = q.Arg("contents", opts[i].Contents) + } + // `permissions` optional argument + if !querybuilder.IsZeroValue(opts[i].Permissions) { + q = q.Arg("permissions", opts[i].Permissions) + } + // `owner` optional argument + if !querybuilder.IsZeroValue(opts[i].Owner) { + q = q.Arg("owner", opts[i].Owner) + } + } + q = q.Arg("path", path) + + return &Container{ + query: q, + } +} + +// Retrieves this container with a registry authentication for a given address. +func (r *Container) WithRegistryAuth(address string, username string, secret *Secret) *Container { + assertNotNil("secret", secret) + q := r.query.Select("withRegistryAuth") + q = q.Arg("address", address) + q = q.Arg("username", username) + q = q.Arg("secret", secret) + + return &Container{ + query: q, + } +} + +// Retrieves the container with the given directory mounted to /. +func (r *Container) WithRootfs(directory *Directory) *Container { + assertNotNil("directory", directory) + q := r.query.Select("withRootfs") + q = q.Arg("directory", directory) + + return &Container{ + query: q, + } +} + +// Retrieves this container plus an env variable containing the given secret. +func (r *Container) WithSecretVariable(name string, secret *Secret) *Container { + assertNotNil("secret", secret) + q := r.query.Select("withSecretVariable") + q = q.Arg("name", name) + q = q.Arg("secret", secret) + + return &Container{ + query: q, + } +} + +// Establish a runtime dependency on a service. +// +// The service will be started automatically when needed and detached when it is no longer needed, executing the default command if none is set. +// +// The service will be reachable from the container via the provided hostname alias. +// +// The service dependency will also convey to any files or directories produced by the container. 
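// A sketch combining the mount helpers above, assuming the caller supplies a
// cache volume and a secret; mount paths, names, and the env variable are
// illustrative assumptions.
func withGoCacheAndToken(base *Container, modCache *CacheVolume, token *Secret) *Container {
	return base.
		WithMountedCache("/go/pkg/mod", modCache).
		WithMountedSecret("/run/secrets/github-token", token).
		WithSecretVariable("GITHUB_TOKEN", token)
}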
+func (r *Container) WithServiceBinding(alias string, service *Service) *Container { + assertNotNil("service", service) + q := r.query.Select("withServiceBinding") + q = q.Arg("alias", alias) + q = q.Arg("service", service) + + return &Container{ + query: q, + } +} + +// ContainerWithUnixSocketOpts contains options for Container.WithUnixSocket +type ContainerWithUnixSocketOpts struct { + // A user:group to set for the mounted socket. + // + // The user and group can either be an ID (1000:1000) or a name (foo:bar). + // + // If the group is omitted, it defaults to the same as the user. + Owner string +} + +// Retrieves this container plus a socket forwarded to the given Unix socket path. +func (r *Container) WithUnixSocket(path string, source *Socket, opts ...ContainerWithUnixSocketOpts) *Container { + assertNotNil("source", source) + q := r.query.Select("withUnixSocket") + for i := len(opts) - 1; i >= 0; i-- { + // `owner` optional argument + if !querybuilder.IsZeroValue(opts[i].Owner) { + q = q.Arg("owner", opts[i].Owner) + } + } + q = q.Arg("path", path) + q = q.Arg("source", source) + + return &Container{ + query: q, + } +} + +// Retrieves this container with a different command user. +func (r *Container) WithUser(name string) *Container { + q := r.query.Select("withUser") + q = q.Arg("name", name) + + return &Container{ + query: q, + } +} + +// Retrieves this container with a different working directory. +func (r *Container) WithWorkdir(path string) *Container { + q := r.query.Select("withWorkdir") + q = q.Arg("path", path) + + return &Container{ + query: q, + } +} + +// Retrieves this container with unset default arguments for future commands. +func (r *Container) WithoutDefaultArgs() *Container { + q := r.query.Select("withoutDefaultArgs") + + return &Container{ + query: q, + } +} + +// Retrieves this container with the directory at the given path removed. +func (r *Container) WithoutDirectory(path string) *Container { + q := r.query.Select("withoutDirectory") + q = q.Arg("path", path) + + return &Container{ + query: q, + } +} + +// ContainerWithoutEntrypointOpts contains options for Container.WithoutEntrypoint +type ContainerWithoutEntrypointOpts struct { + // Don't remove the default arguments when unsetting the entrypoint. + KeepDefaultArgs bool +} + +// Retrieves this container with an unset command entrypoint. +func (r *Container) WithoutEntrypoint(opts ...ContainerWithoutEntrypointOpts) *Container { + q := r.query.Select("withoutEntrypoint") + for i := len(opts) - 1; i >= 0; i-- { + // `keepDefaultArgs` optional argument + if !querybuilder.IsZeroValue(opts[i].KeepDefaultArgs) { + q = q.Arg("keepDefaultArgs", opts[i].KeepDefaultArgs) + } + } + + return &Container{ + query: q, + } +} + +// Retrieves this container minus the given environment variable. +func (r *Container) WithoutEnvVariable(name string) *Container { + q := r.query.Select("withoutEnvVariable") + q = q.Arg("name", name) + + return &Container{ + query: q, + } +} + +// ContainerWithoutExposedPortOpts contains options for Container.WithoutExposedPort +type ContainerWithoutExposedPortOpts struct { + // Port protocol to unexpose + Protocol NetworkProtocol +} + +// Unexpose a previously exposed port. 
+func (r *Container) WithoutExposedPort(port int, opts ...ContainerWithoutExposedPortOpts) *Container { + q := r.query.Select("withoutExposedPort") + for i := len(opts) - 1; i >= 0; i-- { + // `protocol` optional argument + if !querybuilder.IsZeroValue(opts[i].Protocol) { + q = q.Arg("protocol", opts[i].Protocol) + } + } + q = q.Arg("port", port) + + return &Container{ + query: q, + } +} + +// Retrieves this container with the file at the given path removed. +func (r *Container) WithoutFile(path string) *Container { + q := r.query.Select("withoutFile") + q = q.Arg("path", path) + + return &Container{ + query: q, + } +} + +// Indicate that subsequent operations should not be featured more prominently in the UI. +// +// This is the initial state of all containers. +func (r *Container) WithoutFocus() *Container { + q := r.query.Select("withoutFocus") + + return &Container{ + query: q, + } +} + +// Retrieves this container minus the given environment label. +func (r *Container) WithoutLabel(name string) *Container { + q := r.query.Select("withoutLabel") + q = q.Arg("name", name) + + return &Container{ + query: q, + } +} + +// Retrieves this container after unmounting everything at the given path. +func (r *Container) WithoutMount(path string) *Container { + q := r.query.Select("withoutMount") + q = q.Arg("path", path) + + return &Container{ + query: q, + } +} + +// Retrieves this container without the registry authentication of a given address. +func (r *Container) WithoutRegistryAuth(address string) *Container { + q := r.query.Select("withoutRegistryAuth") + q = q.Arg("address", address) + + return &Container{ + query: q, + } +} + +// Retrieves this container minus the given environment variable containing the secret. +func (r *Container) WithoutSecretVariable(name string) *Container { + q := r.query.Select("withoutSecretVariable") + q = q.Arg("name", name) + + return &Container{ + query: q, + } +} + +// Retrieves this container with a previously added Unix socket removed. +func (r *Container) WithoutUnixSocket(path string) *Container { + q := r.query.Select("withoutUnixSocket") + q = q.Arg("path", path) + + return &Container{ + query: q, + } +} + +// Retrieves this container with an unset command user. +// +// Should default to root. +func (r *Container) WithoutUser() *Container { + q := r.query.Select("withoutUser") + + return &Container{ + query: q, + } +} + +// Retrieves this container with an unset working directory. +// +// Should default to "/". +func (r *Container) WithoutWorkdir() *Container { + q := r.query.Select("withoutWorkdir") + + return &Container{ + query: q, + } +} + +// Retrieves the working directory for all commands. +func (r *Container) Workdir(ctx context.Context) (string, error) { + if r.workdir != nil { + return *r.workdir, nil + } + q := r.query.Select("workdir") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Reflective module API provided to functions at runtime. +type CurrentModule struct { + query *querybuilder.Selection + + id *CurrentModuleID + name *string +} + +func (r *CurrentModule) WithGraphQLQuery(q *querybuilder.Selection) *CurrentModule { + return &CurrentModule{ + query: q, + } +} + +// A unique identifier for this CurrentModule. 
+func (r *CurrentModule) ID(ctx context.Context) (CurrentModuleID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response CurrentModuleID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *CurrentModule) XXX_GraphQLType() string { + return "CurrentModule" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *CurrentModule) XXX_GraphQLIDType() string { + return "CurrentModuleID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *CurrentModule) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *CurrentModule) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *CurrentModule) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadCurrentModuleFromID(CurrentModuleID(id)) + return nil +} + +// The name of the module being executed in +func (r *CurrentModule) Name(ctx context.Context) (string, error) { + if r.name != nil { + return *r.name, nil + } + q := r.query.Select("name") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The directory containing the module's source code loaded into the engine (plus any generated code that may have been created). +func (r *CurrentModule) Source() *Directory { + q := r.query.Select("source") + + return &Directory{ + query: q, + } +} + +// CurrentModuleWorkdirOpts contains options for CurrentModule.Workdir +type CurrentModuleWorkdirOpts struct { + // Exclude artifacts that match the given pattern (e.g., ["node_modules/", ".git*"]). + Exclude []string + // Include only artifacts that match the given pattern (e.g., ["app/", "package.*"]). + Include []string +} + +// Load a directory from the module's scratch working directory, including any changes that may have been made to it during module function execution. +func (r *CurrentModule) Workdir(path string, opts ...CurrentModuleWorkdirOpts) *Directory { + q := r.query.Select("workdir") + for i := len(opts) - 1; i >= 0; i-- { + // `exclude` optional argument + if !querybuilder.IsZeroValue(opts[i].Exclude) { + q = q.Arg("exclude", opts[i].Exclude) + } + // `include` optional argument + if !querybuilder.IsZeroValue(opts[i].Include) { + q = q.Arg("include", opts[i].Include) + } + } + q = q.Arg("path", path) + + return &Directory{ + query: q, + } +} + +// Load a file from the module's scratch working directory, including any changes that may have been made to it during module function execution.Load a file from the module's scratch working directory, including any changes that may have been made to it during module function execution. +func (r *CurrentModule) WorkdirFile(path string) *File { + q := r.query.Select("workdirFile") + q = q.Arg("path", path) + + return &File{ + query: q, + } +} + +// A directory. +type Directory struct { + query *querybuilder.Selection + + export *bool + id *DirectoryID + sync *DirectoryID +} +type WithDirectoryFunc func(r *Directory) *Directory + +// With calls the provided function with current Directory. +// +// This is useful for reusability and readability by not breaking the calling chain. 
+func (r *Directory) With(f WithDirectoryFunc) *Directory { + return f(r) +} + +func (r *Directory) WithGraphQLQuery(q *querybuilder.Selection) *Directory { + return &Directory{ + query: q, + } +} + +// DirectoryAsModuleOpts contains options for Directory.AsModule +type DirectoryAsModuleOpts struct { + // An optional subpath of the directory which contains the module's configuration file. + // + // This is needed when the module code is in a subdirectory but requires parent directories to be loaded in order to execute. For example, the module source code may need a go.mod, project.toml, package.json, etc. file from a parent directory. + // + // If not set, the module source code is loaded from the root of the directory. + SourceRootPath string +} + +// Load the directory as a Dagger module +func (r *Directory) AsModule(opts ...DirectoryAsModuleOpts) *Module { + q := r.query.Select("asModule") + for i := len(opts) - 1; i >= 0; i-- { + // `sourceRootPath` optional argument + if !querybuilder.IsZeroValue(opts[i].SourceRootPath) { + q = q.Arg("sourceRootPath", opts[i].SourceRootPath) + } + } + + return &Module{ + query: q, + } +} + +// Gets the difference between this directory and an another directory. +func (r *Directory) Diff(other *Directory) *Directory { + assertNotNil("other", other) + q := r.query.Select("diff") + q = q.Arg("other", other) + + return &Directory{ + query: q, + } +} + +// Retrieves a directory at the given path. +func (r *Directory) Directory(path string) *Directory { + q := r.query.Select("directory") + q = q.Arg("path", path) + + return &Directory{ + query: q, + } +} + +// DirectoryDockerBuildOpts contains options for Directory.DockerBuild +type DirectoryDockerBuildOpts struct { + // The platform to build. + Platform Platform + // Path to the Dockerfile to use (e.g., "frontend.Dockerfile"). + Dockerfile string + // Target build stage to build. + Target string + // Build arguments to use in the build. + BuildArgs []BuildArg + // Secrets to pass to the build. + // + // They will be mounted at /run/secrets/[secret-name]. + Secrets []*Secret +} + +// Builds a new Docker container from this directory. +func (r *Directory) DockerBuild(opts ...DirectoryDockerBuildOpts) *Container { + q := r.query.Select("dockerBuild") + for i := len(opts) - 1; i >= 0; i-- { + // `platform` optional argument + if !querybuilder.IsZeroValue(opts[i].Platform) { + q = q.Arg("platform", opts[i].Platform) + } + // `dockerfile` optional argument + if !querybuilder.IsZeroValue(opts[i].Dockerfile) { + q = q.Arg("dockerfile", opts[i].Dockerfile) + } + // `target` optional argument + if !querybuilder.IsZeroValue(opts[i].Target) { + q = q.Arg("target", opts[i].Target) + } + // `buildArgs` optional argument + if !querybuilder.IsZeroValue(opts[i].BuildArgs) { + q = q.Arg("buildArgs", opts[i].BuildArgs) + } + // `secrets` optional argument + if !querybuilder.IsZeroValue(opts[i].Secrets) { + q = q.Arg("secrets", opts[i].Secrets) + } + } + + return &Container{ + query: q, + } +} + +// DirectoryEntriesOpts contains options for Directory.Entries +type DirectoryEntriesOpts struct { + // Location of the directory to look at (e.g., "/src"). + Path string +} + +// Returns a list of files and directories at the given path. 
+func (r *Directory) Entries(ctx context.Context, opts ...DirectoryEntriesOpts) ([]string, error) { + q := r.query.Select("entries") + for i := len(opts) - 1; i >= 0; i-- { + // `path` optional argument + if !querybuilder.IsZeroValue(opts[i].Path) { + q = q.Arg("path", opts[i].Path) + } + } + + var response []string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// DirectoryExportOpts contains options for Directory.Export +type DirectoryExportOpts struct { + // If true, then the host directory will be wiped clean before exporting so that it exactly matches the directory being exported; this means it will delete any files on the host that aren't in the exported dir. If false (the default), the contents of the directory will be merged with any existing contents of the host directory, leaving any existing files on the host that aren't in the exported directory alone. + Wipe bool +} + +// Writes the contents of the directory to a path on the host. +func (r *Directory) Export(ctx context.Context, path string, opts ...DirectoryExportOpts) (bool, error) { + if r.export != nil { + return *r.export, nil + } + q := r.query.Select("export") + for i := len(opts) - 1; i >= 0; i-- { + // `wipe` optional argument + if !querybuilder.IsZeroValue(opts[i].Wipe) { + q = q.Arg("wipe", opts[i].Wipe) + } + } + q = q.Arg("path", path) + + var response bool + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Retrieves a file at the given path. +func (r *Directory) File(path string) *File { + q := r.query.Select("file") + q = q.Arg("path", path) + + return &File{ + query: q, + } +} + +// Returns a list of files and directories that matche the given pattern. +func (r *Directory) Glob(ctx context.Context, pattern string) ([]string, error) { + q := r.query.Select("glob") + q = q.Arg("pattern", pattern) + + var response []string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A unique identifier for this Directory. +func (r *Directory) ID(ctx context.Context) (DirectoryID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response DirectoryID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *Directory) XXX_GraphQLType() string { + return "Directory" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *Directory) XXX_GraphQLIDType() string { + return "DirectoryID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *Directory) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *Directory) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *Directory) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadDirectoryFromID(DirectoryID(id)) + return nil +} + +// DirectoryPipelineOpts contains options for Directory.Pipeline +type DirectoryPipelineOpts struct { + // Description of the sub-pipeline. + Description string + // Labels to apply to the sub-pipeline. + Labels []PipelineLabel +} + +// Creates a named sub-pipeline. 
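// A sketch of the Directory helpers above: glob for Go files and export the
// directory to the host; the pattern and host path are illustrative.
func exportGoSources(ctx context.Context, dir *Directory) ([]string, error) {
	goFiles, err := dir.Glob(ctx, "**/*.go")
	if err != nil {
		return nil, err
	}
	// Wipe: true makes the host directory exactly match the exported contents.
	if _, err := dir.Export(ctx, "./out", DirectoryExportOpts{Wipe: true}); err != nil {
		return nil, err
	}
	return goFiles, nil
}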
+func (r *Directory) Pipeline(name string, opts ...DirectoryPipelineOpts) *Directory { + q := r.query.Select("pipeline") + for i := len(opts) - 1; i >= 0; i-- { + // `description` optional argument + if !querybuilder.IsZeroValue(opts[i].Description) { + q = q.Arg("description", opts[i].Description) + } + // `labels` optional argument + if !querybuilder.IsZeroValue(opts[i].Labels) { + q = q.Arg("labels", opts[i].Labels) + } + } + q = q.Arg("name", name) + + return &Directory{ + query: q, + } +} + +// Force evaluation in the engine. +func (r *Directory) Sync(ctx context.Context) (*Directory, error) { + q := r.query.Select("sync") + + return r, q.Execute(ctx) +} + +// DirectoryWithDirectoryOpts contains options for Directory.WithDirectory +type DirectoryWithDirectoryOpts struct { + // Exclude artifacts that match the given pattern (e.g., ["node_modules/", ".git*"]). + Exclude []string + // Include only artifacts that match the given pattern (e.g., ["app/", "package.*"]). + Include []string +} + +// Retrieves this directory plus a directory written at the given path. +func (r *Directory) WithDirectory(path string, directory *Directory, opts ...DirectoryWithDirectoryOpts) *Directory { + assertNotNil("directory", directory) + q := r.query.Select("withDirectory") + for i := len(opts) - 1; i >= 0; i-- { + // `exclude` optional argument + if !querybuilder.IsZeroValue(opts[i].Exclude) { + q = q.Arg("exclude", opts[i].Exclude) + } + // `include` optional argument + if !querybuilder.IsZeroValue(opts[i].Include) { + q = q.Arg("include", opts[i].Include) + } + } + q = q.Arg("path", path) + q = q.Arg("directory", directory) + + return &Directory{ + query: q, + } +} + +// DirectoryWithFileOpts contains options for Directory.WithFile +type DirectoryWithFileOpts struct { + // Permission given to the copied file (e.g., 0600). + Permissions int +} + +// Retrieves this directory plus the contents of the given file copied to the given path. +func (r *Directory) WithFile(path string, source *File, opts ...DirectoryWithFileOpts) *Directory { + assertNotNil("source", source) + q := r.query.Select("withFile") + for i := len(opts) - 1; i >= 0; i-- { + // `permissions` optional argument + if !querybuilder.IsZeroValue(opts[i].Permissions) { + q = q.Arg("permissions", opts[i].Permissions) + } + } + q = q.Arg("path", path) + q = q.Arg("source", source) + + return &Directory{ + query: q, + } +} + +// DirectoryWithFilesOpts contains options for Directory.WithFiles +type DirectoryWithFilesOpts struct { + // Permission given to the copied files (e.g., 0600). + Permissions int +} + +// Retrieves this directory plus the contents of the given files copied to the given path. +func (r *Directory) WithFiles(path string, sources []*File, opts ...DirectoryWithFilesOpts) *Directory { + q := r.query.Select("withFiles") + for i := len(opts) - 1; i >= 0; i-- { + // `permissions` optional argument + if !querybuilder.IsZeroValue(opts[i].Permissions) { + q = q.Arg("permissions", opts[i].Permissions) + } + } + q = q.Arg("path", path) + q = q.Arg("sources", sources) + + return &Directory{ + query: q, + } +} + +// DirectoryWithNewDirectoryOpts contains options for Directory.WithNewDirectory +type DirectoryWithNewDirectoryOpts struct { + // Permission granted to the created directory (e.g., 0777). + Permissions int +} + +// Retrieves this directory plus a new directory created at the given path. 
+func (r *Directory) WithNewDirectory(path string, opts ...DirectoryWithNewDirectoryOpts) *Directory { + q := r.query.Select("withNewDirectory") + for i := len(opts) - 1; i >= 0; i-- { + // `permissions` optional argument + if !querybuilder.IsZeroValue(opts[i].Permissions) { + q = q.Arg("permissions", opts[i].Permissions) + } + } + q = q.Arg("path", path) + + return &Directory{ + query: q, + } +} + +// DirectoryWithNewFileOpts contains options for Directory.WithNewFile +type DirectoryWithNewFileOpts struct { + // Permission given to the copied file (e.g., 0600). + Permissions int +} + +// Retrieves this directory plus a new file written at the given path. +func (r *Directory) WithNewFile(path string, contents string, opts ...DirectoryWithNewFileOpts) *Directory { + q := r.query.Select("withNewFile") + for i := len(opts) - 1; i >= 0; i-- { + // `permissions` optional argument + if !querybuilder.IsZeroValue(opts[i].Permissions) { + q = q.Arg("permissions", opts[i].Permissions) + } + } + q = q.Arg("path", path) + q = q.Arg("contents", contents) + + return &Directory{ + query: q, + } +} + +// Retrieves this directory with all file/dir timestamps set to the given time. +func (r *Directory) WithTimestamps(timestamp int) *Directory { + q := r.query.Select("withTimestamps") + q = q.Arg("timestamp", timestamp) + + return &Directory{ + query: q, + } +} + +// Retrieves this directory with the directory at the given path removed. +func (r *Directory) WithoutDirectory(path string) *Directory { + q := r.query.Select("withoutDirectory") + q = q.Arg("path", path) + + return &Directory{ + query: q, + } +} + +// Retrieves this directory with the file at the given path removed. +func (r *Directory) WithoutFile(path string) *Directory { + q := r.query.Select("withoutFile") + q = q.Arg("path", path) + + return &Directory{ + query: q, + } +} + +// An environment variable name and value. +type EnvVariable struct { + query *querybuilder.Selection + + id *EnvVariableID + name *string + value *string +} + +func (r *EnvVariable) WithGraphQLQuery(q *querybuilder.Selection) *EnvVariable { + return &EnvVariable{ + query: q, + } +} + +// A unique identifier for this EnvVariable. +func (r *EnvVariable) ID(ctx context.Context) (EnvVariableID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response EnvVariableID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *EnvVariable) XXX_GraphQLType() string { + return "EnvVariable" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *EnvVariable) XXX_GraphQLIDType() string { + return "EnvVariableID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *EnvVariable) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *EnvVariable) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *EnvVariable) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadEnvVariableFromID(EnvVariableID(id)) + return nil +} + +// The environment variable name. 
+func (r *EnvVariable) Name(ctx context.Context) (string, error) { + if r.name != nil { + return *r.name, nil + } + q := r.query.Select("name") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The environment variable value. +func (r *EnvVariable) Value(ctx context.Context) (string, error) { + if r.value != nil { + return *r.value, nil + } + q := r.query.Select("value") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A definition of a field on a custom object defined in a Module. +// +// A field on an object has a static value, as opposed to a function on an object whose value is computed by invoking code (and can accept arguments). +type FieldTypeDef struct { + query *querybuilder.Selection + + description *string + id *FieldTypeDefID + name *string +} + +func (r *FieldTypeDef) WithGraphQLQuery(q *querybuilder.Selection) *FieldTypeDef { + return &FieldTypeDef{ + query: q, + } +} + +// A doc string for the field, if any. +func (r *FieldTypeDef) Description(ctx context.Context) (string, error) { + if r.description != nil { + return *r.description, nil + } + q := r.query.Select("description") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A unique identifier for this FieldTypeDef. +func (r *FieldTypeDef) ID(ctx context.Context) (FieldTypeDefID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response FieldTypeDefID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *FieldTypeDef) XXX_GraphQLType() string { + return "FieldTypeDef" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *FieldTypeDef) XXX_GraphQLIDType() string { + return "FieldTypeDefID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *FieldTypeDef) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *FieldTypeDef) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *FieldTypeDef) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadFieldTypeDefFromID(FieldTypeDefID(id)) + return nil +} + +// The name of the field in lowerCamelCase format. +func (r *FieldTypeDef) Name(ctx context.Context) (string, error) { + if r.name != nil { + return *r.name, nil + } + q := r.query.Select("name") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The type of the field. +func (r *FieldTypeDef) TypeDef() *TypeDef { + q := r.query.Select("typeDef") + + return &TypeDef{ + query: q, + } +} + +// A file. +type File struct { + query *querybuilder.Selection + + contents *string + export *bool + id *FileID + name *string + size *int + sync *FileID +} +type WithFileFunc func(r *File) *File + +// With calls the provided function with current File. +// +// This is useful for reusability and readability by not breaking the calling chain. +func (r *File) With(f WithFileFunc) *File { + return f(r) +} + +func (r *File) WithGraphQLQuery(q *querybuilder.Selection) *File { + return &File{ + query: q, + } +} + +// Retrieves the contents of the file. 
+func (r *File) Contents(ctx context.Context) (string, error) { + if r.contents != nil { + return *r.contents, nil + } + q := r.query.Select("contents") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// FileExportOpts contains options for File.Export +type FileExportOpts struct { + // If allowParentDirPath is true, the path argument can be a directory path, in which case the file will be created in that directory. + AllowParentDirPath bool +} + +// Writes the file to a file path on the host. +func (r *File) Export(ctx context.Context, path string, opts ...FileExportOpts) (bool, error) { + if r.export != nil { + return *r.export, nil + } + q := r.query.Select("export") + for i := len(opts) - 1; i >= 0; i-- { + // `allowParentDirPath` optional argument + if !querybuilder.IsZeroValue(opts[i].AllowParentDirPath) { + q = q.Arg("allowParentDirPath", opts[i].AllowParentDirPath) + } + } + q = q.Arg("path", path) + + var response bool + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A unique identifier for this File. +func (r *File) ID(ctx context.Context) (FileID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response FileID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *File) XXX_GraphQLType() string { + return "File" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *File) XXX_GraphQLIDType() string { + return "FileID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *File) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *File) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *File) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadFileFromID(FileID(id)) + return nil +} + +// Retrieves the name of the file. +func (r *File) Name(ctx context.Context) (string, error) { + if r.name != nil { + return *r.name, nil + } + q := r.query.Select("name") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Retrieves the size of the file, in bytes. +func (r *File) Size(ctx context.Context) (int, error) { + if r.size != nil { + return *r.size, nil + } + q := r.query.Select("size") + + var response int + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Force evaluation in the engine. +func (r *File) Sync(ctx context.Context) (*File, error) { + q := r.query.Select("sync") + + return r, q.Execute(ctx) +} + +// Retrieves this file with its name set to the given name. +func (r *File) WithName(name string) *File { + q := r.query.Select("withName") + q = q.Arg("name", name) + + return &File{ + query: q, + } +} + +// Retrieves this file with its created/modified timestamps set to the given time. +func (r *File) WithTimestamps(timestamp int) *File { + q := r.query.Select("withTimestamps") + q = q.Arg("timestamp", timestamp) + + return &File{ + query: q, + } +} + +// Function represents a resolver provided by a Module. +// +// A function always evaluates against a parent object and is given a set of named arguments. 
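// A sketch of the File helpers above: read a file's contents and export it to
// a host directory under a new name; the paths and file name are illustrative.
func saveReport(ctx context.Context, f *File) (string, error) {
	contents, err := f.Contents(ctx)
	if err != nil {
		return "", err
	}
	// AllowParentDirPath lets the path be a directory; the file keeps its name.
	_, err = f.WithName("report.txt").Export(ctx, "./out/", FileExportOpts{AllowParentDirPath: true})
	return contents, err
}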
+type Function struct { + query *querybuilder.Selection + + description *string + id *FunctionID + name *string +} +type WithFunctionFunc func(r *Function) *Function + +// With calls the provided function with current Function. +// +// This is useful for reusability and readability by not breaking the calling chain. +func (r *Function) With(f WithFunctionFunc) *Function { + return f(r) +} + +func (r *Function) WithGraphQLQuery(q *querybuilder.Selection) *Function { + return &Function{ + query: q, + } +} + +// Arguments accepted by the function, if any. +func (r *Function) Args(ctx context.Context) ([]FunctionArg, error) { + q := r.query.Select("args") + + q = q.Select("id") + + type args struct { + Id FunctionArgID + } + + convert := func(fields []args) []FunctionArg { + out := []FunctionArg{} + + for i := range fields { + val := FunctionArg{id: &fields[i].Id} + val.query = q.Root().Select("loadFunctionArgFromID").Arg("id", fields[i].Id) + out = append(out, val) + } + + return out + } + var response []args + + q = q.Bind(&response) + + err := q.Execute(ctx) + if err != nil { + return nil, err + } + + return convert(response), nil +} + +// A doc string for the function, if any. +func (r *Function) Description(ctx context.Context) (string, error) { + if r.description != nil { + return *r.description, nil + } + q := r.query.Select("description") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A unique identifier for this Function. +func (r *Function) ID(ctx context.Context) (FunctionID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response FunctionID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *Function) XXX_GraphQLType() string { + return "Function" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *Function) XXX_GraphQLIDType() string { + return "FunctionID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *Function) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *Function) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *Function) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadFunctionFromID(FunctionID(id)) + return nil +} + +// The name of the function. +func (r *Function) Name(ctx context.Context) (string, error) { + if r.name != nil { + return *r.name, nil + } + q := r.query.Select("name") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The type returned by the function. 
+func (r *Function) ReturnType() *TypeDef { + q := r.query.Select("returnType") + + return &TypeDef{ + query: q, + } +} + +// FunctionWithArgOpts contains options for Function.WithArg +type FunctionWithArgOpts struct { + // A doc string for the argument, if any + Description string + // A default value to use for this argument if not explicitly set by the caller, if any + DefaultValue JSON +} + +// Returns the function with the provided argument +func (r *Function) WithArg(name string, typeDef *TypeDef, opts ...FunctionWithArgOpts) *Function { + assertNotNil("typeDef", typeDef) + q := r.query.Select("withArg") + for i := len(opts) - 1; i >= 0; i-- { + // `description` optional argument + if !querybuilder.IsZeroValue(opts[i].Description) { + q = q.Arg("description", opts[i].Description) + } + // `defaultValue` optional argument + if !querybuilder.IsZeroValue(opts[i].DefaultValue) { + q = q.Arg("defaultValue", opts[i].DefaultValue) + } + } + q = q.Arg("name", name) + q = q.Arg("typeDef", typeDef) + + return &Function{ + query: q, + } +} + +// Returns the function with the given doc string. +func (r *Function) WithDescription(description string) *Function { + q := r.query.Select("withDescription") + q = q.Arg("description", description) + + return &Function{ + query: q, + } +} + +// An argument accepted by a function. +// +// This is a specification for an argument at function definition time, not an argument passed at function call time. +type FunctionArg struct { + query *querybuilder.Selection + + defaultValue *JSON + description *string + id *FunctionArgID + name *string +} + +func (r *FunctionArg) WithGraphQLQuery(q *querybuilder.Selection) *FunctionArg { + return &FunctionArg{ + query: q, + } +} + +// A default value to use for this argument when not explicitly set by the caller, if any. +func (r *FunctionArg) DefaultValue(ctx context.Context) (JSON, error) { + if r.defaultValue != nil { + return *r.defaultValue, nil + } + q := r.query.Select("defaultValue") + + var response JSON + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A doc string for the argument, if any. +func (r *FunctionArg) Description(ctx context.Context) (string, error) { + if r.description != nil { + return *r.description, nil + } + q := r.query.Select("description") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A unique identifier for this FunctionArg. +func (r *FunctionArg) ID(ctx context.Context) (FunctionArgID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response FunctionArgID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *FunctionArg) XXX_GraphQLType() string { + return "FunctionArg" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *FunctionArg) XXX_GraphQLIDType() string { + return "FunctionArgID" +} + +// XXX_GraphQLID is an internal function. 
It returns the underlying type ID +func (r *FunctionArg) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *FunctionArg) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *FunctionArg) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadFunctionArgFromID(FunctionArgID(id)) + return nil +} + +// The name of the argument in lowerCamelCase format. +func (r *FunctionArg) Name(ctx context.Context) (string, error) { + if r.name != nil { + return *r.name, nil + } + q := r.query.Select("name") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The type of the argument. +func (r *FunctionArg) TypeDef() *TypeDef { + q := r.query.Select("typeDef") + + return &TypeDef{ + query: q, + } +} + +// An active function call. +type FunctionCall struct { + query *querybuilder.Selection + + id *FunctionCallID + name *string + parent *JSON + parentName *string + returnValue *Void +} + +func (r *FunctionCall) WithGraphQLQuery(q *querybuilder.Selection) *FunctionCall { + return &FunctionCall{ + query: q, + } +} + +// A unique identifier for this FunctionCall. +func (r *FunctionCall) ID(ctx context.Context) (FunctionCallID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response FunctionCallID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *FunctionCall) XXX_GraphQLType() string { + return "FunctionCall" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *FunctionCall) XXX_GraphQLIDType() string { + return "FunctionCallID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *FunctionCall) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *FunctionCall) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *FunctionCall) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadFunctionCallFromID(FunctionCallID(id)) + return nil +} + +// The argument values the function is being invoked with. +func (r *FunctionCall) InputArgs(ctx context.Context) ([]FunctionCallArgValue, error) { + q := r.query.Select("inputArgs") + + q = q.Select("id") + + type inputArgs struct { + Id FunctionCallArgValueID + } + + convert := func(fields []inputArgs) []FunctionCallArgValue { + out := []FunctionCallArgValue{} + + for i := range fields { + val := FunctionCallArgValue{id: &fields[i].Id} + val.query = q.Root().Select("loadFunctionCallArgValueFromID").Arg("id", fields[i].Id) + out = append(out, val) + } + + return out + } + var response []inputArgs + + q = q.Bind(&response) + + err := q.Execute(ctx) + if err != nil { + return nil, err + } + + return convert(response), nil +} + +// The name of the function being called. 
+func (r *FunctionCall) Name(ctx context.Context) (string, error) { + if r.name != nil { + return *r.name, nil + } + q := r.query.Select("name") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The value of the parent object of the function being called. If the function is top-level to the module, this is always an empty object. +func (r *FunctionCall) Parent(ctx context.Context) (JSON, error) { + if r.parent != nil { + return *r.parent, nil + } + q := r.query.Select("parent") + + var response JSON + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The name of the parent object of the function being called. If the function is top-level to the module, this is the name of the module. +func (r *FunctionCall) ParentName(ctx context.Context) (string, error) { + if r.parentName != nil { + return *r.parentName, nil + } + q := r.query.Select("parentName") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Set the return value of the function call to the provided value. +func (r *FunctionCall) ReturnValue(ctx context.Context, value JSON) (Void, error) { + if r.returnValue != nil { + return *r.returnValue, nil + } + q := r.query.Select("returnValue") + q = q.Arg("value", value) + + var response Void + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A value passed as a named argument to a function call. +type FunctionCallArgValue struct { + query *querybuilder.Selection + + id *FunctionCallArgValueID + name *string + value *JSON +} + +func (r *FunctionCallArgValue) WithGraphQLQuery(q *querybuilder.Selection) *FunctionCallArgValue { + return &FunctionCallArgValue{ + query: q, + } +} + +// A unique identifier for this FunctionCallArgValue. +func (r *FunctionCallArgValue) ID(ctx context.Context) (FunctionCallArgValueID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response FunctionCallArgValueID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *FunctionCallArgValue) XXX_GraphQLType() string { + return "FunctionCallArgValue" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *FunctionCallArgValue) XXX_GraphQLIDType() string { + return "FunctionCallArgValueID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *FunctionCallArgValue) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *FunctionCallArgValue) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *FunctionCallArgValue) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadFunctionCallArgValueFromID(FunctionCallArgValueID(id)) + return nil +} + +// The name of the argument. +func (r *FunctionCallArgValue) Name(ctx context.Context) (string, error) { + if r.name != nil { + return *r.name, nil + } + q := r.query.Select("name") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The value of the argument represented as a JSON serialized string. 
+func (r *FunctionCallArgValue) Value(ctx context.Context) (JSON, error) { + if r.value != nil { + return *r.value, nil + } + q := r.query.Select("value") + + var response JSON + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The result of running an SDK's codegen. +type GeneratedCode struct { + query *querybuilder.Selection + + id *GeneratedCodeID +} +type WithGeneratedCodeFunc func(r *GeneratedCode) *GeneratedCode + +// With calls the provided function with current GeneratedCode. +// +// This is useful for reusability and readability by not breaking the calling chain. +func (r *GeneratedCode) With(f WithGeneratedCodeFunc) *GeneratedCode { + return f(r) +} + +func (r *GeneratedCode) WithGraphQLQuery(q *querybuilder.Selection) *GeneratedCode { + return &GeneratedCode{ + query: q, + } +} + +// The directory containing the generated code. +func (r *GeneratedCode) Code() *Directory { + q := r.query.Select("code") + + return &Directory{ + query: q, + } +} + +// A unique identifier for this GeneratedCode. +func (r *GeneratedCode) ID(ctx context.Context) (GeneratedCodeID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response GeneratedCodeID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *GeneratedCode) XXX_GraphQLType() string { + return "GeneratedCode" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *GeneratedCode) XXX_GraphQLIDType() string { + return "GeneratedCodeID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *GeneratedCode) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *GeneratedCode) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *GeneratedCode) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadGeneratedCodeFromID(GeneratedCodeID(id)) + return nil +} + +// List of paths to mark generated in version control (i.e. .gitattributes). +func (r *GeneratedCode) VcsGeneratedPaths(ctx context.Context) ([]string, error) { + q := r.query.Select("vcsGeneratedPaths") + + var response []string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// List of paths to ignore in version control (i.e. .gitignore). +func (r *GeneratedCode) VcsIgnoredPaths(ctx context.Context) ([]string, error) { + q := r.query.Select("vcsIgnoredPaths") + + var response []string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Set the list of paths to mark generated in version control. +func (r *GeneratedCode) WithVCSGeneratedPaths(paths []string) *GeneratedCode { + q := r.query.Select("withVCSGeneratedPaths") + q = q.Arg("paths", paths) + + return &GeneratedCode{ + query: q, + } +} + +// Set the list of paths to ignore in version control. +func (r *GeneratedCode) WithVCSIgnoredPaths(paths []string) *GeneratedCode { + q := r.query.Select("withVCSIgnoredPaths") + q = q.Arg("paths", paths) + + return &GeneratedCode{ + query: q, + } +} + +// Module source originating from a git repo. 
+type GitModuleSource struct { + query *querybuilder.Selection + + cloneURL *string + commit *string + htmlURL *string + id *GitModuleSourceID + root *string + rootSubpath *string + version *string +} + +func (r *GitModuleSource) WithGraphQLQuery(q *querybuilder.Selection) *GitModuleSource { + return &GitModuleSource{ + query: q, + } +} + +// The URL to clone the root of the git repo from +func (r *GitModuleSource) CloneURL(ctx context.Context) (string, error) { + if r.cloneURL != nil { + return *r.cloneURL, nil + } + q := r.query.Select("cloneURL") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The resolved commit of the git repo this source points to. +func (r *GitModuleSource) Commit(ctx context.Context) (string, error) { + if r.commit != nil { + return *r.commit, nil + } + q := r.query.Select("commit") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The directory containing everything needed to load load and use the module. +func (r *GitModuleSource) ContextDirectory() *Directory { + q := r.query.Select("contextDirectory") + + return &Directory{ + query: q, + } +} + +// The URL to the source's git repo in a web browser +func (r *GitModuleSource) HTMLURL(ctx context.Context) (string, error) { + if r.htmlURL != nil { + return *r.htmlURL, nil + } + q := r.query.Select("htmlURL") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A unique identifier for this GitModuleSource. +func (r *GitModuleSource) ID(ctx context.Context) (GitModuleSourceID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response GitModuleSourceID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *GitModuleSource) XXX_GraphQLType() string { + return "GitModuleSource" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *GitModuleSource) XXX_GraphQLIDType() string { + return "GitModuleSourceID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *GitModuleSource) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *GitModuleSource) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *GitModuleSource) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadGitModuleSourceFromID(GitModuleSourceID(id)) + return nil +} + +// The clean module name of the root of the module +func (r *GitModuleSource) Root(ctx context.Context) (string, error) { + if r.root != nil { + return *r.root, nil + } + q := r.query.Select("root") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The path to the root of the module source under the context directory. This directory contains its configuration file. It also contains its source code (possibly as a subdirectory). 
+func (r *GitModuleSource) RootSubpath(ctx context.Context) (string, error) { + if r.rootSubpath != nil { + return *r.rootSubpath, nil + } + q := r.query.Select("rootSubpath") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The specified version of the git repo this source points to. +func (r *GitModuleSource) Version(ctx context.Context) (string, error) { + if r.version != nil { + return *r.version, nil + } + q := r.query.Select("version") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A git ref (tag, branch, or commit). +type GitRef struct { + query *querybuilder.Selection + + commit *string + id *GitRefID +} + +func (r *GitRef) WithGraphQLQuery(q *querybuilder.Selection) *GitRef { + return &GitRef{ + query: q, + } +} + +// The resolved commit id at this ref. +func (r *GitRef) Commit(ctx context.Context) (string, error) { + if r.commit != nil { + return *r.commit, nil + } + q := r.query.Select("commit") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A unique identifier for this GitRef. +func (r *GitRef) ID(ctx context.Context) (GitRefID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response GitRefID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *GitRef) XXX_GraphQLType() string { + return "GitRef" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *GitRef) XXX_GraphQLIDType() string { + return "GitRefID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *GitRef) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *GitRef) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *GitRef) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadGitRefFromID(GitRefID(id)) + return nil +} + +// GitRefTreeOpts contains options for GitRef.Tree +type GitRefTreeOpts struct { + // DEPRECATED: This option should be passed to `git` instead. + SSHKnownHosts string + // DEPRECATED: This option should be passed to `git` instead. + SSHAuthSocket *Socket +} + +// The filesystem tree at this ref. +func (r *GitRef) Tree(opts ...GitRefTreeOpts) *Directory { + q := r.query.Select("tree") + for i := len(opts) - 1; i >= 0; i-- { + // `sshKnownHosts` optional argument + if !querybuilder.IsZeroValue(opts[i].SSHKnownHosts) { + q = q.Arg("sshKnownHosts", opts[i].SSHKnownHosts) + } + // `sshAuthSocket` optional argument + if !querybuilder.IsZeroValue(opts[i].SSHAuthSocket) { + q = q.Arg("sshAuthSocket", opts[i].SSHAuthSocket) + } + } + + return &Directory{ + query: q, + } +} + +// A git repository. +type GitRepository struct { + query *querybuilder.Selection + + id *GitRepositoryID +} +type WithGitRepositoryFunc func(r *GitRepository) *GitRepository + +// With calls the provided function with current GitRepository. +// +// This is useful for reusability and readability by not breaking the calling chain. 
+func (r *GitRepository) With(f WithGitRepositoryFunc) *GitRepository { + return f(r) +} + +func (r *GitRepository) WithGraphQLQuery(q *querybuilder.Selection) *GitRepository { + return &GitRepository{ + query: q, + } +} + +// Returns details of a branch. +func (r *GitRepository) Branch(name string) *GitRef { + q := r.query.Select("branch") + q = q.Arg("name", name) + + return &GitRef{ + query: q, + } +} + +// Returns details of a commit. +func (r *GitRepository) Commit(id string) *GitRef { + q := r.query.Select("commit") + q = q.Arg("id", id) + + return &GitRef{ + query: q, + } +} + +// Returns details for HEAD. +func (r *GitRepository) Head() *GitRef { + q := r.query.Select("head") + + return &GitRef{ + query: q, + } +} + +// A unique identifier for this GitRepository. +func (r *GitRepository) ID(ctx context.Context) (GitRepositoryID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response GitRepositoryID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *GitRepository) XXX_GraphQLType() string { + return "GitRepository" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *GitRepository) XXX_GraphQLIDType() string { + return "GitRepositoryID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *GitRepository) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *GitRepository) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *GitRepository) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadGitRepositoryFromID(GitRepositoryID(id)) + return nil +} + +// Returns details of a ref. +func (r *GitRepository) Ref(name string) *GitRef { + q := r.query.Select("ref") + q = q.Arg("name", name) + + return &GitRef{ + query: q, + } +} + +// Returns details of a tag. +func (r *GitRepository) Tag(name string) *GitRef { + q := r.query.Select("tag") + q = q.Arg("name", name) + + return &GitRef{ + query: q, + } +} + +// Header to authenticate the remote with. +func (r *GitRepository) WithAuthHeader(header *Secret) *GitRepository { + assertNotNil("header", header) + q := r.query.Select("withAuthHeader") + q = q.Arg("header", header) + + return &GitRepository{ + query: q, + } +} + +// Token to authenticate the remote with. +func (r *GitRepository) WithAuthToken(token *Secret) *GitRepository { + assertNotNil("token", token) + q := r.query.Select("withAuthToken") + q = q.Arg("token", token) + + return &GitRepository{ + query: q, + } +} + +// A graphql input type, which is essentially just a group of named args. +// This is currently only used to represent pre-existing usage of graphql input types +// in the core API. It is not used by user modules and shouldn't ever be as user +// module accept input objects via their id rather than graphql input types. +type InputTypeDef struct { + query *querybuilder.Selection + + id *InputTypeDefID + name *string +} + +func (r *InputTypeDef) WithGraphQLQuery(q *querybuilder.Selection) *InputTypeDef { + return &InputTypeDef{ + query: q, + } +} + +// Static fields defined on this input object, if any. 
+func (r *InputTypeDef) Fields(ctx context.Context) ([]FieldTypeDef, error) { + q := r.query.Select("fields") + + q = q.Select("id") + + type fields struct { + Id FieldTypeDefID + } + + convert := func(fields []fields) []FieldTypeDef { + out := []FieldTypeDef{} + + for i := range fields { + val := FieldTypeDef{id: &fields[i].Id} + val.query = q.Root().Select("loadFieldTypeDefFromID").Arg("id", fields[i].Id) + out = append(out, val) + } + + return out + } + var response []fields + + q = q.Bind(&response) + + err := q.Execute(ctx) + if err != nil { + return nil, err + } + + return convert(response), nil +} + +// A unique identifier for this InputTypeDef. +func (r *InputTypeDef) ID(ctx context.Context) (InputTypeDefID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response InputTypeDefID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *InputTypeDef) XXX_GraphQLType() string { + return "InputTypeDef" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *InputTypeDef) XXX_GraphQLIDType() string { + return "InputTypeDefID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *InputTypeDef) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *InputTypeDef) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *InputTypeDef) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadInputTypeDefFromID(InputTypeDefID(id)) + return nil +} + +// The name of the input object. +func (r *InputTypeDef) Name(ctx context.Context) (string, error) { + if r.name != nil { + return *r.name, nil + } + q := r.query.Select("name") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A definition of a custom interface defined in a Module. +type InterfaceTypeDef struct { + query *querybuilder.Selection + + description *string + id *InterfaceTypeDefID + name *string + sourceModuleName *string +} + +func (r *InterfaceTypeDef) WithGraphQLQuery(q *querybuilder.Selection) *InterfaceTypeDef { + return &InterfaceTypeDef{ + query: q, + } +} + +// The doc string for the interface, if any. +func (r *InterfaceTypeDef) Description(ctx context.Context) (string, error) { + if r.description != nil { + return *r.description, nil + } + q := r.query.Select("description") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Functions defined on this interface, if any. +func (r *InterfaceTypeDef) Functions(ctx context.Context) ([]Function, error) { + q := r.query.Select("functions") + + q = q.Select("id") + + type functions struct { + Id FunctionID + } + + convert := func(fields []functions) []Function { + out := []Function{} + + for i := range fields { + val := Function{id: &fields[i].Id} + val.query = q.Root().Select("loadFunctionFromID").Arg("id", fields[i].Id) + out = append(out, val) + } + + return out + } + var response []functions + + q = q.Bind(&response) + + err := q.Execute(ctx) + if err != nil { + return nil, err + } + + return convert(response), nil +} + +// A unique identifier for this InterfaceTypeDef. 
+func (r *InterfaceTypeDef) ID(ctx context.Context) (InterfaceTypeDefID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response InterfaceTypeDefID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *InterfaceTypeDef) XXX_GraphQLType() string { + return "InterfaceTypeDef" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *InterfaceTypeDef) XXX_GraphQLIDType() string { + return "InterfaceTypeDefID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *InterfaceTypeDef) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *InterfaceTypeDef) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *InterfaceTypeDef) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadInterfaceTypeDefFromID(InterfaceTypeDefID(id)) + return nil +} + +// The name of the interface. +func (r *InterfaceTypeDef) Name(ctx context.Context) (string, error) { + if r.name != nil { + return *r.name, nil + } + q := r.query.Select("name") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// If this InterfaceTypeDef is associated with a Module, the name of the module. Unset otherwise. +func (r *InterfaceTypeDef) SourceModuleName(ctx context.Context) (string, error) { + if r.sourceModuleName != nil { + return *r.sourceModuleName, nil + } + q := r.query.Select("sourceModuleName") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +type K3S struct { + query *querybuilder.Selection + + id *K3SID + kubectl *string +} +type WithK3SFunc func(r *K3S) *K3S + +// With calls the provided function with current K3S. +// +// This is useful for reusability and readability by not breaking the calling chain. +func (r *K3S) With(f WithK3SFunc) *K3S { + return f(r) +} + +func (r *K3S) WithGraphQLQuery(q *querybuilder.Selection) *K3S { + return &K3S{ + query: q, + } +} + +// returns the config file for the k3s cluster +func (r *K3S) Config(local bool) *File { + q := r.query.Select("config") + q = q.Arg("local", local) + + return &File{ + query: q, + } +} + +func (r *K3S) Container() *Container { + q := r.query.Select("container") + + return &Container{ + query: q, + } +} + +// A unique identifier for this K3S. +func (r *K3S) ID(ctx context.Context) (K3SID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response K3SID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *K3S) XXX_GraphQLType() string { + return "K3S" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *K3S) XXX_GraphQLIDType() string { + return "K3SID" +} + +// XXX_GraphQLID is an internal function. 
It returns the underlying type ID +func (r *K3S) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *K3S) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *K3S) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadK3SFromID(K3SID(id)) + return nil +} + +// runs kubectl on the target k3s cluster +func (r *K3S) Kubectl(ctx context.Context, args string) (string, error) { + if r.kubectl != nil { + return *r.kubectl, nil + } + q := r.query.Select("kubectl") + q = q.Arg("args", args) + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Returns a newly initialized kind cluster +func (r *K3S) Server() *Service { + q := r.query.Select("server") + + return &Service{ + query: q, + } +} + +// Returns a newly initialized kind cluster +func (r *K3S) WithContainer(c *Container) *K3S { + assertNotNil("c", c) + q := r.query.Select("withContainer") + q = q.Arg("c", c) + + return &K3S{ + query: q, + } +} + +// A simple key value object that represents a label. +type Label struct { + query *querybuilder.Selection + + id *LabelID + name *string + value *string +} + +func (r *Label) WithGraphQLQuery(q *querybuilder.Selection) *Label { + return &Label{ + query: q, + } +} + +// A unique identifier for this Label. +func (r *Label) ID(ctx context.Context) (LabelID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response LabelID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *Label) XXX_GraphQLType() string { + return "Label" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *Label) XXX_GraphQLIDType() string { + return "LabelID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *Label) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *Label) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *Label) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadLabelFromID(LabelID(id)) + return nil +} + +// The label name. +func (r *Label) Name(ctx context.Context) (string, error) { + if r.name != nil { + return *r.name, nil + } + q := r.query.Select("name") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The label value. +func (r *Label) Value(ctx context.Context) (string, error) { + if r.value != nil { + return *r.value, nil + } + q := r.query.Select("value") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A definition of a list type in a Module. +type ListTypeDef struct { + query *querybuilder.Selection + + id *ListTypeDefID +} + +func (r *ListTypeDef) WithGraphQLQuery(q *querybuilder.Selection) *ListTypeDef { + return &ListTypeDef{ + query: q, + } +} + +// The type of the elements in the list. 
+func (r *ListTypeDef) ElementTypeDef() *TypeDef { + q := r.query.Select("elementTypeDef") + + return &TypeDef{ + query: q, + } +} + +// A unique identifier for this ListTypeDef. +func (r *ListTypeDef) ID(ctx context.Context) (ListTypeDefID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response ListTypeDefID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *ListTypeDef) XXX_GraphQLType() string { + return "ListTypeDef" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *ListTypeDef) XXX_GraphQLIDType() string { + return "ListTypeDefID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *ListTypeDef) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *ListTypeDef) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *ListTypeDef) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadListTypeDefFromID(ListTypeDefID(id)) + return nil +} + +// Module source that that originates from a path locally relative to an arbitrary directory. +type LocalModuleSource struct { + query *querybuilder.Selection + + id *LocalModuleSourceID + rootSubpath *string +} + +func (r *LocalModuleSource) WithGraphQLQuery(q *querybuilder.Selection) *LocalModuleSource { + return &LocalModuleSource{ + query: q, + } +} + +// The directory containing everything needed to load load and use the module. +func (r *LocalModuleSource) ContextDirectory() *Directory { + q := r.query.Select("contextDirectory") + + return &Directory{ + query: q, + } +} + +// A unique identifier for this LocalModuleSource. +func (r *LocalModuleSource) ID(ctx context.Context) (LocalModuleSourceID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response LocalModuleSourceID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *LocalModuleSource) XXX_GraphQLType() string { + return "LocalModuleSource" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *LocalModuleSource) XXX_GraphQLIDType() string { + return "LocalModuleSourceID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *LocalModuleSource) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *LocalModuleSource) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *LocalModuleSource) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadLocalModuleSourceFromID(LocalModuleSourceID(id)) + return nil +} + +// The path to the root of the module source under the context directory. This directory contains its configuration file. It also contains its source code (possibly as a subdirectory). 
+func (r *LocalModuleSource) RootSubpath(ctx context.Context) (string, error) { + if r.rootSubpath != nil { + return *r.rootSubpath, nil + } + q := r.query.Select("rootSubpath") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A Dagger module. +type Module struct { + query *querybuilder.Selection + + description *string + id *ModuleID + name *string + sdk *string + serve *Void +} +type WithModuleFunc func(r *Module) *Module + +// With calls the provided function with current Module. +// +// This is useful for reusability and readability by not breaking the calling chain. +func (r *Module) With(f WithModuleFunc) *Module { + return f(r) +} + +func (r *Module) WithGraphQLQuery(q *querybuilder.Selection) *Module { + return &Module{ + query: q, + } +} + +// Modules used by this module. +func (r *Module) Dependencies(ctx context.Context) ([]Module, error) { + q := r.query.Select("dependencies") + + q = q.Select("id") + + type dependencies struct { + Id ModuleID + } + + convert := func(fields []dependencies) []Module { + out := []Module{} + + for i := range fields { + val := Module{id: &fields[i].Id} + val.query = q.Root().Select("loadModuleFromID").Arg("id", fields[i].Id) + out = append(out, val) + } + + return out + } + var response []dependencies + + q = q.Bind(&response) + + err := q.Execute(ctx) + if err != nil { + return nil, err + } + + return convert(response), nil +} + +// The dependencies as configured by the module. +func (r *Module) DependencyConfig(ctx context.Context) ([]ModuleDependency, error) { + q := r.query.Select("dependencyConfig") + + q = q.Select("id") + + type dependencyConfig struct { + Id ModuleDependencyID + } + + convert := func(fields []dependencyConfig) []ModuleDependency { + out := []ModuleDependency{} + + for i := range fields { + val := ModuleDependency{id: &fields[i].Id} + val.query = q.Root().Select("loadModuleDependencyFromID").Arg("id", fields[i].Id) + out = append(out, val) + } + + return out + } + var response []dependencyConfig + + q = q.Bind(&response) + + err := q.Execute(ctx) + if err != nil { + return nil, err + } + + return convert(response), nil +} + +// The doc string of the module, if any +func (r *Module) Description(ctx context.Context) (string, error) { + if r.description != nil { + return *r.description, nil + } + q := r.query.Select("description") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The generated files and directories made on top of the module source's context directory. +func (r *Module) GeneratedContextDiff() *Directory { + q := r.query.Select("generatedContextDiff") + + return &Directory{ + query: q, + } +} + +// The module source's context plus any configuration and source files created by codegen. +func (r *Module) GeneratedContextDirectory() *Directory { + q := r.query.Select("generatedContextDirectory") + + return &Directory{ + query: q, + } +} + +// A unique identifier for this Module. +func (r *Module) ID(ctx context.Context) (ModuleID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response ModuleID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *Module) XXX_GraphQLType() string { + return "Module" +} + +// XXX_GraphQLIDType is an internal function. 
It returns the native GraphQL type name for the ID of this object +func (r *Module) XXX_GraphQLIDType() string { + return "ModuleID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *Module) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *Module) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *Module) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadModuleFromID(ModuleID(id)) + return nil +} + +// Retrieves the module with the objects loaded via its SDK. +func (r *Module) Initialize() *Module { + q := r.query.Select("initialize") + + return &Module{ + query: q, + } +} + +// Interfaces served by this module. +func (r *Module) Interfaces(ctx context.Context) ([]TypeDef, error) { + q := r.query.Select("interfaces") + + q = q.Select("id") + + type interfaces struct { + Id TypeDefID + } + + convert := func(fields []interfaces) []TypeDef { + out := []TypeDef{} + + for i := range fields { + val := TypeDef{id: &fields[i].Id} + val.query = q.Root().Select("loadTypeDefFromID").Arg("id", fields[i].Id) + out = append(out, val) + } + + return out + } + var response []interfaces + + q = q.Bind(&response) + + err := q.Execute(ctx) + if err != nil { + return nil, err + } + + return convert(response), nil +} + +// The name of the module +func (r *Module) Name(ctx context.Context) (string, error) { + if r.name != nil { + return *r.name, nil + } + q := r.query.Select("name") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Objects served by this module. +func (r *Module) Objects(ctx context.Context) ([]TypeDef, error) { + q := r.query.Select("objects") + + q = q.Select("id") + + type objects struct { + Id TypeDefID + } + + convert := func(fields []objects) []TypeDef { + out := []TypeDef{} + + for i := range fields { + val := TypeDef{id: &fields[i].Id} + val.query = q.Root().Select("loadTypeDefFromID").Arg("id", fields[i].Id) + out = append(out, val) + } + + return out + } + var response []objects + + q = q.Bind(&response) + + err := q.Execute(ctx) + if err != nil { + return nil, err + } + + return convert(response), nil +} + +// The container that runs the module's entrypoint. It will fail to execute if the module doesn't compile. +func (r *Module) Runtime() *Container { + q := r.query.Select("runtime") + + return &Container{ + query: q, + } +} + +// The SDK used by this module. Either a name of a builtin SDK or a module source ref string pointing to the SDK's implementation. +func (r *Module) SDK(ctx context.Context) (string, error) { + if r.sdk != nil { + return *r.sdk, nil + } + q := r.query.Select("sdk") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Serve a module's API in the current session. +// +// Note: this can only be called once per session. In the future, it could return a stream or service to remove the side effect. +func (r *Module) Serve(ctx context.Context) (Void, error) { + if r.serve != nil { + return *r.serve, nil + } + q := r.query.Select("serve") + + var response Void + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The source for the module. 
+func (r *Module) Source() *ModuleSource { + q := r.query.Select("source") + + return &ModuleSource{ + query: q, + } +} + +// Retrieves the module with the given description +func (r *Module) WithDescription(description string) *Module { + q := r.query.Select("withDescription") + q = q.Arg("description", description) + + return &Module{ + query: q, + } +} + +// This module plus the given Interface type and associated functions +func (r *Module) WithInterface(iface *TypeDef) *Module { + assertNotNil("iface", iface) + q := r.query.Select("withInterface") + q = q.Arg("iface", iface) + + return &Module{ + query: q, + } +} + +// This module plus the given Object type and associated functions. +func (r *Module) WithObject(object *TypeDef) *Module { + assertNotNil("object", object) + q := r.query.Select("withObject") + q = q.Arg("object", object) + + return &Module{ + query: q, + } +} + +// Retrieves the module with basic configuration loaded if present. +func (r *Module) WithSource(source *ModuleSource) *Module { + assertNotNil("source", source) + q := r.query.Select("withSource") + q = q.Arg("source", source) + + return &Module{ + query: q, + } +} + +// The configuration of dependency of a module. +type ModuleDependency struct { + query *querybuilder.Selection + + id *ModuleDependencyID + name *string +} + +func (r *ModuleDependency) WithGraphQLQuery(q *querybuilder.Selection) *ModuleDependency { + return &ModuleDependency{ + query: q, + } +} + +// A unique identifier for this ModuleDependency. +func (r *ModuleDependency) ID(ctx context.Context) (ModuleDependencyID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response ModuleDependencyID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *ModuleDependency) XXX_GraphQLType() string { + return "ModuleDependency" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *ModuleDependency) XXX_GraphQLIDType() string { + return "ModuleDependencyID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *ModuleDependency) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *ModuleDependency) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *ModuleDependency) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadModuleDependencyFromID(ModuleDependencyID(id)) + return nil +} + +// The name of the dependency module. +func (r *ModuleDependency) Name(ctx context.Context) (string, error) { + if r.name != nil { + return *r.name, nil + } + q := r.query.Select("name") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The source for the dependency module. +func (r *ModuleDependency) Source() *ModuleSource { + q := r.query.Select("source") + + return &ModuleSource{ + query: q, + } +} + +// The source needed to load and run a module, along with any metadata about the source such as versions/urls/etc. 
+type ModuleSource struct { + query *querybuilder.Selection + + asString *string + configExists *bool + id *ModuleSourceID + kind *ModuleSourceKind + moduleName *string + moduleOriginalName *string + resolveContextPathFromCaller *string + sourceRootSubpath *string + sourceSubpath *string +} +type WithModuleSourceFunc func(r *ModuleSource) *ModuleSource + +// With calls the provided function with current ModuleSource. +// +// This is useful for reusability and readability by not breaking the calling chain. +func (r *ModuleSource) With(f WithModuleSourceFunc) *ModuleSource { + return f(r) +} + +func (r *ModuleSource) WithGraphQLQuery(q *querybuilder.Selection) *ModuleSource { + return &ModuleSource{ + query: q, + } +} + +// If the source is a of kind git, the git source representation of it. +func (r *ModuleSource) AsGitSource() *GitModuleSource { + q := r.query.Select("asGitSource") + + return &GitModuleSource{ + query: q, + } +} + +// If the source is of kind local, the local source representation of it. +func (r *ModuleSource) AsLocalSource() *LocalModuleSource { + q := r.query.Select("asLocalSource") + + return &LocalModuleSource{ + query: q, + } +} + +// Load the source as a module. If this is a local source, the parent directory must have been provided during module source creation +func (r *ModuleSource) AsModule() *Module { + q := r.query.Select("asModule") + + return &Module{ + query: q, + } +} + +// A human readable ref string representation of this module source. +func (r *ModuleSource) AsString(ctx context.Context) (string, error) { + if r.asString != nil { + return *r.asString, nil + } + q := r.query.Select("asString") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Returns whether the module source has a configuration file. +func (r *ModuleSource) ConfigExists(ctx context.Context) (bool, error) { + if r.configExists != nil { + return *r.configExists, nil + } + q := r.query.Select("configExists") + + var response bool + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The directory containing everything needed to load load and use the module. +func (r *ModuleSource) ContextDirectory() *Directory { + q := r.query.Select("contextDirectory") + + return &Directory{ + query: q, + } +} + +// The dependencies of the module source. Includes dependencies from the configuration and any extras from withDependencies calls. +func (r *ModuleSource) Dependencies(ctx context.Context) ([]ModuleDependency, error) { + q := r.query.Select("dependencies") + + q = q.Select("id") + + type dependencies struct { + Id ModuleDependencyID + } + + convert := func(fields []dependencies) []ModuleDependency { + out := []ModuleDependency{} + + for i := range fields { + val := ModuleDependency{id: &fields[i].Id} + val.query = q.Root().Select("loadModuleDependencyFromID").Arg("id", fields[i].Id) + out = append(out, val) + } + + return out + } + var response []dependencies + + q = q.Bind(&response) + + err := q.Execute(ctx) + if err != nil { + return nil, err + } + + return convert(response), nil +} + +// The directory containing the module configuration and source code (source code may be in a subdir). +func (r *ModuleSource) Directory(path string) *Directory { + q := r.query.Select("directory") + q = q.Arg("path", path) + + return &Directory{ + query: q, + } +} + +// A unique identifier for this ModuleSource. 
+func (r *ModuleSource) ID(ctx context.Context) (ModuleSourceID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response ModuleSourceID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *ModuleSource) XXX_GraphQLType() string { + return "ModuleSource" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *ModuleSource) XXX_GraphQLIDType() string { + return "ModuleSourceID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *ModuleSource) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *ModuleSource) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *ModuleSource) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadModuleSourceFromID(ModuleSourceID(id)) + return nil +} + +// The kind of source (e.g. local, git, etc.) +func (r *ModuleSource) Kind(ctx context.Context) (ModuleSourceKind, error) { + if r.kind != nil { + return *r.kind, nil + } + q := r.query.Select("kind") + + var response ModuleSourceKind + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// If set, the name of the module this source references, including any overrides at runtime by callers. +func (r *ModuleSource) ModuleName(ctx context.Context) (string, error) { + if r.moduleName != nil { + return *r.moduleName, nil + } + q := r.query.Select("moduleName") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The original name of the module this source references, as defined in the module configuration. +func (r *ModuleSource) ModuleOriginalName(ctx context.Context) (string, error) { + if r.moduleOriginalName != nil { + return *r.moduleOriginalName, nil + } + q := r.query.Select("moduleOriginalName") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The path to the module source's context directory on the caller's filesystem. Only valid for local sources. +func (r *ModuleSource) ResolveContextPathFromCaller(ctx context.Context) (string, error) { + if r.resolveContextPathFromCaller != nil { + return *r.resolveContextPathFromCaller, nil + } + q := r.query.Select("resolveContextPathFromCaller") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Resolve the provided module source arg as a dependency relative to this module source. +func (r *ModuleSource) ResolveDependency(dep *ModuleSource) *ModuleSource { + assertNotNil("dep", dep) + q := r.query.Select("resolveDependency") + q = q.Arg("dep", dep) + + return &ModuleSource{ + query: q, + } +} + +// ModuleSourceResolveDirectoryFromCallerOpts contains options for ModuleSource.ResolveDirectoryFromCaller +type ModuleSourceResolveDirectoryFromCallerOpts struct { + // If set, the name of the view to apply to the path. + ViewName string +} + +// Load a directory from the caller optionally with a given view applied. 
+func (r *ModuleSource) ResolveDirectoryFromCaller(path string, opts ...ModuleSourceResolveDirectoryFromCallerOpts) *Directory { + q := r.query.Select("resolveDirectoryFromCaller") + for i := len(opts) - 1; i >= 0; i-- { + // `viewName` optional argument + if !querybuilder.IsZeroValue(opts[i].ViewName) { + q = q.Arg("viewName", opts[i].ViewName) + } + } + q = q.Arg("path", path) + + return &Directory{ + query: q, + } +} + +// Load the source from its path on the caller's filesystem, including only needed+configured files and directories. Only valid for local sources. +func (r *ModuleSource) ResolveFromCaller() *ModuleSource { + q := r.query.Select("resolveFromCaller") + + return &ModuleSource{ + query: q, + } +} + +// The path relative to context of the root of the module source, which contains dagger.json. It also contains the module implementation source code, but that may or may not being a subdir of this root. +func (r *ModuleSource) SourceRootSubpath(ctx context.Context) (string, error) { + if r.sourceRootSubpath != nil { + return *r.sourceRootSubpath, nil + } + q := r.query.Select("sourceRootSubpath") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The path relative to context of the module implementation source code. +func (r *ModuleSource) SourceSubpath(ctx context.Context) (string, error) { + if r.sourceSubpath != nil { + return *r.sourceSubpath, nil + } + q := r.query.Select("sourceSubpath") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Retrieve a named view defined for this module source. +func (r *ModuleSource) View(name string) *ModuleSourceView { + q := r.query.Select("view") + q = q.Arg("name", name) + + return &ModuleSourceView{ + query: q, + } +} + +// The named views defined for this module source, which are sets of directory filters that can be applied to directory arguments provided to functions. +func (r *ModuleSource) Views(ctx context.Context) ([]ModuleSourceView, error) { + q := r.query.Select("views") + + q = q.Select("id") + + type views struct { + Id ModuleSourceViewID + } + + convert := func(fields []views) []ModuleSourceView { + out := []ModuleSourceView{} + + for i := range fields { + val := ModuleSourceView{id: &fields[i].Id} + val.query = q.Root().Select("loadModuleSourceViewFromID").Arg("id", fields[i].Id) + out = append(out, val) + } + + return out + } + var response []views + + q = q.Bind(&response) + + err := q.Execute(ctx) + if err != nil { + return nil, err + } + + return convert(response), nil +} + +// Update the module source with a new context directory. Only valid for local sources. +func (r *ModuleSource) WithContextDirectory(dir *Directory) *ModuleSource { + assertNotNil("dir", dir) + q := r.query.Select("withContextDirectory") + q = q.Arg("dir", dir) + + return &ModuleSource{ + query: q, + } +} + +// Append the provided dependencies to the module source's dependency list. +func (r *ModuleSource) WithDependencies(dependencies []*ModuleDependency) *ModuleSource { + q := r.query.Select("withDependencies") + q = q.Arg("dependencies", dependencies) + + return &ModuleSource{ + query: q, + } +} + +// Update the module source with a new name. +func (r *ModuleSource) WithName(name string) *ModuleSource { + q := r.query.Select("withName") + q = q.Arg("name", name) + + return &ModuleSource{ + query: q, + } +} + +// Update the module source with a new SDK. 
+func (r *ModuleSource) WithSDK(sdk string) *ModuleSource { + q := r.query.Select("withSDK") + q = q.Arg("sdk", sdk) + + return &ModuleSource{ + query: q, + } +} + +// Update the module source with a new source subpath. +func (r *ModuleSource) WithSourceSubpath(path string) *ModuleSource { + q := r.query.Select("withSourceSubpath") + q = q.Arg("path", path) + + return &ModuleSource{ + query: q, + } +} + +// Update the module source with a new named view. +func (r *ModuleSource) WithView(name string, patterns []string) *ModuleSource { + q := r.query.Select("withView") + q = q.Arg("name", name) + q = q.Arg("patterns", patterns) + + return &ModuleSource{ + query: q, + } +} + +// A named set of path filters that can be applied to directory arguments provided to functions. +type ModuleSourceView struct { + query *querybuilder.Selection + + id *ModuleSourceViewID + name *string +} + +func (r *ModuleSourceView) WithGraphQLQuery(q *querybuilder.Selection) *ModuleSourceView { + return &ModuleSourceView{ + query: q, + } +} + +// A unique identifier for this ModuleSourceView. +func (r *ModuleSourceView) ID(ctx context.Context) (ModuleSourceViewID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response ModuleSourceViewID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *ModuleSourceView) XXX_GraphQLType() string { + return "ModuleSourceView" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *ModuleSourceView) XXX_GraphQLIDType() string { + return "ModuleSourceViewID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *ModuleSourceView) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *ModuleSourceView) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *ModuleSourceView) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadModuleSourceViewFromID(ModuleSourceViewID(id)) + return nil +} + +// The name of the view +func (r *ModuleSourceView) Name(ctx context.Context) (string, error) { + if r.name != nil { + return *r.name, nil + } + q := r.query.Select("name") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The patterns of the view used to filter paths +func (r *ModuleSourceView) Patterns(ctx context.Context) ([]string, error) { + q := r.query.Select("patterns") + + var response []string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A definition of a custom object defined in a Module. +type ObjectTypeDef struct { + query *querybuilder.Selection + + description *string + id *ObjectTypeDefID + name *string + sourceModuleName *string +} + +func (r *ObjectTypeDef) WithGraphQLQuery(q *querybuilder.Selection) *ObjectTypeDef { + return &ObjectTypeDef{ + query: q, + } +} + +// The function used to construct new instances of this object, if any +func (r *ObjectTypeDef) Constructor() *Function { + q := r.query.Select("constructor") + + return &Function{ + query: q, + } +} + +// The doc string for the object, if any. 
+func (r *ObjectTypeDef) Description(ctx context.Context) (string, error) { + if r.description != nil { + return *r.description, nil + } + q := r.query.Select("description") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Static fields defined on this object, if any. +func (r *ObjectTypeDef) Fields(ctx context.Context) ([]FieldTypeDef, error) { + q := r.query.Select("fields") + + q = q.Select("id") + + type fields struct { + Id FieldTypeDefID + } + + convert := func(fields []fields) []FieldTypeDef { + out := []FieldTypeDef{} + + for i := range fields { + val := FieldTypeDef{id: &fields[i].Id} + val.query = q.Root().Select("loadFieldTypeDefFromID").Arg("id", fields[i].Id) + out = append(out, val) + } + + return out + } + var response []fields + + q = q.Bind(&response) + + err := q.Execute(ctx) + if err != nil { + return nil, err + } + + return convert(response), nil +} + +// Functions defined on this object, if any. +func (r *ObjectTypeDef) Functions(ctx context.Context) ([]Function, error) { + q := r.query.Select("functions") + + q = q.Select("id") + + type functions struct { + Id FunctionID + } + + convert := func(fields []functions) []Function { + out := []Function{} + + for i := range fields { + val := Function{id: &fields[i].Id} + val.query = q.Root().Select("loadFunctionFromID").Arg("id", fields[i].Id) + out = append(out, val) + } + + return out + } + var response []functions + + q = q.Bind(&response) + + err := q.Execute(ctx) + if err != nil { + return nil, err + } + + return convert(response), nil +} + +// A unique identifier for this ObjectTypeDef. +func (r *ObjectTypeDef) ID(ctx context.Context) (ObjectTypeDefID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response ObjectTypeDefID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *ObjectTypeDef) XXX_GraphQLType() string { + return "ObjectTypeDef" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *ObjectTypeDef) XXX_GraphQLIDType() string { + return "ObjectTypeDefID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *ObjectTypeDef) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *ObjectTypeDef) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *ObjectTypeDef) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadObjectTypeDefFromID(ObjectTypeDefID(id)) + return nil +} + +// The name of the object. +func (r *ObjectTypeDef) Name(ctx context.Context) (string, error) { + if r.name != nil { + return *r.name, nil + } + q := r.query.Select("name") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// If this ObjectTypeDef is associated with a Module, the name of the module. Unset otherwise. +func (r *ObjectTypeDef) SourceModuleName(ctx context.Context) (string, error) { + if r.sourceModuleName != nil { + return *r.sourceModuleName, nil + } + q := r.query.Select("sourceModuleName") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A port exposed by a container. 
+type Port struct { + query *querybuilder.Selection + + description *string + experimentalSkipHealthcheck *bool + id *PortID + port *int + protocol *NetworkProtocol +} + +func (r *Port) WithGraphQLQuery(q *querybuilder.Selection) *Port { + return &Port{ + query: q, + } +} + +// The port description. +func (r *Port) Description(ctx context.Context) (string, error) { + if r.description != nil { + return *r.description, nil + } + q := r.query.Select("description") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Skip the health check when run as a service. +func (r *Port) ExperimentalSkipHealthcheck(ctx context.Context) (bool, error) { + if r.experimentalSkipHealthcheck != nil { + return *r.experimentalSkipHealthcheck, nil + } + q := r.query.Select("experimentalSkipHealthcheck") + + var response bool + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A unique identifier for this Port. +func (r *Port) ID(ctx context.Context) (PortID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response PortID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *Port) XXX_GraphQLType() string { + return "Port" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *Port) XXX_GraphQLIDType() string { + return "PortID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *Port) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *Port) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *Port) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadPortFromID(PortID(id)) + return nil +} + +// The port number. +func (r *Port) Port(ctx context.Context) (int, error) { + if r.port != nil { + return *r.port, nil + } + q := r.query.Select("port") + + var response int + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The transport layer protocol. +func (r *Port) Protocol(ctx context.Context) (NetworkProtocol, error) { + if r.protocol != nil { + return *r.protocol, nil + } + q := r.query.Select("protocol") + + var response NetworkProtocol + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +type WithClientFunc func(r *Client) *Client + +// With calls the provided function with current Client. +// +// This is useful for reusability and readability by not breaking the calling chain. +func (r *Client) With(f WithClientFunc) *Client { + return f(r) +} + +func (r *Client) WithGraphQLQuery(q *querybuilder.Selection) *Client { + return &Client{ + query: q, + client: r.client, + } +} + +// Retrieves a content-addressed blob. +func (r *Client) Blob(digest string, size int, mediaType string, uncompressed string) *Directory { + q := r.query.Select("blob") + q = q.Arg("digest", digest) + q = q.Arg("size", size) + q = q.Arg("mediaType", mediaType) + q = q.Arg("uncompressed", uncompressed) + + return &Directory{ + query: q, + } +} + +// Retrieves a container builtin to the engine. 
+func (r *Client) BuiltinContainer(digest string) *Container { + q := r.query.Select("builtinContainer") + q = q.Arg("digest", digest) + + return &Container{ + query: q, + } +} + +// Constructs a cache volume for a given cache key. +func (r *Client) CacheVolume(key string) *CacheVolume { + q := r.query.Select("cacheVolume") + q = q.Arg("key", key) + + return &CacheVolume{ + query: q, + } +} + +// Checks if the current Dagger Engine is compatible with an SDK's required version. +func (r *Client) CheckVersionCompatibility(ctx context.Context, version string) (bool, error) { + q := r.query.Select("checkVersionCompatibility") + q = q.Arg("version", version) + + var response bool + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// ContainerOpts contains options for Client.Container +type ContainerOpts struct { + // DEPRECATED: Use `loadContainerFromID` instead. + ID ContainerID + // Platform to initialize the container with. + Platform Platform +} + +// Creates a scratch container. +// +// Optional platform argument initializes new containers to execute and publish as that platform. Platform defaults to that of the builder's host. +func (r *Client) Container(opts ...ContainerOpts) *Container { + q := r.query.Select("container") + for i := len(opts) - 1; i >= 0; i-- { + // `id` optional argument + if !querybuilder.IsZeroValue(opts[i].ID) { + q = q.Arg("id", opts[i].ID) + } + // `platform` optional argument + if !querybuilder.IsZeroValue(opts[i].Platform) { + q = q.Arg("platform", opts[i].Platform) + } + } + + return &Container{ + query: q, + } +} + +// The FunctionCall context that the SDK caller is currently executing in. +// +// If the caller is not currently executing in a function, this will return an error. +func (r *Client) CurrentFunctionCall() *FunctionCall { + q := r.query.Select("currentFunctionCall") + + return &FunctionCall{ + query: q, + } +} + +// The module currently being served in the session, if any. +func (r *Client) CurrentModule() *CurrentModule { + q := r.query.Select("currentModule") + + return &CurrentModule{ + query: q, + } +} + +// The TypeDef representations of the objects currently being served in the session. +func (r *Client) CurrentTypeDefs(ctx context.Context) ([]TypeDef, error) { + q := r.query.Select("currentTypeDefs") + + q = q.Select("id") + + type currentTypeDefs struct { + Id TypeDefID + } + + convert := func(fields []currentTypeDefs) []TypeDef { + out := []TypeDef{} + + for i := range fields { + val := TypeDef{id: &fields[i].Id} + val.query = q.Root().Select("loadTypeDefFromID").Arg("id", fields[i].Id) + out = append(out, val) + } + + return out + } + var response []currentTypeDefs + + q = q.Bind(&response) + + err := q.Execute(ctx) + if err != nil { + return nil, err + } + + return convert(response), nil +} + +// The default platform of the engine. +func (r *Client) DefaultPlatform(ctx context.Context) (Platform, error) { + q := r.query.Select("defaultPlatform") + + var response Platform + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// DirectoryOpts contains options for Client.Directory +type DirectoryOpts struct { + // DEPRECATED: Use `loadDirectoryFromID` instead. + ID DirectoryID +} + +// Creates an empty directory. 
+func (r *Client) Directory(opts ...DirectoryOpts) *Directory { + q := r.query.Select("directory") + for i := len(opts) - 1; i >= 0; i-- { + // `id` optional argument + if !querybuilder.IsZeroValue(opts[i].ID) { + q = q.Arg("id", opts[i].ID) + } + } + + return &Directory{ + query: q, + } +} + +// Deprecated: Use LoadFileFromID instead. +func (r *Client) File(id FileID) *File { + q := r.query.Select("file") + q = q.Arg("id", id) + + return &File{ + query: q, + } +} + +// Creates a function. +func (r *Client) Function(name string, returnType *TypeDef) *Function { + assertNotNil("returnType", returnType) + q := r.query.Select("function") + q = q.Arg("name", name) + q = q.Arg("returnType", returnType) + + return &Function{ + query: q, + } +} + +// Create a code generation result, given a directory containing the generated code. +func (r *Client) GeneratedCode(code *Directory) *GeneratedCode { + assertNotNil("code", code) + q := r.query.Select("generatedCode") + q = q.Arg("code", code) + + return &GeneratedCode{ + query: q, + } +} + +// GitOpts contains options for Client.Git +type GitOpts struct { + // Set to true to keep .git directory. + KeepGitDir bool + // A service which must be started before the repo is fetched. + ExperimentalServiceHost *Service + // Set SSH known hosts + SSHKnownHosts string + // Set SSH auth socket + SSHAuthSocket *Socket +} + +// Queries a Git repository. +func (r *Client) Git(url string, opts ...GitOpts) *GitRepository { + q := r.query.Select("git") + for i := len(opts) - 1; i >= 0; i-- { + // `keepGitDir` optional argument + if !querybuilder.IsZeroValue(opts[i].KeepGitDir) { + q = q.Arg("keepGitDir", opts[i].KeepGitDir) + } + // `experimentalServiceHost` optional argument + if !querybuilder.IsZeroValue(opts[i].ExperimentalServiceHost) { + q = q.Arg("experimentalServiceHost", opts[i].ExperimentalServiceHost) + } + // `sshKnownHosts` optional argument + if !querybuilder.IsZeroValue(opts[i].SSHKnownHosts) { + q = q.Arg("sshKnownHosts", opts[i].SSHKnownHosts) + } + // `sshAuthSocket` optional argument + if !querybuilder.IsZeroValue(opts[i].SSHAuthSocket) { + q = q.Arg("sshAuthSocket", opts[i].SSHAuthSocket) + } + } + q = q.Arg("url", url) + + return &GitRepository{ + query: q, + } +} + +// HTTPOpts contains options for Client.HTTP +type HTTPOpts struct { + // A service which must be started before the URL is fetched. + ExperimentalServiceHost *Service +} + +// Returns a file containing an http remote url content. +func (r *Client) HTTP(url string, opts ...HTTPOpts) *File { + q := r.query.Select("http") + for i := len(opts) - 1; i >= 0; i-- { + // `experimentalServiceHost` optional argument + if !querybuilder.IsZeroValue(opts[i].ExperimentalServiceHost) { + q = q.Arg("experimentalServiceHost", opts[i].ExperimentalServiceHost) + } + } + q = q.Arg("url", url) + + return &File{ + query: q, + } +} + +func (r *Client) K3S(name string) *K3S { + q := r.query.Select("k3S") + q = q.Arg("name", name) + + return &K3S{ + query: q, + } +} + +// Load a CacheVolume from its ID. +func (r *Client) LoadCacheVolumeFromID(id CacheVolumeID) *CacheVolume { + q := r.query.Select("loadCacheVolumeFromID") + q = q.Arg("id", id) + + return &CacheVolume{ + query: q, + } +} + +// Load a Container from its ID. +func (r *Client) LoadContainerFromID(id ContainerID) *Container { + q := r.query.Select("loadContainerFromID") + q = q.Arg("id", id) + + return &Container{ + query: q, + } +} + +// Load a CurrentModule from its ID. 
+func (r *Client) LoadCurrentModuleFromID(id CurrentModuleID) *CurrentModule { + q := r.query.Select("loadCurrentModuleFromID") + q = q.Arg("id", id) + + return &CurrentModule{ + query: q, + } +} + +// Load a Directory from its ID. +func (r *Client) LoadDirectoryFromID(id DirectoryID) *Directory { + q := r.query.Select("loadDirectoryFromID") + q = q.Arg("id", id) + + return &Directory{ + query: q, + } +} + +// Load a EnvVariable from its ID. +func (r *Client) LoadEnvVariableFromID(id EnvVariableID) *EnvVariable { + q := r.query.Select("loadEnvVariableFromID") + q = q.Arg("id", id) + + return &EnvVariable{ + query: q, + } +} + +// Load a FieldTypeDef from its ID. +func (r *Client) LoadFieldTypeDefFromID(id FieldTypeDefID) *FieldTypeDef { + q := r.query.Select("loadFieldTypeDefFromID") + q = q.Arg("id", id) + + return &FieldTypeDef{ + query: q, + } +} + +// Load a File from its ID. +func (r *Client) LoadFileFromID(id FileID) *File { + q := r.query.Select("loadFileFromID") + q = q.Arg("id", id) + + return &File{ + query: q, + } +} + +// Load a FunctionArg from its ID. +func (r *Client) LoadFunctionArgFromID(id FunctionArgID) *FunctionArg { + q := r.query.Select("loadFunctionArgFromID") + q = q.Arg("id", id) + + return &FunctionArg{ + query: q, + } +} + +// Load a FunctionCallArgValue from its ID. +func (r *Client) LoadFunctionCallArgValueFromID(id FunctionCallArgValueID) *FunctionCallArgValue { + q := r.query.Select("loadFunctionCallArgValueFromID") + q = q.Arg("id", id) + + return &FunctionCallArgValue{ + query: q, + } +} + +// Load a FunctionCall from its ID. +func (r *Client) LoadFunctionCallFromID(id FunctionCallID) *FunctionCall { + q := r.query.Select("loadFunctionCallFromID") + q = q.Arg("id", id) + + return &FunctionCall{ + query: q, + } +} + +// Load a Function from its ID. +func (r *Client) LoadFunctionFromID(id FunctionID) *Function { + q := r.query.Select("loadFunctionFromID") + q = q.Arg("id", id) + + return &Function{ + query: q, + } +} + +// Load a GeneratedCode from its ID. +func (r *Client) LoadGeneratedCodeFromID(id GeneratedCodeID) *GeneratedCode { + q := r.query.Select("loadGeneratedCodeFromID") + q = q.Arg("id", id) + + return &GeneratedCode{ + query: q, + } +} + +// Load a GitModuleSource from its ID. +func (r *Client) LoadGitModuleSourceFromID(id GitModuleSourceID) *GitModuleSource { + q := r.query.Select("loadGitModuleSourceFromID") + q = q.Arg("id", id) + + return &GitModuleSource{ + query: q, + } +} + +// Load a GitRef from its ID. +func (r *Client) LoadGitRefFromID(id GitRefID) *GitRef { + q := r.query.Select("loadGitRefFromID") + q = q.Arg("id", id) + + return &GitRef{ + query: q, + } +} + +// Load a GitRepository from its ID. +func (r *Client) LoadGitRepositoryFromID(id GitRepositoryID) *GitRepository { + q := r.query.Select("loadGitRepositoryFromID") + q = q.Arg("id", id) + + return &GitRepository{ + query: q, + } +} + +// Load a InputTypeDef from its ID. +func (r *Client) LoadInputTypeDefFromID(id InputTypeDefID) *InputTypeDef { + q := r.query.Select("loadInputTypeDefFromID") + q = q.Arg("id", id) + + return &InputTypeDef{ + query: q, + } +} + +// Load a InterfaceTypeDef from its ID. +func (r *Client) LoadInterfaceTypeDefFromID(id InterfaceTypeDefID) *InterfaceTypeDef { + q := r.query.Select("loadInterfaceTypeDefFromID") + q = q.Arg("id", id) + + return &InterfaceTypeDef{ + query: q, + } +} + +// Load a K3S from its ID. 
+func (r *Client) LoadK3SFromID(id K3SID) *K3S { + q := r.query.Select("loadK3SFromID") + q = q.Arg("id", id) + + return &K3S{ + query: q, + } +} + +// Load a Label from its ID. +func (r *Client) LoadLabelFromID(id LabelID) *Label { + q := r.query.Select("loadLabelFromID") + q = q.Arg("id", id) + + return &Label{ + query: q, + } +} + +// Load a ListTypeDef from its ID. +func (r *Client) LoadListTypeDefFromID(id ListTypeDefID) *ListTypeDef { + q := r.query.Select("loadListTypeDefFromID") + q = q.Arg("id", id) + + return &ListTypeDef{ + query: q, + } +} + +// Load a LocalModuleSource from its ID. +func (r *Client) LoadLocalModuleSourceFromID(id LocalModuleSourceID) *LocalModuleSource { + q := r.query.Select("loadLocalModuleSourceFromID") + q = q.Arg("id", id) + + return &LocalModuleSource{ + query: q, + } +} + +// Load a ModuleDependency from its ID. +func (r *Client) LoadModuleDependencyFromID(id ModuleDependencyID) *ModuleDependency { + q := r.query.Select("loadModuleDependencyFromID") + q = q.Arg("id", id) + + return &ModuleDependency{ + query: q, + } +} + +// Load a Module from its ID. +func (r *Client) LoadModuleFromID(id ModuleID) *Module { + q := r.query.Select("loadModuleFromID") + q = q.Arg("id", id) + + return &Module{ + query: q, + } +} + +// Load a ModuleSource from its ID. +func (r *Client) LoadModuleSourceFromID(id ModuleSourceID) *ModuleSource { + q := r.query.Select("loadModuleSourceFromID") + q = q.Arg("id", id) + + return &ModuleSource{ + query: q, + } +} + +// Load a ModuleSourceView from its ID. +func (r *Client) LoadModuleSourceViewFromID(id ModuleSourceViewID) *ModuleSourceView { + q := r.query.Select("loadModuleSourceViewFromID") + q = q.Arg("id", id) + + return &ModuleSourceView{ + query: q, + } +} + +// Load a ObjectTypeDef from its ID. +func (r *Client) LoadObjectTypeDefFromID(id ObjectTypeDefID) *ObjectTypeDef { + q := r.query.Select("loadObjectTypeDefFromID") + q = q.Arg("id", id) + + return &ObjectTypeDef{ + query: q, + } +} + +// Load a Port from its ID. +func (r *Client) LoadPortFromID(id PortID) *Port { + q := r.query.Select("loadPortFromID") + q = q.Arg("id", id) + + return &Port{ + query: q, + } +} + +// Load a ScalarTypeDef from its ID. +func (r *Client) LoadScalarTypeDefFromID(id ScalarTypeDefID) *ScalarTypeDef { + q := r.query.Select("loadScalarTypeDefFromID") + q = q.Arg("id", id) + + return &ScalarTypeDef{ + query: q, + } +} + +// Load a Secret from its ID. +func (r *Client) LoadSecretFromID(id SecretID) *Secret { + q := r.query.Select("loadSecretFromID") + q = q.Arg("id", id) + + return &Secret{ + query: q, + } +} + +// Load a Service from its ID. +func (r *Client) LoadServiceFromID(id ServiceID) *Service { + q := r.query.Select("loadServiceFromID") + q = q.Arg("id", id) + + return &Service{ + query: q, + } +} + +// Load a Socket from its ID. +func (r *Client) LoadSocketFromID(id SocketID) *Socket { + q := r.query.Select("loadSocketFromID") + q = q.Arg("id", id) + + return &Socket{ + query: q, + } +} + +// Load a Terminal from its ID. +func (r *Client) LoadTerminalFromID(id TerminalID) *Terminal { + q := r.query.Select("loadTerminalFromID") + q = q.Arg("id", id) + + return &Terminal{ + query: q, + } +} + +// Load a TypeDef from its ID. +func (r *Client) LoadTypeDefFromID(id TypeDefID) *TypeDef { + q := r.query.Select("loadTypeDefFromID") + q = q.Arg("id", id) + + return &TypeDef{ + query: q, + } +} + +// Create a new module. 
+func (r *Client) Module() *Module { + q := r.query.Select("module") + + return &Module{ + query: q, + } +} + +// ModuleDependencyOpts contains options for Client.ModuleDependency +type ModuleDependencyOpts struct { + // If set, the name to use for the dependency. Otherwise, once installed to a parent module, the name of the dependency module will be used by default. + Name string +} + +// Create a new module dependency configuration from a module source and name +func (r *Client) ModuleDependency(source *ModuleSource, opts ...ModuleDependencyOpts) *ModuleDependency { + assertNotNil("source", source) + q := r.query.Select("moduleDependency") + for i := len(opts) - 1; i >= 0; i-- { + // `name` optional argument + if !querybuilder.IsZeroValue(opts[i].Name) { + q = q.Arg("name", opts[i].Name) + } + } + q = q.Arg("source", source) + + return &ModuleDependency{ + query: q, + } +} + +// ModuleSourceOpts contains options for Client.ModuleSource +type ModuleSourceOpts struct { + // If true, enforce that the source is a stable version for source kinds that support versioning. + Stable bool +} + +// Create a new module source instance from a source ref string. +func (r *Client) ModuleSource(refString string, opts ...ModuleSourceOpts) *ModuleSource { + q := r.query.Select("moduleSource") + for i := len(opts) - 1; i >= 0; i-- { + // `stable` optional argument + if !querybuilder.IsZeroValue(opts[i].Stable) { + q = q.Arg("stable", opts[i].Stable) + } + } + q = q.Arg("refString", refString) + + return &ModuleSource{ + query: q, + } +} + +// PipelineOpts contains options for Client.Pipeline +type PipelineOpts struct { + // Description of the sub-pipeline. + Description string + // Labels to apply to the sub-pipeline. + Labels []PipelineLabel +} + +// Creates a named sub-pipeline. +func (r *Client) Pipeline(name string, opts ...PipelineOpts) *Client { + q := r.query.Select("pipeline") + for i := len(opts) - 1; i >= 0; i-- { + // `description` optional argument + if !querybuilder.IsZeroValue(opts[i].Description) { + q = q.Arg("description", opts[i].Description) + } + // `labels` optional argument + if !querybuilder.IsZeroValue(opts[i].Labels) { + q = q.Arg("labels", opts[i].Labels) + } + } + q = q.Arg("name", name) + + return &Client{ + query: q, + client: r.client, + } +} + +// SecretOpts contains options for Client.Secret +type SecretOpts struct { + Accessor string +} + +// Reference a secret by name. +func (r *Client) Secret(name string, opts ...SecretOpts) *Secret { + q := r.query.Select("secret") + for i := len(opts) - 1; i >= 0; i-- { + // `accessor` optional argument + if !querybuilder.IsZeroValue(opts[i].Accessor) { + q = q.Arg("accessor", opts[i].Accessor) + } + } + q = q.Arg("name", name) + + return &Secret{ + query: q, + } +} + +// Sets a secret given a user defined name to its plaintext and returns the secret. +// +// The plaintext value is limited to a size of 128000 bytes. +func (r *Client) SetSecret(name string, plaintext string) *Secret { + q := r.query.Select("setSecret") + q = q.Arg("name", name) + q = q.Arg("plaintext", plaintext) + + return &Secret{ + query: q, + } +} + +// Loads a socket by its ID. +// +// Deprecated: Use LoadSocketFromID instead. +func (r *Client) Socket(id SocketID) *Socket { + q := r.query.Select("socket") + q = q.Arg("id", id) + + return &Socket{ + query: q, + } +} + +// Create a new TypeDef. +func (r *Client) TypeDef() *TypeDef { + q := r.query.Select("typeDef") + + return &TypeDef{ + query: q, + } +} + +// Get the current Dagger Engine version. 
+func (r *Client) Version(ctx context.Context) (string, error) { + q := r.query.Select("version") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A definition of a custom scalar defined in a Module. +type ScalarTypeDef struct { + query *querybuilder.Selection + + description *string + id *ScalarTypeDefID + name *string + sourceModuleName *string +} + +func (r *ScalarTypeDef) WithGraphQLQuery(q *querybuilder.Selection) *ScalarTypeDef { + return &ScalarTypeDef{ + query: q, + } +} + +// A doc string for the scalar, if any. +func (r *ScalarTypeDef) Description(ctx context.Context) (string, error) { + if r.description != nil { + return *r.description, nil + } + q := r.query.Select("description") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A unique identifier for this ScalarTypeDef. +func (r *ScalarTypeDef) ID(ctx context.Context) (ScalarTypeDefID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response ScalarTypeDefID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *ScalarTypeDef) XXX_GraphQLType() string { + return "ScalarTypeDef" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *ScalarTypeDef) XXX_GraphQLIDType() string { + return "ScalarTypeDefID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *ScalarTypeDef) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *ScalarTypeDef) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *ScalarTypeDef) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadScalarTypeDefFromID(ScalarTypeDefID(id)) + return nil +} + +// The name of the scalar. +func (r *ScalarTypeDef) Name(ctx context.Context) (string, error) { + if r.name != nil { + return *r.name, nil + } + q := r.query.Select("name") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// If this ScalarTypeDef is associated with a Module, the name of the module. Unset otherwise. +func (r *ScalarTypeDef) SourceModuleName(ctx context.Context) (string, error) { + if r.sourceModuleName != nil { + return *r.sourceModuleName, nil + } + q := r.query.Select("sourceModuleName") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A reference to a secret value, which can be handled more safely than the value itself. +type Secret struct { + query *querybuilder.Selection + + id *SecretID + name *string + plaintext *string +} + +func (r *Secret) WithGraphQLQuery(q *querybuilder.Selection) *Secret { + return &Secret{ + query: q, + } +} + +// A unique identifier for this Secret. +func (r *Secret) ID(ctx context.Context) (SecretID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response SecretID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *Secret) XXX_GraphQLType() string { + return "Secret" +} + +// XXX_GraphQLIDType is an internal function. 
It returns the native GraphQL type name for the ID of this object +func (r *Secret) XXX_GraphQLIDType() string { + return "SecretID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *Secret) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *Secret) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *Secret) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadSecretFromID(SecretID(id)) + return nil +} + +// The name of this secret. +func (r *Secret) Name(ctx context.Context) (string, error) { + if r.name != nil { + return *r.name, nil + } + q := r.query.Select("name") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// The value of this secret. +func (r *Secret) Plaintext(ctx context.Context) (string, error) { + if r.plaintext != nil { + return *r.plaintext, nil + } + q := r.query.Select("plaintext") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A content-addressed service providing TCP connectivity. +type Service struct { + query *querybuilder.Selection + + endpoint *string + hostname *string + id *ServiceID + start *ServiceID + stop *ServiceID + up *Void +} + +func (r *Service) WithGraphQLQuery(q *querybuilder.Selection) *Service { + return &Service{ + query: q, + } +} + +// ServiceEndpointOpts contains options for Service.Endpoint +type ServiceEndpointOpts struct { + // The exposed port number for the endpoint + Port int + // Return a URL with the given scheme, eg. http for http:// + Scheme string +} + +// Retrieves an endpoint that clients can use to reach this container. +// +// If no port is specified, the first exposed port is used. If none exist an error is returned. +// +// If a scheme is specified, a URL is returned. Otherwise, a host:port pair is returned. +func (r *Service) Endpoint(ctx context.Context, opts ...ServiceEndpointOpts) (string, error) { + if r.endpoint != nil { + return *r.endpoint, nil + } + q := r.query.Select("endpoint") + for i := len(opts) - 1; i >= 0; i-- { + // `port` optional argument + if !querybuilder.IsZeroValue(opts[i].Port) { + q = q.Arg("port", opts[i].Port) + } + // `scheme` optional argument + if !querybuilder.IsZeroValue(opts[i].Scheme) { + q = q.Arg("scheme", opts[i].Scheme) + } + } + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Retrieves a hostname which can be used by clients to reach this container. +func (r *Service) Hostname(ctx context.Context) (string, error) { + if r.hostname != nil { + return *r.hostname, nil + } + q := r.query.Select("hostname") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A unique identifier for this Service. +func (r *Service) ID(ctx context.Context) (ServiceID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response ServiceID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *Service) XXX_GraphQLType() string { + return "Service" +} + +// XXX_GraphQLIDType is an internal function. 
It returns the native GraphQL type name for the ID of this object +func (r *Service) XXX_GraphQLIDType() string { + return "ServiceID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *Service) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *Service) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *Service) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadServiceFromID(ServiceID(id)) + return nil +} + +// Retrieves the list of ports provided by the service. +func (r *Service) Ports(ctx context.Context) ([]Port, error) { + q := r.query.Select("ports") + + q = q.Select("id") + + type ports struct { + Id PortID + } + + convert := func(fields []ports) []Port { + out := []Port{} + + for i := range fields { + val := Port{id: &fields[i].Id} + val.query = q.Root().Select("loadPortFromID").Arg("id", fields[i].Id) + out = append(out, val) + } + + return out + } + var response []ports + + q = q.Bind(&response) + + err := q.Execute(ctx) + if err != nil { + return nil, err + } + + return convert(response), nil +} + +// Start the service and wait for its health checks to succeed. +// +// Services bound to a Container do not need to be manually started. +func (r *Service) Start(ctx context.Context) (*Service, error) { + q := r.query.Select("start") + + return r, q.Execute(ctx) +} + +// ServiceStopOpts contains options for Service.Stop +type ServiceStopOpts struct { + // Immediately kill the service without waiting for a graceful exit + Kill bool +} + +// Stop the service. +func (r *Service) Stop(ctx context.Context, opts ...ServiceStopOpts) (*Service, error) { + q := r.query.Select("stop") + for i := len(opts) - 1; i >= 0; i-- { + // `kill` optional argument + if !querybuilder.IsZeroValue(opts[i].Kill) { + q = q.Arg("kill", opts[i].Kill) + } + } + + return r, q.Execute(ctx) +} + +// ServiceUpOpts contains options for Service.Up +type ServiceUpOpts struct { + // List of frontend/backend port mappings to forward. + // + // Frontend is the port accepting traffic on the host, backend is the service port. + Ports []PortForward + // Bind each tunnel port to a random port on the host. + Random bool +} + +// Creates a tunnel that forwards traffic from the caller's network to this service. +func (r *Service) Up(ctx context.Context, opts ...ServiceUpOpts) (Void, error) { + if r.up != nil { + return *r.up, nil + } + q := r.query.Select("up") + for i := len(opts) - 1; i >= 0; i-- { + // `ports` optional argument + if !querybuilder.IsZeroValue(opts[i].Ports) { + q = q.Arg("ports", opts[i].Ports) + } + // `random` optional argument + if !querybuilder.IsZeroValue(opts[i].Random) { + q = q.Arg("random", opts[i].Random) + } + } + + var response Void + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A Unix or TCP/IP socket that can be mounted into a container. +type Socket struct { + query *querybuilder.Selection + + id *SocketID +} + +func (r *Socket) WithGraphQLQuery(q *querybuilder.Selection) *Socket { + return &Socket{ + query: q, + } +} + +// A unique identifier for this Socket. 
+func (r *Socket) ID(ctx context.Context) (SocketID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response SocketID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *Socket) XXX_GraphQLType() string { + return "Socket" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *Socket) XXX_GraphQLIDType() string { + return "SocketID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *Socket) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *Socket) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *Socket) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadSocketFromID(SocketID(id)) + return nil +} + +// An interactive terminal that clients can connect to. +type Terminal struct { + query *querybuilder.Selection + + id *TerminalID + websocketEndpoint *string +} + +func (r *Terminal) WithGraphQLQuery(q *querybuilder.Selection) *Terminal { + return &Terminal{ + query: q, + } +} + +// A unique identifier for this Terminal. +func (r *Terminal) ID(ctx context.Context) (TerminalID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response TerminalID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *Terminal) XXX_GraphQLType() string { + return "Terminal" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *Terminal) XXX_GraphQLIDType() string { + return "TerminalID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *Terminal) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *Terminal) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *Terminal) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadTerminalFromID(TerminalID(id)) + return nil +} + +// An http endpoint at which this terminal can be connected to over a websocket. +func (r *Terminal) WebsocketEndpoint(ctx context.Context) (string, error) { + if r.websocketEndpoint != nil { + return *r.websocketEndpoint, nil + } + q := r.query.Select("websocketEndpoint") + + var response string + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// A definition of a parameter or return type in a Module. +type TypeDef struct { + query *querybuilder.Selection + + id *TypeDefID + kind *TypeDefKind + optional *bool +} +type WithTypeDefFunc func(r *TypeDef) *TypeDef + +// With calls the provided function with current TypeDef. +// +// This is useful for reusability and readability by not breaking the calling chain. 
+func (r *TypeDef) With(f WithTypeDefFunc) *TypeDef { + return f(r) +} + +func (r *TypeDef) WithGraphQLQuery(q *querybuilder.Selection) *TypeDef { + return &TypeDef{ + query: q, + } +} + +// If kind is INPUT, the input-specific type definition. If kind is not INPUT, this will be null. +func (r *TypeDef) AsInput() *InputTypeDef { + q := r.query.Select("asInput") + + return &InputTypeDef{ + query: q, + } +} + +// If kind is INTERFACE, the interface-specific type definition. If kind is not INTERFACE, this will be null. +func (r *TypeDef) AsInterface() *InterfaceTypeDef { + q := r.query.Select("asInterface") + + return &InterfaceTypeDef{ + query: q, + } +} + +// If kind is LIST, the list-specific type definition. If kind is not LIST, this will be null. +func (r *TypeDef) AsList() *ListTypeDef { + q := r.query.Select("asList") + + return &ListTypeDef{ + query: q, + } +} + +// If kind is OBJECT, the object-specific type definition. If kind is not OBJECT, this will be null. +func (r *TypeDef) AsObject() *ObjectTypeDef { + q := r.query.Select("asObject") + + return &ObjectTypeDef{ + query: q, + } +} + +// If kind is SCALAR, the scalar-specific type definition. If kind is not SCALAR, this will be null. +func (r *TypeDef) AsScalar() *ScalarTypeDef { + q := r.query.Select("asScalar") + + return &ScalarTypeDef{ + query: q, + } +} + +// A unique identifier for this TypeDef. +func (r *TypeDef) ID(ctx context.Context) (TypeDefID, error) { + if r.id != nil { + return *r.id, nil + } + q := r.query.Select("id") + + var response TypeDefID + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// XXX_GraphQLType is an internal function. It returns the native GraphQL type name +func (r *TypeDef) XXX_GraphQLType() string { + return "TypeDef" +} + +// XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object +func (r *TypeDef) XXX_GraphQLIDType() string { + return "TypeDefID" +} + +// XXX_GraphQLID is an internal function. It returns the underlying type ID +func (r *TypeDef) XXX_GraphQLID(ctx context.Context) (string, error) { + id, err := r.ID(ctx) + if err != nil { + return "", err + } + return string(id), nil +} + +func (r *TypeDef) MarshalJSON() ([]byte, error) { + id, err := r.ID(marshalCtx) + if err != nil { + return nil, err + } + return json.Marshal(id) +} +func (r *TypeDef) UnmarshalJSON(bs []byte) error { + var id string + err := json.Unmarshal(bs, &id) + if err != nil { + return err + } + *r = *dag.LoadTypeDefFromID(TypeDefID(id)) + return nil +} + +// The kind of type this is (e.g. primitive, list, object). +func (r *TypeDef) Kind(ctx context.Context) (TypeDefKind, error) { + if r.kind != nil { + return *r.kind, nil + } + q := r.query.Select("kind") + + var response TypeDefKind + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Whether this type can be set to null. Defaults to false. +func (r *TypeDef) Optional(ctx context.Context) (bool, error) { + if r.optional != nil { + return *r.optional, nil + } + q := r.query.Select("optional") + + var response bool + + q = q.Bind(&response) + return response, q.Execute(ctx) +} + +// Adds a function for constructing a new instance of an Object TypeDef, failing if the type is not an object. 
+func (r *TypeDef) WithConstructor(function *Function) *TypeDef { + assertNotNil("function", function) + q := r.query.Select("withConstructor") + q = q.Arg("function", function) + + return &TypeDef{ + query: q, + } +} + +// TypeDefWithFieldOpts contains options for TypeDef.WithField +type TypeDefWithFieldOpts struct { + // A doc string for the field, if any + Description string +} + +// Adds a static field for an Object TypeDef, failing if the type is not an object. +func (r *TypeDef) WithField(name string, typeDef *TypeDef, opts ...TypeDefWithFieldOpts) *TypeDef { + assertNotNil("typeDef", typeDef) + q := r.query.Select("withField") + for i := len(opts) - 1; i >= 0; i-- { + // `description` optional argument + if !querybuilder.IsZeroValue(opts[i].Description) { + q = q.Arg("description", opts[i].Description) + } + } + q = q.Arg("name", name) + q = q.Arg("typeDef", typeDef) + + return &TypeDef{ + query: q, + } +} + +// Adds a function for an Object or Interface TypeDef, failing if the type is not one of those kinds. +func (r *TypeDef) WithFunction(function *Function) *TypeDef { + assertNotNil("function", function) + q := r.query.Select("withFunction") + q = q.Arg("function", function) + + return &TypeDef{ + query: q, + } +} + +// TypeDefWithInterfaceOpts contains options for TypeDef.WithInterface +type TypeDefWithInterfaceOpts struct { + Description string +} + +// Returns a TypeDef of kind Interface with the provided name. +func (r *TypeDef) WithInterface(name string, opts ...TypeDefWithInterfaceOpts) *TypeDef { + q := r.query.Select("withInterface") + for i := len(opts) - 1; i >= 0; i-- { + // `description` optional argument + if !querybuilder.IsZeroValue(opts[i].Description) { + q = q.Arg("description", opts[i].Description) + } + } + q = q.Arg("name", name) + + return &TypeDef{ + query: q, + } +} + +// Sets the kind of the type. +func (r *TypeDef) WithKind(kind TypeDefKind) *TypeDef { + q := r.query.Select("withKind") + q = q.Arg("kind", kind) + + return &TypeDef{ + query: q, + } +} + +// Returns a TypeDef of kind List with the provided type for its elements. +func (r *TypeDef) WithListOf(elementType *TypeDef) *TypeDef { + assertNotNil("elementType", elementType) + q := r.query.Select("withListOf") + q = q.Arg("elementType", elementType) + + return &TypeDef{ + query: q, + } +} + +// TypeDefWithObjectOpts contains options for TypeDef.WithObject +type TypeDefWithObjectOpts struct { + Description string +} + +// Returns a TypeDef of kind Object with the provided name. +// +// Note that an object's fields and functions may be omitted if the intent is only to refer to an object. This is how functions are able to return their own object, or any other circular reference. +func (r *TypeDef) WithObject(name string, opts ...TypeDefWithObjectOpts) *TypeDef { + q := r.query.Select("withObject") + for i := len(opts) - 1; i >= 0; i-- { + // `description` optional argument + if !querybuilder.IsZeroValue(opts[i].Description) { + q = q.Arg("description", opts[i].Description) + } + } + q = q.Arg("name", name) + + return &TypeDef{ + query: q, + } +} + +// Sets whether this type can be set to null. +func (r *TypeDef) WithOptional(optional bool) *TypeDef { + q := r.query.Select("withOptional") + q = q.Arg("optional", optional) + + return &TypeDef{ + query: q, + } +} + +// TypeDefWithScalarOpts contains options for TypeDef.WithScalar +type TypeDefWithScalarOpts struct { + Description string +} + +// Returns a TypeDef of kind Scalar with the provided name. 
+func (r *TypeDef) WithScalar(name string, opts ...TypeDefWithScalarOpts) *TypeDef { + q := r.query.Select("withScalar") + for i := len(opts) - 1; i >= 0; i-- { + // `description` optional argument + if !querybuilder.IsZeroValue(opts[i].Description) { + q = q.Arg("description", opts[i].Description) + } + } + q = q.Arg("name", name) + + return &TypeDef{ + query: q, + } +} + +type CacheSharingMode string + +func (CacheSharingMode) IsEnum() {} + +const ( + // Shares the cache volume amongst many build pipelines, but will serialize the writes + Locked CacheSharingMode = "LOCKED" + + // Keeps a cache volume for a single build pipeline + Private CacheSharingMode = "PRIVATE" + + // Shares the cache volume amongst many build pipelines + Shared CacheSharingMode = "SHARED" +) + +type ImageLayerCompression string + +func (ImageLayerCompression) IsEnum() {} + +const ( + Estargz ImageLayerCompression = "EStarGZ" + + Gzip ImageLayerCompression = "Gzip" + + Uncompressed ImageLayerCompression = "Uncompressed" + + Zstd ImageLayerCompression = "Zstd" +) + +type ImageMediaTypes string + +func (ImageMediaTypes) IsEnum() {} + +const ( + Dockermediatypes ImageMediaTypes = "DockerMediaTypes" + + Ocimediatypes ImageMediaTypes = "OCIMediaTypes" +) + +type ModuleSourceKind string + +func (ModuleSourceKind) IsEnum() {} + +const ( + GitSource ModuleSourceKind = "GIT_SOURCE" + + LocalSource ModuleSourceKind = "LOCAL_SOURCE" +) + +type NetworkProtocol string + +func (NetworkProtocol) IsEnum() {} + +const ( + Tcp NetworkProtocol = "TCP" + + Udp NetworkProtocol = "UDP" +) + +type TypeDefKind string + +func (TypeDefKind) IsEnum() {} + +const ( + // A boolean value. + BooleanKind TypeDefKind = "BOOLEAN_KIND" + + // A graphql input type, used only when representing the core API via TypeDefs. + InputKind TypeDefKind = "INPUT_KIND" + + // An integer value. + IntegerKind TypeDefKind = "INTEGER_KIND" + + // A named type of functions that can be matched+implemented by other objects+interfaces. + // + // Always paired with an InterfaceTypeDef. + InterfaceKind TypeDefKind = "INTERFACE_KIND" + + // A list of values all having the same type. + // + // Always paired with a ListTypeDef. + ListKind TypeDefKind = "LIST_KIND" + + // A named type defined in the GraphQL schema, with fields and functions. + // + // Always paired with an ObjectTypeDef. + ObjectKind TypeDefKind = "OBJECT_KIND" + + // A scalar value of any basic kind. + ScalarKind TypeDefKind = "SCALAR_KIND" + + // A string value. + StringKind TypeDefKind = "STRING_KIND" + + // A special kind used to signify that no value is returned. + // + // This is used for functions that have no return value. The outer TypeDef specifying this Kind is always Optional, as the Void is never actually represented. 
+ VoidKind TypeDefKind = "VOID_KIND" +) + +type Client struct { + query *querybuilder.Selection + client graphql.Client +} + +var dag *Client + +func init() { + gqlClient, q := getClientParams() + dag = &Client{ + query: q.Client(gqlClient), + client: gqlClient, + } +} + +func Connect() *Client { + return dag +} + +// GraphQLClient returns the underlying graphql.Client +func (c *Client) GraphQLClient() graphql.Client { + return c.client +} + +func getClientParams() (graphql.Client, *querybuilder.Selection) { + portStr, ok := os.LookupEnv("DAGGER_SESSION_PORT") + if !ok { + panic("DAGGER_SESSION_PORT is not set") + } + port, err := strconv.Atoi(portStr) + if err != nil { + panic(fmt.Errorf("DAGGER_SESSION_PORT %q is invalid: %w", portStr, err)) + } + + sessionToken := os.Getenv("DAGGER_SESSION_TOKEN") + if sessionToken == "" { + panic("DAGGER_SESSION_TOKEN is not set") + } + + host := fmt.Sprintf("127.0.0.1:%d", port) + + dialTransport := &http.Transport{ + DialContext: func(_ context.Context, _, _ string) (net.Conn, error) { + return net.Dial("tcp", host) + }, + } + httpClient := &http.Client{ + Transport: roundTripperFunc(func(r *http.Request) (*http.Response, error) { + r.SetBasicAuth(sessionToken, "") + + // detect $TRACEPARENT set by 'dagger run' + r = r.WithContext(fallbackSpanContext(r.Context())) + + // propagate span context via headers (i.e. for Dagger-in-Dagger) + otel.GetTextMapPropagator().Inject(r.Context(), propagation.HeaderCarrier(r.Header)) + + return dialTransport.RoundTrip(r) + }), + } + gqlClient := errorWrappedClient{graphql.NewClient(fmt.Sprintf("http://%s/query", host), httpClient)} + + return gqlClient, querybuilder.Query() +} + +func fallbackSpanContext(ctx context.Context) context.Context { + if trace.SpanContextFromContext(ctx).IsValid() { + return ctx + } + return otel.GetTextMapPropagator().Extract(ctx, telemetry.NewEnvCarrier(true)) +} + +// TODO: pollutes namespace, move to non internal package in dagger.io/dagger +type roundTripperFunc func(*http.Request) (*http.Response, error) + +func (fn roundTripperFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return fn(req) +} + +type errorWrappedClient struct { + graphql.Client +} + +func (c errorWrappedClient) MakeRequest(ctx context.Context, req *graphql.Request, resp *graphql.Response) error { + err := c.Client.MakeRequest(ctx, req, resp) + if err != nil { + if e := getCustomError(err); e != nil { + return e + } + return err + } + return nil +} diff --git a/dagger/internal/querybuilder/marshal.go b/dagger/internal/querybuilder/marshal.go new file mode 100644 index 000000000..1f5468af0 --- /dev/null +++ b/dagger/internal/querybuilder/marshal.go @@ -0,0 +1,162 @@ +package querybuilder + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "reflect" + "strings" + + gqlgen "github.com/99designs/gqlgen/graphql" + "golang.org/x/exp/slices" + "golang.org/x/sync/errgroup" +) + +// GraphQLMarshaller is an internal interface for marshalling an object into GraphQL. +type GraphQLMarshaller interface { + // XXX_GraphQLType is an internal function. It returns the native GraphQL type name + XXX_GraphQLType() string + // XXX_GraphQLIDType is an internal function. It returns the native GraphQL type name for the ID of this object + XXX_GraphQLIDType() string + // XXX_GraphqlID is an internal function. 
It returns the underlying type ID + XXX_GraphQLID(ctx context.Context) (string, error) + json.Marshaler +} + +const ( + GraphQLMarshallerType = "XXX_GraphQLType" + GraphQLMarshallerIDType = "XXX_GraphQLIDType" + GraphQLMarshallerID = "XXX_GraphQLID" +) + +type enum interface { + IsEnum() +} + +var ( + gqlMarshaller = reflect.TypeOf((*GraphQLMarshaller)(nil)).Elem() + enumT = reflect.TypeOf((*enum)(nil)).Elem() +) + +func MarshalGQL(ctx context.Context, v any) (string, error) { + return marshalValue(ctx, reflect.ValueOf(v)) +} + +func marshalValue(ctx context.Context, v reflect.Value) (string, error) { + t := v.Type() + + if t.Implements(gqlMarshaller) { + return marshalCustom(ctx, v) + } + + switch t.Kind() { + case reflect.Bool: + return fmt.Sprintf("%t", v.Bool()), nil + case reflect.Int: + return fmt.Sprintf("%d", v.Int()), nil + case reflect.String: + if t.Implements(enumT) { + // enums render as their literal value + return v.String(), nil + } + + // escape strings following graphQL spec + // https://github.com/graphql/graphql-spec/blob/main/spec/Section%202%20--%20Language.md#string-value + var buf bytes.Buffer + gqlgen.MarshalString(v.String()).MarshalGQL(&buf) + return buf.String(), nil + case reflect.Pointer, reflect.Interface: + if v.IsNil() { + return "null", nil + } + return marshalValue(ctx, v.Elem()) + case reflect.Slice: + n := v.Len() + elems := make([]string, n) + eg, gctx := errgroup.WithContext(ctx) + for i := 0; i < n; i++ { + i := i + eg.Go(func() error { + m, err := marshalValue(gctx, v.Index(i)) + if err != nil { + return err + } + elems[i] = m + return nil + }) + } + if err := eg.Wait(); err != nil { + return "", err + } + return fmt.Sprintf("[%s]", strings.Join(elems, ",")), nil + case reflect.Struct: + n := v.NumField() + elems := make([]string, n) + eg, gctx := errgroup.WithContext(ctx) + for i := 0; i < n; i++ { + i := i + eg.Go(func() error { + f := t.Field(i) + fv := v.Field(i) + name := f.Name + jsonTag := strings.Split(f.Tag.Get("json"), ",") + if jsonTag[0] != "" { + name = jsonTag[0] + } + isOptional := slices.Contains(jsonTag[1:], "omitempty") + if isOptional && IsZeroValue(fv.Interface()) { + return nil + } + m, err := marshalValue(gctx, fv) + if err != nil { + return err + } + if m != `""` && m != "null" { + elems[i] = fmt.Sprintf("%s:%s", name, m) + } + return nil + }) + } + if err := eg.Wait(); err != nil { + return "", err + } + nonNullElems := make([]string, 0, n) + for _, elem := range elems { + if elem != "" { + nonNullElems = append(nonNullElems, elem) + } + } + return fmt.Sprintf("{%s}", strings.Join(nonNullElems, ",")), nil + default: + panic(fmt.Errorf("unsupported argument of kind %s", t.Kind())) + } +} + +func marshalCustom(ctx context.Context, v reflect.Value) (string, error) { + result := v.MethodByName(GraphQLMarshallerID).Call([]reflect.Value{ + reflect.ValueOf(ctx), + }) + if len(result) != 2 { + panic(result) + } + err := result[1].Interface() + if err != nil { + return "", err.(error) + } + + return fmt.Sprintf("%q", result[0].String()), nil +} + +func IsZeroValue(value any) bool { + v := reflect.ValueOf(value) + kind := v.Type().Kind() + switch kind { + case reflect.Pointer: + return v.IsNil() + case reflect.Slice, reflect.Array: + return v.Len() == 0 + default: + return v.IsZero() + } +} diff --git a/dagger/internal/querybuilder/querybuilder.go b/dagger/internal/querybuilder/querybuilder.go new file mode 100644 index 000000000..ee8c9e2cb --- /dev/null +++ b/dagger/internal/querybuilder/querybuilder.go @@ -0,0 +1,206 @@ +package 
querybuilder + +import ( + "context" + "encoding/json" + "fmt" + "runtime/debug" + "strings" + "sync" + + "github.com/Khan/genqlient/graphql" + "golang.org/x/sync/errgroup" +) + +func Query() *Selection { + return &Selection{} +} + +type Selection struct { + name string + alias string + args map[string]*argument + bind interface{} + + prev *Selection + + client graphql.Client +} + +func (s *Selection) path() []*Selection { + selections := []*Selection{} + for sel := s; sel.prev != nil; sel = sel.prev { + selections = append([]*Selection{sel}, selections...) + } + + return selections +} + +func (s *Selection) Root() *Selection { + return &Selection{ + client: s.client, + } +} + +func (s *Selection) SelectWithAlias(alias, name string) *Selection { + sel := &Selection{ + name: name, + prev: s, + alias: alias, + client: s.client, + } + return sel +} + +func (s *Selection) Select(name string) *Selection { + return s.SelectWithAlias("", name) +} + +func (s *Selection) Arg(name string, value any) *Selection { + sel := *s + if sel.args == nil { + sel.args = map[string]*argument{} + } + + sel.args[name] = &argument{ + value: value, + } + return &sel +} + +func (s *Selection) Bind(v interface{}) *Selection { + sel := *s + sel.bind = v + return &sel +} + +func (s *Selection) marshalArguments(ctx context.Context) error { + eg, gctx := errgroup.WithContext(ctx) + for _, sel := range s.path() { + for _, arg := range sel.args { + arg := arg + eg.Go(func() error { + return arg.marshal(gctx) + }) + } + } + + return eg.Wait() +} + +func (s *Selection) Build(ctx context.Context) (string, error) { + if err := s.marshalArguments(ctx); err != nil { + return "", err + } + + var b strings.Builder + b.WriteString("query") + + path := s.path() + + for _, sel := range path { + b.WriteRune('{') + + if sel.alias != "" { + b.WriteString(sel.alias) + b.WriteRune(':') + } + + b.WriteString(sel.name) + + if len(sel.args) > 0 { + b.WriteRune('(') + i := 0 + for name, arg := range sel.args { + if i > 0 { + b.WriteString(", ") + } + b.WriteString(name) + b.WriteRune(':') + b.WriteString(arg.marshalled) + i++ + } + b.WriteRune(')') + } + } + + b.WriteString(strings.Repeat("}", len(path))) + return b.String(), nil +} + +func (s *Selection) unpack(data interface{}) error { + for _, i := range s.path() { + k := i.name + if i.alias != "" { + k = i.alias + } + + // Try to assert type of the value + switch f := data.(type) { + case map[string]interface{}: + data = f[k] + case []interface{}: + data = f + default: + fmt.Printf("type not found %s\n", f) + } + + if i.bind != nil { + marshalled, err := json.Marshal(data) + if err != nil { + return err + } + if err := json.Unmarshal(marshalled, s.bind); err != nil { + return err + } + } + } + + return nil +} + +func (s *Selection) Client(c graphql.Client) *Selection { + sel := *s + sel.client = c + return &sel +} + +func (s *Selection) Execute(ctx context.Context) error { + if s.client == nil { + debug.PrintStack() + return fmt.Errorf("no client configured for selection") + } + + query, err := s.Build(ctx) + if err != nil { + return err + } + + var response any + err = s.client.MakeRequest(ctx, + &graphql.Request{ + Query: query, + }, + &graphql.Response{Data: &response}, + ) + if err != nil { + return err + } + + return s.unpack(response) +} + +type argument struct { + value any + + marshalled string + marshalledErr error + once sync.Once +} + +func (a *argument) marshal(ctx context.Context) error { + a.once.Do(func() { + a.marshalled, a.marshalledErr = MarshalGQL(ctx, a.value) + }) + 
return a.marshalledErr +} diff --git a/dagger/internal/telemetry/attrs.go b/dagger/internal/telemetry/attrs.go new file mode 100644 index 000000000..9f3254919 --- /dev/null +++ b/dagger/internal/telemetry/attrs.go @@ -0,0 +1,85 @@ +package telemetry + +// The following attributes are used by the UI to interpret spans and control +// their behavior in the UI. +const ( + // The base64-encoded, protobuf-marshalled callpbv1.Call that this span + // represents. + DagCallAttr = "dagger.io/dag.call" + + // The digest of the protobuf-marshalled Call that this span represents. + // + // This value acts as a node ID in the conceptual DAG. + DagDigestAttr = "dagger.io/dag.digest" + + // The list of DAG digests that the span depends on. + // + // This is not currently used by the UI, but it could be used to drive higher + // level DAG walking processes without having to unmarshal the full call. + DagInputsAttr = "dagger.io/dag.inputs" + + // The DAG call digest that the call returned, if the call returned an + // Object. + // + // This information is used to simplify values in the UI by showing their + // highest-level creator. For example, if foo().bar() returns a().b().c(), we + // will show foo().bar() instead of a().b().c() as it will be a more + // recognizable value to the user. + DagOutputAttr = "dagger.io/dag.output" + + // Indicates that this span is "internal" and can be hidden by default. + // + // Internal spans may typically be revealed with a toggle. + UIInternalAttr = "dagger.io/ui.internal" + + // Hide child spans by default. + // + // Encapsulated child spans may typically be revealed if the parent span errors. + UIEncapsulateAttr = "dagger.io/ui.encapsulate" + + // Hide span by default. + // + // This is functionally the same as UIEncapsulateAttr, but is instead set + // on a child instead of a parent. + UIEncapsulatedAttr = "dagger.io/ui.encapsulated" + + // Substitute the span for its children and move its logs to its parent. + UIPassthroughAttr = "dagger.io/ui.passthrough" //nolint: gosec // lol + + // NB: the following attributes are not currently used. + + // Indicates that this span was a cache hit and did nothing. + CachedAttr = "dagger.io/dag.cached" + + // Indicates that this span was interrupted. + CanceledAttr = "dagger.io/dag.canceled" + + // The base64-encoded, protobuf-marshalled Buildkit LLB op payload that this + // span represents. + LLBOpAttr = "dagger.io/llb.op" + + // The digests of the LLB operations that this span depends on, allowing the + // UI to attribute their future "cost." + LLBDigestsAttr = "dagger.io/llb.digests" + + // The amount of progress that needs to be reached. + ProgressTotalAttr = "dagger.io/progress.total" + + // Current value for the progress. + ProgressCurrentAttr = "dagger.io/progress.current" + + // Indicates the units for the progress numbers. + ProgressUnitsAttr = "dagger.io/progress.units" + + // The client ID that generated this telemetry. + ClientIDAttr = "dagger.io/client.id" + + // The stdio stream a log corresponds to (1 for stdout, 2 for stderr). + StdioStreamAttr = "stdio.stream" + + // Indicates whether the log stream has ended. + StdioEOFAttr = "stdio.eof" + + // Indicates whether the log should be shown globally. 
+ LogsGlobalAttr = "dagger.io/logs.global" +) diff --git a/dagger/internal/telemetry/env.go b/dagger/internal/telemetry/env.go new file mode 100644 index 000000000..82c3528c4 --- /dev/null +++ b/dagger/internal/telemetry/env.go @@ -0,0 +1,60 @@ +package telemetry + +import ( + "context" + "os" + "strings" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/propagation" +) + +func PropagationEnv(ctx context.Context) []string { + carrier := NewEnvCarrier(false) + otel.GetTextMapPropagator().Inject(ctx, carrier) + return carrier.Env +} + +type EnvCarrier struct { + System bool + Env []string +} + +func NewEnvCarrier(system bool) *EnvCarrier { + return &EnvCarrier{ + System: system, + } +} + +var _ propagation.TextMapCarrier = (*EnvCarrier)(nil) + +func (c *EnvCarrier) Get(key string) string { + envName := strings.ToUpper(key) + for _, env := range c.Env { + env, val, ok := strings.Cut(env, "=") + if ok && env == envName { + return val + } + } + if c.System { + if envVal := os.Getenv(envName); envVal != "" { + return envVal + } + } + return "" +} + +func (c *EnvCarrier) Set(key, val string) { + c.Env = append(c.Env, strings.ToUpper(key)+"="+val) +} + +func (c *EnvCarrier) Keys() []string { + keys := make([]string, 0, len(c.Env)) + for _, env := range c.Env { + env, _, ok := strings.Cut(env, "=") + if ok { + keys = append(keys, env) + } + } + return keys +} diff --git a/dagger/internal/telemetry/exporters.go b/dagger/internal/telemetry/exporters.go new file mode 100644 index 000000000..988d1add2 --- /dev/null +++ b/dagger/internal/telemetry/exporters.go @@ -0,0 +1,120 @@ +package telemetry + +import ( + "context" + + sdklog "go.opentelemetry.io/otel/sdk/log" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" + "go.opentelemetry.io/otel/trace/noop" + "golang.org/x/sync/errgroup" +) + +type SpanForwarder struct { + Processors []sdktrace.SpanProcessor +} + +var _ sdktrace.SpanExporter = SpanForwarder{} + +type discardWritesSpan struct { + noop.Span + sdktrace.ReadOnlySpan +} + +func (s discardWritesSpan) SpanContext() trace.SpanContext { + return s.ReadOnlySpan.SpanContext() +} + +func (m SpanForwarder) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { + eg := new(errgroup.Group) + for _, p := range m.Processors { + p := p + eg.Go(func() error { + for _, span := range spans { + if span.EndTime().Before(span.StartTime()) { + p.OnStart(ctx, discardWritesSpan{noop.Span{}, span}) + } else { + p.OnEnd(span) + } + } + return nil + }) + } + return eg.Wait() +} + +func (m SpanForwarder) Shutdown(ctx context.Context) error { + eg := new(errgroup.Group) + for _, p := range m.Processors { + p := p + eg.Go(func() error { + return p.Shutdown(ctx) + }) + } + return eg.Wait() +} + +// FilterLiveSpansExporter is a SpanExporter that filters out spans that are +// currently running, as indicated by an end time older than its start time +// (typically year 1753). +type FilterLiveSpansExporter struct { + sdktrace.SpanExporter +} + +// ExportSpans passes each span to the span processor's OnEnd hook so that it +// can be batched and emitted more efficiently. 
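+//
+// In practice only completed spans are forwarded: any span whose end time
+// still precedes its start time (i.e. a live span) is dropped before the
+// wrapped exporter sees it.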
+func (exp FilterLiveSpansExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error { + filtered := make([]sdktrace.ReadOnlySpan, 0, len(spans)) + for _, span := range spans { + if span.StartTime().After(span.EndTime()) { + } else { + filtered = append(filtered, span) + } + } + if len(filtered) == 0 { + return nil + } + return exp.SpanExporter.ExportSpans(ctx, filtered) +} + +type LogForwarder struct { + Processors []sdklog.Processor +} + +var _ sdklog.Exporter = LogForwarder{} + +func (m LogForwarder) Export(ctx context.Context, logs []sdklog.Record) error { + eg := new(errgroup.Group) + for _, e := range m.Processors { + e := e + eg.Go(func() error { + for _, log := range logs { + _ = e.OnEmit(ctx, log) + } + return nil + }) + } + return eg.Wait() +} + +func (m LogForwarder) Shutdown(ctx context.Context) error { + eg := new(errgroup.Group) + for _, e := range m.Processors { + e := e + eg.Go(func() error { + return e.Shutdown(ctx) + }) + } + return eg.Wait() +} + +func (m LogForwarder) ForceFlush(ctx context.Context) error { + eg := new(errgroup.Group) + for _, e := range m.Processors { + e := e + eg.Go(func() error { + return e.ForceFlush(ctx) + }) + } + return eg.Wait() +} diff --git a/dagger/internal/telemetry/init.go b/dagger/internal/telemetry/init.go new file mode 100644 index 000000000..66d2e6d04 --- /dev/null +++ b/dagger/internal/telemetry/init.go @@ -0,0 +1,370 @@ +package telemetry + +import ( + "context" + "fmt" + "log/slog" + "net" + "net/url" + "os" + "strings" + "sync" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" + "go.opentelemetry.io/otel/propagation" + sdklog "go.opentelemetry.io/otel/sdk/log" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + "google.golang.org/grpc" +) + +func OTelConfigured() bool { + for _, env := range os.Environ() { + if strings.HasPrefix(env, "OTEL_") { + return true + } + } + return false +} + +var configuredSpanExporter sdktrace.SpanExporter +var configuredSpanExporterOnce sync.Once + +func ConfiguredSpanExporter(ctx context.Context) (sdktrace.SpanExporter, bool) { + ctx = context.WithoutCancel(ctx) + + configuredSpanExporterOnce.Do(func() { + if !OTelConfigured() { + return + } + + var err error + + var proto string + if v := os.Getenv("OTEL_EXPORTER_OTLP_TRACES_PROTOCOL"); v != "" { + proto = v + } else if v := os.Getenv("OTEL_EXPORTER_OTLP_PROTOCOL"); v != "" { + proto = v + } else { + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/protocol/exporter.md#specify-protocol + proto = "http/protobuf" + } + + var endpoint string + if v := os.Getenv("OTEL_EXPORTER_OTLP_TRACES_ENDPOINT"); v != "" { + endpoint = v + } else if v := os.Getenv("OTEL_EXPORTER_OTLP_ENDPOINT"); v != "" { + if proto == "http/protobuf" { + endpoint, err = url.JoinPath(v, "v1", "traces") + if err != nil { + slog.Warn("failed to join path", "error", err) + return + } + } else { + endpoint = v + } + } + + switch proto { + case "http/protobuf", "http": + configuredSpanExporter, err = otlptracehttp.New(ctx, + otlptracehttp.WithEndpointURL(endpoint)) + case "grpc": + var u *url.URL + u, err = url.Parse(endpoint) + if err != nil { + slog.Warn("bad OTLP logs endpoint %q: 
%w", endpoint, err) + return + } + opts := []otlptracegrpc.Option{ + otlptracegrpc.WithEndpointURL(endpoint), + } + if u.Scheme == "unix" { + dialer := func(ctx context.Context, addr string) (net.Conn, error) { + return net.Dial(u.Scheme, u.Path) + } + opts = append(opts, + otlptracegrpc.WithDialOption(grpc.WithContextDialer(dialer)), + otlptracegrpc.WithInsecure()) + } + configuredSpanExporter, err = otlptracegrpc.New(ctx, opts...) + default: + err = fmt.Errorf("unknown OTLP protocol: %s", proto) + } + if err != nil { + slog.Warn("failed to configure tracing", "error", err) + } + }) + return configuredSpanExporter, configuredSpanExporter != nil +} + +var configuredLogExporter sdklog.Exporter +var configuredLogExporterOnce sync.Once + +func ConfiguredLogExporter(ctx context.Context) (sdklog.Exporter, bool) { + ctx = context.WithoutCancel(ctx) + + configuredLogExporterOnce.Do(func() { + var err error + + var endpoint string + if v := os.Getenv("OTEL_EXPORTER_OTLP_LOGS_ENDPOINT"); v != "" { + endpoint = v + } else if v := os.Getenv("OTEL_EXPORTER_OTLP_ENDPOINT"); v != "" { + // we can't assume all OTLP endpoints support logs. better to be explicit + // than have noisy otel errors. + return + } + if endpoint == "" { + return + } + + var proto string + if v := os.Getenv("OTEL_EXPORTER_OTLP_LOGS_PROTOCOL"); v != "" { + proto = v + } else if v := os.Getenv("OTEL_EXPORTER_OTLP_PROTOCOL"); v != "" { + proto = v + } else { + // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/protocol/exporter.md#specify-protocol + proto = "http/protobuf" + } + + switch proto { + case "http/protobuf", "http": + headers := map[string]string{} + if hs := os.Getenv("OTEL_EXPORTER_OTLP_HEADERS"); hs != "" { + for _, header := range strings.Split(hs, ",") { + name, value, _ := strings.Cut(header, "=") + headers[name] = value + } + } + configuredLogExporter, err = otlploghttp.New(ctx, + otlploghttp.WithEndpointURL(endpoint), + otlploghttp.WithHeaders(headers)) + + case "grpc": + // FIXME: bring back when it's actually implemented + + // u, err := url.Parse(endpoint) + // if err != nil { + // slog.Warn("bad OTLP logs endpoint %q: %w", endpoint, err) + // return + // } + // + opts := []otlploggrpc.Option{ + // otlploggrpc.WithEndpointURL(endpoint), + } + // if u.Scheme == "unix" { + // dialer := func(ctx context.Context, addr string) (net.Conn, error) { + // return net.Dial(u.Scheme, u.Path) + // } + // opts = append(opts, + // otlploggrpc.WithDialOption(grpc.WithContextDialer(dialer)), + // otlploggrpc.WithInsecure()) + // } + configuredLogExporter, err = otlploggrpc.New(ctx, opts...) + + default: + err = fmt.Errorf("unknown OTLP protocol: %s", proto) + } + if err != nil { + slog.Warn("failed to configure logging", "error", err) + } + }) + return configuredLogExporter, configuredLogExporter != nil +} + +// FallbackResource is the fallback resource definition. A more specific +// resource should be set in Init. +func FallbackResource() *resource.Resource { + return resource.NewWithAttributes( + semconv.SchemaURL, + semconv.ServiceNameKey.String("dagger"), + ) +} + +var ( + // set by Init, closed by Close + tracerProvider *sdktrace.TracerProvider = sdktrace.NewTracerProvider() + loggerProvider *sdklog.LoggerProvider = sdklog.NewLoggerProvider() +) + +type Config struct { + // Auto-detect exporters from OTEL_* env variables. + Detect bool + + // SpanProcessors are processors to prepend to the telemetry pipeline. 
+ SpanProcessors []sdktrace.SpanProcessor + + // LiveTraceExporters are exporters that can receive updates for spans at runtime, + // rather than waiting until the span ends. + // + // Example: TUI, Cloud + LiveTraceExporters []sdktrace.SpanExporter + + // BatchedTraceExporters are exporters that receive spans in batches, after the + // spans have ended. + // + // Example: Honeycomb, Jaeger, etc. + BatchedTraceExporters []sdktrace.SpanExporter + + // LiveLogExporters are exporters that receive logs in batches of ~100ms. + LiveLogExporters []sdklog.Exporter + + // Resource is the resource describing this component and runtime + // environment. + Resource *resource.Resource +} + +// NearlyImmediate is 100ms, below which has diminishing returns in terms of +// visual perception vs. performance cost. +const NearlyImmediate = 100 * time.Millisecond + +// LiveTracesEnabled indicates that the configured OTEL_* exporter should be +// sent live span telemetry. +var LiveTracesEnabled = os.Getenv("OTEL_EXPORTER_OTLP_TRACES_LIVE") != "" + +var SpanProcessors = []sdktrace.SpanProcessor{} +var LogProcessors = []sdklog.Processor{} + +func InitEmbedded(ctx context.Context, res *resource.Resource) context.Context { + traceCfg := Config{ + Detect: false, // false, since we want "live" exporting + Resource: res, + } + if exp, ok := ConfiguredSpanExporter(ctx); ok { + traceCfg.LiveTraceExporters = append(traceCfg.LiveTraceExporters, exp) + } + if exp, ok := ConfiguredLogExporter(ctx); ok { + traceCfg.LiveLogExporters = append(traceCfg.LiveLogExporters, exp) + } + return Init(ctx, traceCfg) +} + +// Init sets up the global OpenTelemetry providers tracing, logging, and +// someday metrics providers. It is called by the CLI, the engine, and the +// container shim, so it needs to be versatile. +func Init(ctx context.Context, cfg Config) context.Context { + // Set up a text map propagator so that things, well, propagate. The default + // is a noop. + otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator( + propagation.TraceContext{}, + propagation.Baggage{}, + )) + + // Inherit trace context from env if present. + ctx = otel.GetTextMapPropagator().Extract(ctx, NewEnvCarrier(true)) + + // Log to slog. + otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) { + slog.Error("failed to emit telemetry", "error", err) + })) + + if cfg.Resource == nil { + cfg.Resource = FallbackResource() + } + + if cfg.Detect { + if exp, ok := ConfiguredSpanExporter(ctx); ok { + if LiveTracesEnabled { + cfg.LiveTraceExporters = append(cfg.LiveTraceExporters, exp) + } else { + cfg.BatchedTraceExporters = append(cfg.BatchedTraceExporters, + // Filter out unfinished spans to avoid confusing external systems. + // + // Normally we avoid sending them here by virtue of putting this into + // BatchedTraceExporters, but that only applies to the local process. + // Unfinished spans may end up here if they're proxied out of the + // engine via Params.EngineTrace. 
+ FilterLiveSpansExporter{exp}) + } + } + if exp, ok := ConfiguredLogExporter(ctx); ok { + cfg.LiveLogExporters = append(cfg.LiveLogExporters, exp) + } + } + + traceOpts := []sdktrace.TracerProviderOption{ + sdktrace.WithResource(cfg.Resource), + } + + SpanProcessors = cfg.SpanProcessors + + for _, exporter := range cfg.LiveTraceExporters { + processor := NewLiveSpanProcessor(exporter) + SpanProcessors = append(SpanProcessors, processor) + } + for _, exporter := range cfg.BatchedTraceExporters { + processor := sdktrace.NewBatchSpanProcessor(exporter) + SpanProcessors = append(SpanProcessors, processor) + } + for _, proc := range SpanProcessors { + traceOpts = append(traceOpts, sdktrace.WithSpanProcessor(proc)) + } + + tracerProvider = sdktrace.NewTracerProvider(traceOpts...) + + // Register our TracerProvider as the global so any imported instrumentation + // in the future will default to using it. + // + // NB: this is also necessary so that we can establish a root span, otherwise + // telemetry doesn't work. + otel.SetTracerProvider(tracerProvider) + + // Set up a log provider if configured. + if len(cfg.LiveLogExporters) > 0 { + logOpts := []sdklog.LoggerProviderOption{} + for _, exp := range cfg.LiveLogExporters { + processor := sdklog.NewBatchProcessor(exp, + sdklog.WithExportInterval(NearlyImmediate)) + LogProcessors = append(LogProcessors, processor) + logOpts = append(logOpts, sdklog.WithProcessor(processor)) + } + loggerProvider = sdklog.NewLoggerProvider(logOpts...) + + // TODO: someday do the following (once it exists) + // Register our TracerProvider as the global so any imported + // instrumentation in the future will default to using it. + // otel.SetLoggerProvider(loggerProvider) + } + + return ctx +} + +// Flush drains telemetry data, and is typically called just before a client +// goes away. +// +// NB: now that we wait for all spans to complete, this is less necessary, but +// it seems wise to keep it anyway, as the spots where it are needed are hard +// to find. +func Flush(ctx context.Context) { + if tracerProvider != nil { + if err := tracerProvider.ForceFlush(ctx); err != nil { + slog.Error("failed to flush spans", "error", err) + } + } +} + +// Close shuts down the global OpenTelemetry providers, flushing any remaining +// data to the configured exporters. +func Close() { + flushCtx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + Flush(flushCtx) + if tracerProvider != nil { + if err := tracerProvider.Shutdown(flushCtx); err != nil { + slog.Error("failed to shut down tracer provider", "error", err) + } + } + if loggerProvider != nil { + if err := loggerProvider.Shutdown(flushCtx); err != nil { + slog.Error("failed to shut down logger provider", "error", err) + } + } +} diff --git a/dagger/internal/telemetry/live.go b/dagger/internal/telemetry/live.go new file mode 100644 index 000000000..7294457b3 --- /dev/null +++ b/dagger/internal/telemetry/live.go @@ -0,0 +1,31 @@ +package telemetry + +import ( + "context" + + sdktrace "go.opentelemetry.io/otel/sdk/trace" +) + +// LiveSpanProcessor is a SpanProcessor whose OnStart calls OnEnd on the +// underlying SpanProcessor in order to send live telemetry. 
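+//
+// A minimal wiring sketch (assuming exp is any configured
+// sdktrace.SpanExporter):
+//
+//	tp := sdktrace.NewTracerProvider(
+//		sdktrace.WithSpanProcessor(NewLiveSpanProcessor(exp)),
+//	)
+//	otel.SetTracerProvider(tp)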
+type LiveSpanProcessor struct { + sdktrace.SpanProcessor +} + +func NewLiveSpanProcessor(exp sdktrace.SpanExporter) *LiveSpanProcessor { + return &LiveSpanProcessor{ + SpanProcessor: sdktrace.NewBatchSpanProcessor( + // NOTE: span heartbeating is handled by the Cloud exporter + exp, + sdktrace.WithBatchTimeout(NearlyImmediate), + ), + } +} + +func (p *LiveSpanProcessor) OnStart(ctx context.Context, span sdktrace.ReadWriteSpan) { + // Send a read-only snapshot of the live span downstream so it can be + // filtered out by FilterLiveSpansExporter. Otherwise the span can complete + // before being exported, resulting in two completed spans being sent, which + // will confuse traditional OpenTelemetry services. + p.SpanProcessor.OnEnd(SnapshotSpan(span)) +} diff --git a/dagger/internal/telemetry/logging.go b/dagger/internal/telemetry/logging.go new file mode 100644 index 000000000..29fbd4236 --- /dev/null +++ b/dagger/internal/telemetry/logging.go @@ -0,0 +1,102 @@ +package telemetry + +import ( + "context" + "errors" + "io" + "time" + + "go.opentelemetry.io/otel/log" +) + +// Logger returns a logger with the given name. +func Logger(name string) log.Logger { + return loggerProvider.Logger(name) // TODO more instrumentation attrs +} + +// SpanStdio returns a pair of io.WriteClosers which will send log records with +// stdio.stream=1 for stdout and stdio.stream=2 for stderr. Closing either of +// them will send a log record for that stream with an empty body and +// stdio.eof=true. +// +// SpanStdio should be used when a span represents a process that writes to +// stdout/stderr and terminates them with an EOF, to confirm that all data has +// been received. It should not be used for general-purpose logging. +// +// Both streamsm must be closed to ensure that draining completes. +func SpanStdio(ctx context.Context, name string, attrs ...log.KeyValue) SpanStreams { + logger := Logger(name) + return SpanStreams{ + Stdout: &spanStream{ + Writer: &Writer{ + ctx: ctx, + logger: logger, + attrs: append([]log.KeyValue{log.Int(StdioStreamAttr, 1)}, attrs...), + }, + }, + Stderr: &spanStream{ + Writer: &Writer{ + ctx: ctx, + logger: logger, + attrs: append([]log.KeyValue{log.Int(StdioStreamAttr, 2)}, attrs...), + }, + }, + } +} + +// Writer is an io.Writer that emits log records. +type Writer struct { + ctx context.Context + logger log.Logger + attrs []log.KeyValue +} + +// NewWriter returns a new Writer that emits log records with the given logger +// name and attributes. +func NewWriter(ctx context.Context, name string, attrs ...log.KeyValue) io.Writer { + return &Writer{ + ctx: ctx, + logger: Logger(name), + attrs: attrs, + } +} + +// Write emits a log record with the given payload as a string body. +func (w *Writer) Write(p []byte) (int, error) { + w.Emit(log.StringValue(string(p))) + return len(p), nil +} + +// Emit sends a log record with the given body and additional attributes. +func (w *Writer) Emit(body log.Value, attrs ...log.KeyValue) { + rec := log.Record{} + rec.SetTimestamp(time.Now()) + rec.SetBody(body) + rec.AddAttributes(w.attrs...) + rec.AddAttributes(attrs...) + w.logger.Emit(w.ctx, rec) +} + +// SpanStreams contains the stdout and stderr for a span. +type SpanStreams struct { + Stdout io.WriteCloser + Stderr io.WriteCloser +} + +// Calling Close closes both streams. +func (sl SpanStreams) Close() error { + return errors.Join( + sl.Stdout.Close(), + sl.Stderr.Close(), + ) +} + +type spanStream struct { + *Writer +} + +// Close emits an EOF log record. 
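+//
+// Illustrative use together with SpanStdio (a sketch; the logger name
+// "example" and the message are arbitrary):
+//
+//	streams := SpanStdio(ctx, "example")
+//	defer streams.Close()
+//	fmt.Fprintln(streams.Stdout, "hello")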
+func (w *spanStream) Close() error { + w.Writer.Emit(log.StringValue(""), log.Bool(StdioEOFAttr, true)) + return nil +} diff --git a/dagger/internal/telemetry/proxy.go b/dagger/internal/telemetry/proxy.go new file mode 100644 index 000000000..7ebea2d95 --- /dev/null +++ b/dagger/internal/telemetry/proxy.go @@ -0,0 +1,6 @@ +package telemetry + +// FIXME: this file exists to plant a "tombstone" over the previously generated +// proxy.go file. +// +// We should maybe just withoutDirectory('./internal') or something instead. diff --git a/dagger/internal/telemetry/span.go b/dagger/internal/telemetry/span.go new file mode 100644 index 000000000..35c7cadf2 --- /dev/null +++ b/dagger/internal/telemetry/span.go @@ -0,0 +1,45 @@ +package telemetry + +import ( + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" +) + +// Encapsulate can be applied to a span to indicate that this span should +// collapse its children by default. +func Encapsulate() trace.SpanStartOption { + return trace.WithAttributes(attribute.Bool(UIEncapsulateAttr, true)) +} + +// Encapsulated can be applied to a child span to indicate that it should be +// collapsed by default. +func Encapsulated() trace.SpanStartOption { + return trace.WithAttributes(attribute.Bool(UIEncapsulatedAttr, true)) +} + +// Internal can be applied to a span to indicate that this span should not be +// shown to the user by default. +func Internal() trace.SpanStartOption { + return trace.WithAttributes(attribute.Bool(UIInternalAttr, true)) +} + +// Passthrough can be applied to a span to cause the UI to skip over it and +// show its children instead. +func Passthrough() trace.SpanStartOption { + return trace.WithAttributes(attribute.Bool(UIPassthroughAttr, true)) +} + +// End is a helper to end a span with an error if the function returns an error. +// +// It is optimized for use as a defer one-liner with a function that has a +// named error return value, conventionally `rerr`. 
+// +// defer telemetry.End(span, func() error { return rerr }) +func End(span trace.Span, fn func() error) { + if err := fn(); err != nil { + span.RecordError(err) + span.SetStatus(codes.Error, err.Error()) + } + span.End() +} diff --git a/dagger/internal/telemetry/transform.go b/dagger/internal/telemetry/transform.go new file mode 100644 index 000000000..99c86c182 --- /dev/null +++ b/dagger/internal/telemetry/transform.go @@ -0,0 +1,958 @@ +package telemetry + +import ( + "fmt" + "log/slog" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/log" + "go.opentelemetry.io/otel/sdk/instrumentation" + sdklog "go.opentelemetry.io/otel/sdk/log" + "go.opentelemetry.io/otel/sdk/resource" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" + otlpcommonv1 "go.opentelemetry.io/proto/otlp/common/v1" + otlplogsv1 "go.opentelemetry.io/proto/otlp/logs/v1" + otlpresourcev1 "go.opentelemetry.io/proto/otlp/resource/v1" + otlptracev1 "go.opentelemetry.io/proto/otlp/trace/v1" +) + +func SnapshotSpan(span sdktrace.ReadOnlySpan) sdktrace.ReadOnlySpan { + return SpansFromPB(SpansToPB([]sdktrace.ReadOnlySpan{span}))[0] +} + +func LogsToPB(sdl []sdklog.Record) []*otlplogsv1.ResourceLogs { + if len(sdl) == 0 { + return nil + } + + rsm := make(map[attribute.Distinct]*otlplogsv1.ResourceLogs) + + type key struct { + r attribute.Distinct + is instrumentation.Scope + } + ssm := make(map[key]*otlplogsv1.ScopeLogs) + + var resources int + for _, sd := range sdl { + res := sd.Resource() + rKey := res.Equivalent() + k := key{ + r: rKey, + is: sd.InstrumentationScope(), + } + scopeLog, iOk := ssm[k] + if !iOk { + // Either the resource or instrumentation scope were unknown. + scopeLog = &otlplogsv1.ScopeLogs{ + Scope: InstrumentationScope(sd.InstrumentationScope()), + LogRecords: []*otlplogsv1.LogRecord{}, + SchemaUrl: sd.InstrumentationScope().SchemaURL, + } + } + scopeLog.LogRecords = append(scopeLog.LogRecords, logRecord(sd)) + ssm[k] = scopeLog + + rs, rOk := rsm[rKey] + if !rOk { + resources++ + // The resource was unknown. + rs = &otlplogsv1.ResourceLogs{ + Resource: Resource(res), + ScopeLogs: []*otlplogsv1.ScopeLogs{scopeLog}, + SchemaUrl: res.SchemaURL(), + } + rsm[rKey] = rs + continue + } + + // The resource has been seen before. Check if the instrumentation + // library lookup was unknown because if so we need to add it to the + // ResourceSpans. Otherwise, the instrumentation library has already + // been seen and the append we did above will be included it in the + // ScopeSpans reference. + if !iOk { + rs.ScopeLogs = append(rs.ScopeLogs, scopeLog) + } + } + + // Transform the categorized map into a slice + rss := make([]*otlplogsv1.ResourceLogs, 0, resources) + for _, rs := range rsm { + rss = append(rss, rs) + } + return rss +} + +func InstrumentationScope(il instrumentation.Scope) *otlpcommonv1.InstrumentationScope { + if il == (instrumentation.Scope{}) { + return nil + } + return &otlpcommonv1.InstrumentationScope{ + Name: il.Name, + Version: il.Version, + } +} + +// span transforms a Span into an OTLP span. 
+func logRecord(l sdklog.Record) *otlplogsv1.LogRecord { + attrs := []*otlpcommonv1.KeyValue{} + l.WalkAttributes(func(kv log.KeyValue) bool { + attrs = append(attrs, &otlpcommonv1.KeyValue{ + Key: kv.Key, + Value: logValueToPB(kv.Value), + }) + return true + }) + + tid, sid := l.TraceID(), l.SpanID() + s := &otlplogsv1.LogRecord{ + TimeUnixNano: uint64(l.Timestamp().UnixNano()), + SeverityNumber: otlplogsv1.SeverityNumber(l.Severity()), + SeverityText: l.SeverityText(), + Body: logValueToPB(l.Body()), + Attributes: attrs, + // DroppedAttributesCount: 0, + // Flags: 0, + TraceId: tid[:], + SpanId: sid[:], + } + + return s +} + +// Resource transforms a Resource into an OTLP Resource. +func Resource(r resource.Resource) *otlpresourcev1.Resource { + return &otlpresourcev1.Resource{Attributes: resourceAttributes(r)} +} + +// Resource transforms a Resource into an OTLP Resource. +func ResourcePtr(r *resource.Resource) *otlpresourcev1.Resource { + if r == nil { + return nil + } + return &otlpresourcev1.Resource{Attributes: resourceAttributes(*r)} +} + +func resourceAttributes(res resource.Resource) []*otlpcommonv1.KeyValue { + return iterator(res.Iter()) +} + +func iterator(iter attribute.Iterator) []*otlpcommonv1.KeyValue { + l := iter.Len() + if l == 0 { + return nil + } + + out := make([]*otlpcommonv1.KeyValue, 0, l) + for iter.Next() { + out = append(out, keyValueToPB(iter.Attribute())) + } + return out +} + +func keyValueToPB(kv attribute.KeyValue) *otlpcommonv1.KeyValue { + return &otlpcommonv1.KeyValue{Key: string(kv.Key), Value: value(kv.Value)} +} + +// value transforms an attribute value into an OTLP AnyValue. +func value(v attribute.Value) *otlpcommonv1.AnyValue { + av := new(otlpcommonv1.AnyValue) + switch v.Type() { + case attribute.BOOL: + av.Value = &otlpcommonv1.AnyValue_BoolValue{ + BoolValue: v.AsBool(), + } + case attribute.BOOLSLICE: + av.Value = &otlpcommonv1.AnyValue_ArrayValue{ + ArrayValue: &otlpcommonv1.ArrayValue{ + Values: boolSliceValues(v.AsBoolSlice()), + }, + } + case attribute.INT64: + av.Value = &otlpcommonv1.AnyValue_IntValue{ + IntValue: v.AsInt64(), + } + case attribute.INT64SLICE: + av.Value = &otlpcommonv1.AnyValue_ArrayValue{ + ArrayValue: &otlpcommonv1.ArrayValue{ + Values: int64SliceValues(v.AsInt64Slice()), + }, + } + case attribute.FLOAT64: + av.Value = &otlpcommonv1.AnyValue_DoubleValue{ + DoubleValue: v.AsFloat64(), + } + case attribute.FLOAT64SLICE: + av.Value = &otlpcommonv1.AnyValue_ArrayValue{ + ArrayValue: &otlpcommonv1.ArrayValue{ + Values: float64SliceValues(v.AsFloat64Slice()), + }, + } + case attribute.STRING: + av.Value = &otlpcommonv1.AnyValue_StringValue{ + StringValue: v.AsString(), + } + case attribute.STRINGSLICE: + av.Value = &otlpcommonv1.AnyValue_ArrayValue{ + ArrayValue: &otlpcommonv1.ArrayValue{ + Values: stringSliceValues(v.AsStringSlice()), + }, + } + default: + av.Value = &otlpcommonv1.AnyValue_StringValue{ + StringValue: "INVALID", + } + } + return av +} + +func boolSliceValues(vals []bool) []*otlpcommonv1.AnyValue { + converted := make([]*otlpcommonv1.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &otlpcommonv1.AnyValue{ + Value: &otlpcommonv1.AnyValue_BoolValue{ + BoolValue: v, + }, + } + } + return converted +} + +func int64SliceValues(vals []int64) []*otlpcommonv1.AnyValue { + converted := make([]*otlpcommonv1.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &otlpcommonv1.AnyValue{ + Value: &otlpcommonv1.AnyValue_IntValue{ + IntValue: v, + }, + } + } + return converted +} + +func 
float64SliceValues(vals []float64) []*otlpcommonv1.AnyValue { + converted := make([]*otlpcommonv1.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &otlpcommonv1.AnyValue{ + Value: &otlpcommonv1.AnyValue_DoubleValue{ + DoubleValue: v, + }, + } + } + return converted +} + +func stringSliceValues(vals []string) []*otlpcommonv1.AnyValue { + converted := make([]*otlpcommonv1.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &otlpcommonv1.AnyValue{ + Value: &otlpcommonv1.AnyValue_StringValue{ + StringValue: v, + }, + } + } + return converted +} + +// SpansFromPB transforms a slice of OTLP ResourceSpans into a slice of +// ReadOnlySpans. +func SpansFromPB(sdl []*otlptracev1.ResourceSpans) []sdktrace.ReadOnlySpan { + if len(sdl) == 0 { + return nil + } + + var out []sdktrace.ReadOnlySpan + + for _, sd := range sdl { + if sd == nil { + continue + } + + for _, sdi := range sd.ScopeSpans { + if sdi == nil { + continue + } + sda := make([]sdktrace.ReadOnlySpan, 0, len(sdi.Spans)) + for _, s := range sdi.Spans { + if s == nil { + continue + } + sda = append(sda, &readOnlySpan{ + pb: s, + is: sdi.Scope, + resource: sd.Resource, + schemaURL: sd.SchemaUrl, + }) + } + out = append(out, sda...) + } + } + + return out +} + +// SpansToPB transforms a slice of OpenTelemetry spans into a slice of OTLP +// ResourceSpans. +func SpansToPB(sdl []sdktrace.ReadOnlySpan) []*otlptracev1.ResourceSpans { + if len(sdl) == 0 { + return nil + } + + rsm := make(map[attribute.Distinct]*otlptracev1.ResourceSpans) + + type key struct { + r attribute.Distinct + is instrumentation.Scope + } + ssm := make(map[key]*otlptracev1.ScopeSpans) + + var resources int + for _, sd := range sdl { + if sd == nil { + continue + } + + rKey := sd.Resource().Equivalent() + k := key{ + r: rKey, + is: sd.InstrumentationScope(), + } + scopeSpan, iOk := ssm[k] + if !iOk { + // Either the resource or instrumentation scope were unknown. + scopeSpan = &otlptracev1.ScopeSpans{ + Scope: InstrumentationScope(sd.InstrumentationScope()), + Spans: []*otlptracev1.Span{}, + SchemaUrl: sd.InstrumentationScope().SchemaURL, + } + } + scopeSpan.Spans = append(scopeSpan.Spans, spanToPB(sd)) + ssm[k] = scopeSpan + + rs, rOk := rsm[rKey] + if !rOk { + resources++ + // The resource was unknown. + rs = &otlptracev1.ResourceSpans{ + Resource: ResourcePtr(sd.Resource()), + ScopeSpans: []*otlptracev1.ScopeSpans{scopeSpan}, + SchemaUrl: sd.Resource().SchemaURL(), + } + rsm[rKey] = rs + continue + } + + // The resource has been seen before. Check if the instrumentation + // library lookup was unknown because if so we need to add it to the + // ResourceSpans. Otherwise, the instrumentation library has already + // been seen and the append we did above will be included it in the + // ScopeSpans reference. + if !iOk { + rs.ScopeSpans = append(rs.ScopeSpans, scopeSpan) + } + } + + // Transform the categorized map into a slice + rss := make([]*otlptracev1.ResourceSpans, 0, resources) + for _, rs := range rsm { + rss = append(rss, rs) + } + return rss +} + +// spanToPB transforms a Span into an OTLP span. 
+func spanToPB(sd sdktrace.ReadOnlySpan) *otlptracev1.Span { + if sd == nil { + return nil + } + + tid := sd.SpanContext().TraceID() + sid := sd.SpanContext().SpanID() + + s := &otlptracev1.Span{ + TraceId: tid[:], + SpanId: sid[:], + TraceState: sd.SpanContext().TraceState().String(), + Status: status(sd.Status().Code, sd.Status().Description), + StartTimeUnixNano: uint64(sd.StartTime().UnixNano()), + EndTimeUnixNano: uint64(sd.EndTime().UnixNano()), + Links: linksToPB(sd.Links()), + Kind: spanKindToPB(sd.SpanKind()), + Name: sd.Name(), + Attributes: KeyValues(sd.Attributes()), + Events: spanEventsToPB(sd.Events()), + DroppedAttributesCount: uint32(sd.DroppedAttributes()), + DroppedEventsCount: uint32(sd.DroppedEvents()), + DroppedLinksCount: uint32(sd.DroppedLinks()), + } + + if psid := sd.Parent().SpanID(); psid.IsValid() { + s.ParentSpanId = psid[:] + } + s.Flags = buildSpanFlags(sd.Parent()) + + return s +} + +// status transform a span code and message into an OTLP span status. +func status(status codes.Code, message string) *otlptracev1.Status { + var c otlptracev1.Status_StatusCode + switch status { + case codes.Ok: + c = otlptracev1.Status_STATUS_CODE_OK + case codes.Error: + c = otlptracev1.Status_STATUS_CODE_ERROR + default: + c = otlptracev1.Status_STATUS_CODE_UNSET + } + return &otlptracev1.Status{ + Code: c, + Message: message, + } +} + +// KeyValues transforms a slice of attribute KeyValues into OTLP key-values. +func KeyValues(attrs []attribute.KeyValue) []*otlpcommonv1.KeyValue { + if len(attrs) == 0 { + return nil + } + + out := make([]*otlpcommonv1.KeyValue, 0, len(attrs)) + for _, kv := range attrs { + out = append(out, keyValueToPB(kv)) + } + return out +} + +// linksFromPB transforms span Links to OTLP span linksFromPB. +func linksToPB(links []sdktrace.Link) []*otlptracev1.Span_Link { + if len(links) == 0 { + return nil + } + + sl := make([]*otlptracev1.Span_Link, 0, len(links)) + for _, otLink := range links { + // This redefinition is necessary to prevent otLink.*ID[:] copies + // being reused -- in short we need a new otLink per iteration. + otLink := otLink + + tid := otLink.SpanContext.TraceID() + sid := otLink.SpanContext.SpanID() + + flags := buildSpanFlags(otLink.SpanContext) + + sl = append(sl, &otlptracev1.Span_Link{ + TraceId: tid[:], + SpanId: sid[:], + Attributes: KeyValues(otLink.Attributes), + DroppedAttributesCount: uint32(otLink.DroppedAttributeCount), + Flags: flags, + }) + } + return sl +} + +func buildSpanFlags(sc trace.SpanContext) uint32 { + flags := otlptracev1.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK + if sc.IsRemote() { + flags |= otlptracev1.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK + } + + return uint32(flags) +} + +// spanEventsToPB transforms span Events to an OTLP span events. +func spanEventsToPB(es []sdktrace.Event) []*otlptracev1.Span_Event { + if len(es) == 0 { + return nil + } + + events := make([]*otlptracev1.Span_Event, len(es)) + // Transform message events + for i := 0; i < len(es); i++ { + events[i] = &otlptracev1.Span_Event{ + Name: es[i].Name, + TimeUnixNano: uint64(es[i].Time.UnixNano()), + Attributes: KeyValues(es[i].Attributes), + DroppedAttributesCount: uint32(es[i].DroppedAttributeCount), + } + } + return events +} + +// spanKindToPB transforms a SpanKind to an OTLP span kind. 
+func spanKindToPB(kind trace.SpanKind) otlptracev1.Span_SpanKind { + switch kind { + case trace.SpanKindInternal: + return otlptracev1.Span_SPAN_KIND_INTERNAL + case trace.SpanKindClient: + return otlptracev1.Span_SPAN_KIND_CLIENT + case trace.SpanKindServer: + return otlptracev1.Span_SPAN_KIND_SERVER + case trace.SpanKindProducer: + return otlptracev1.Span_SPAN_KIND_PRODUCER + case trace.SpanKindConsumer: + return otlptracev1.Span_SPAN_KIND_CONSUMER + default: + return otlptracev1.Span_SPAN_KIND_UNSPECIFIED + } +} + +type readOnlySpan struct { + // Embed the interface to implement the private method. + sdktrace.ReadOnlySpan + + pb *otlptracev1.Span + is *otlpcommonv1.InstrumentationScope + resource *otlpresourcev1.Resource + schemaURL string +} + +func (s *readOnlySpan) Name() string { + return s.pb.Name +} + +func (s *readOnlySpan) SpanContext() trace.SpanContext { + var tid trace.TraceID + copy(tid[:], s.pb.TraceId) + var sid trace.SpanID + copy(sid[:], s.pb.SpanId) + + st, _ := trace.ParseTraceState(s.pb.TraceState) + + return trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: tid, + SpanID: sid, + TraceState: st, + TraceFlags: trace.FlagsSampled, + }) +} + +func (s *readOnlySpan) Parent() trace.SpanContext { + if len(s.pb.ParentSpanId) == 0 { + return trace.SpanContext{} + } + var tid trace.TraceID + copy(tid[:], s.pb.TraceId) + var psid trace.SpanID + copy(psid[:], s.pb.ParentSpanId) + return trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: tid, + SpanID: psid, + }) +} + +func (s *readOnlySpan) SpanKind() trace.SpanKind { + return spanKindFromPB(s.pb.Kind) +} + +func (s *readOnlySpan) StartTime() time.Time { + return time.Unix(0, int64(s.pb.StartTimeUnixNano)) +} + +func (s *readOnlySpan) EndTime() time.Time { + return time.Unix(0, int64(s.pb.EndTimeUnixNano)) +} + +func (s *readOnlySpan) Attributes() []attribute.KeyValue { + return AttributesFromProto(s.pb.Attributes) +} + +func (s *readOnlySpan) Links() []sdktrace.Link { + return linksFromPB(s.pb.Links) +} + +func (s *readOnlySpan) Events() []sdktrace.Event { + return spanEventsFromPB(s.pb.Events) +} + +func (s *readOnlySpan) Status() sdktrace.Status { + return sdktrace.Status{ + Code: statusCode(s.pb.Status), + Description: s.pb.Status.GetMessage(), + } +} + +func (s *readOnlySpan) InstrumentationScope() instrumentation.Scope { + return instrumentationScope(s.is) +} + +// Deprecated: use InstrumentationScope. +func (s *readOnlySpan) InstrumentationLibrary() instrumentation.Library { + return s.InstrumentationScope() +} + +// Resource returns information about the entity that produced the span. +func (s *readOnlySpan) Resource() *resource.Resource { + if s.resource == nil { + return nil + } + if s.schemaURL != "" { + return resource.NewWithAttributes(s.schemaURL, AttributesFromProto(s.resource.Attributes)...) + } + return resource.NewSchemaless(AttributesFromProto(s.resource.Attributes)...) +} + +// DroppedAttributes returns the number of attributes dropped by the span +// due to limits being reached. +func (s *readOnlySpan) DroppedAttributes() int { + return int(s.pb.DroppedAttributesCount) +} + +// DroppedLinks returns the number of links dropped by the span due to +// limits being reached. +func (s *readOnlySpan) DroppedLinks() int { + return int(s.pb.DroppedLinksCount) +} + +// DroppedEvents returns the number of events dropped by the span due to +// limits being reached. 
+func (s *readOnlySpan) DroppedEvents() int { + return int(s.pb.DroppedEventsCount) +} + +// ChildSpanCount returns the count of spans that consider the span a +// direct parent. +func (s *readOnlySpan) ChildSpanCount() int { + return 0 +} + +var _ sdktrace.ReadOnlySpan = &readOnlySpan{} + +// status transform a OTLP span status into span code. +func statusCode(st *otlptracev1.Status) codes.Code { + if st == nil { + return codes.Unset + } + switch st.Code { + case otlptracev1.Status_STATUS_CODE_ERROR: + return codes.Error + default: + return codes.Ok + } +} + +// linksFromPB transforms OTLP span links to span Links. +func linksFromPB(links []*otlptracev1.Span_Link) []sdktrace.Link { + if len(links) == 0 { + return nil + } + + sl := make([]sdktrace.Link, 0, len(links)) + for _, otLink := range links { + if otLink == nil { + continue + } + // This redefinition is necessary to prevent otLink.*ID[:] copies + // being reused -- in short we need a new otLink per iteration. + otLink := otLink + + var tid trace.TraceID + copy(tid[:], otLink.TraceId) + var sid trace.SpanID + copy(sid[:], otLink.SpanId) + + sctx := trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: tid, + SpanID: sid, + }) + + sl = append(sl, sdktrace.Link{ + SpanContext: sctx, + Attributes: AttributesFromProto(otLink.Attributes), + }) + } + return sl +} + +// spanEventsFromPB transforms OTLP span events to span Events. +func spanEventsFromPB(es []*otlptracev1.Span_Event) []sdktrace.Event { + if len(es) == 0 { + return nil + } + + evCount := len(es) + events := make([]sdktrace.Event, 0, evCount) + messageEvents := 0 + + // Transform message events + for _, e := range es { + if e == nil { + continue + } + messageEvents++ + events = append(events, + sdktrace.Event{ + Name: e.Name, + Time: time.Unix(0, int64(e.TimeUnixNano)), + Attributes: AttributesFromProto(e.Attributes), + DroppedAttributeCount: int(e.DroppedAttributesCount), + }, + ) + } + + return events +} + +// spanKindFromPB transforms a an OTLP span kind to SpanKind. 
+func spanKindFromPB(kind otlptracev1.Span_SpanKind) trace.SpanKind { + switch kind { + case otlptracev1.Span_SPAN_KIND_INTERNAL: + return trace.SpanKindInternal + case otlptracev1.Span_SPAN_KIND_CLIENT: + return trace.SpanKindClient + case otlptracev1.Span_SPAN_KIND_SERVER: + return trace.SpanKindServer + case otlptracev1.Span_SPAN_KIND_PRODUCER: + return trace.SpanKindProducer + case otlptracev1.Span_SPAN_KIND_CONSUMER: + return trace.SpanKindConsumer + default: + return trace.SpanKindUnspecified + } +} + +// AttributesFromProto transforms a slice of OTLP attribute key-values into a slice of KeyValues +func AttributesFromProto(attrs []*otlpcommonv1.KeyValue) []attribute.KeyValue { + if len(attrs) == 0 { + return nil + } + + out := make([]attribute.KeyValue, 0, len(attrs)) + for _, a := range attrs { + if a == nil { + continue + } + kv := attribute.KeyValue{ + Key: attribute.Key(a.Key), + Value: attrValue(a.Value), + } + out = append(out, kv) + } + return out +} + +func boolArray(kv []*otlpcommonv1.AnyValue) attribute.Value { + arr := make([]bool, len(kv)) + for i, v := range kv { + if v != nil { + arr[i] = v.GetBoolValue() + } + } + return attribute.BoolSliceValue(arr) +} + +func intArray(kv []*otlpcommonv1.AnyValue) attribute.Value { + arr := make([]int64, len(kv)) + for i, v := range kv { + if v != nil { + arr[i] = v.GetIntValue() + } + } + return attribute.Int64SliceValue(arr) +} + +func doubleArray(kv []*otlpcommonv1.AnyValue) attribute.Value { + arr := make([]float64, len(kv)) + for i, v := range kv { + if v != nil { + arr[i] = v.GetDoubleValue() + } + } + return attribute.Float64SliceValue(arr) +} + +func stringArray(kv []*otlpcommonv1.AnyValue) attribute.Value { + arr := make([]string, len(kv)) + for i, v := range kv { + if v != nil { + arr[i] = v.GetStringValue() + } + } + return attribute.StringSliceValue(arr) +} + +func anyArrayToAttrValue(anyVals []*otlpcommonv1.AnyValue) attribute.Value { + vals := make([]attribute.Value, 0, len(anyVals)) + types := map[attribute.Type]int{} + for _, v := range anyVals { + val := attrValue(v) + types[val.Type()]++ + vals = append(vals, val) + } + + var arrType attribute.Type + switch len(types) { + case 0: + // empty; assume string slice + return attribute.StringSliceValue(nil) + case 1: + for arrType = range types { + } + default: + slog.Error("anyArrayToAttrValue: mixed types in Any array", + "types", fmt.Sprintf("%v", types)) + return attribute.StringValue(fmt.Sprintf("%v", vals)) + } + + switch arrType { + case attribute.STRING: + return stringArray(anyVals) + case attribute.INT64: + return intArray(anyVals) + case attribute.FLOAT64: + return doubleArray(anyVals) + case attribute.BOOL: + return boolArray(anyVals) + default: + slog.Error("anyArrayToAttrValue: unhandled array value type conversion", "type", arrType) + return attribute.StringValue(fmt.Sprintf("UNHANDLED ARRAY ELEM TYPE: %+v (%s)", vals, arrType)) + } +} + +func instrumentationScope(is *otlpcommonv1.InstrumentationScope) instrumentation.Scope { + if is == nil { + return instrumentation.Scope{} + } + return instrumentation.Scope{ + Name: is.Name, + Version: is.Version, + } +} + +func LogsFromPB(resLogs []*otlplogsv1.ResourceLogs) []sdklog.Record { + logs := []sdklog.Record{} + for _, rl := range resLogs { + for _, scopeLog := range rl.GetScopeLogs() { + for _, rec := range scopeLog.GetLogRecords() { + var logRec sdklog.Record + logRec.SetTraceID(trace.TraceID(rec.GetTraceId())) + logRec.SetSpanID(trace.SpanID(rec.GetSpanId())) + logRec.SetTimestamp(time.Unix(0, 
int64(rec.GetTimeUnixNano()))) + logRec.SetBody(logValueFromPB(rec.GetBody())) + logRec.SetSeverity(log.Severity(rec.GetSeverityNumber())) + logRec.SetSeverityText(rec.GetSeverityText()) + logRec.SetObservedTimestamp(time.Unix(0, int64(rec.GetObservedTimeUnixNano()))) + logRec.SetAttributes(logKVs(rec.GetAttributes())...) + logs = append(logs, logRec) + } + } + } + return logs +} + +func logKVs(kvs []*otlpcommonv1.KeyValue) []log.KeyValue { + res := make([]log.KeyValue, len(kvs)) + for i, kv := range kvs { + res[i] = logKeyValue(kv) + } + return res +} + +func logKeyValue(v *otlpcommonv1.KeyValue) log.KeyValue { + return log.KeyValue{ + Key: v.GetKey(), + Value: logValueFromPB(v.GetValue()), + } +} + +func attrValue(v *otlpcommonv1.AnyValue) attribute.Value { + switch x := v.Value.(type) { + case *otlpcommonv1.AnyValue_StringValue: + return attribute.StringValue(v.GetStringValue()) + case *otlpcommonv1.AnyValue_DoubleValue: + return attribute.Float64Value(v.GetDoubleValue()) + case *otlpcommonv1.AnyValue_IntValue: + return attribute.Int64Value(v.GetIntValue()) + case *otlpcommonv1.AnyValue_BoolValue: + return attribute.BoolValue(v.GetBoolValue()) + case *otlpcommonv1.AnyValue_ArrayValue: + return anyArrayToAttrValue(x.ArrayValue.GetValues()) + case *otlpcommonv1.AnyValue_BytesValue: + return attribute.StringValue(string(x.BytesValue)) + default: + slog.Error("otlpcommonv1.AnyValue -> attribute.Value: unhandled type conversion", "type", fmt.Sprintf("%T", x)) + return attribute.StringValue(fmt.Sprintf("UNHANDLED ATTR TYPE: %v (%T)", x, x)) + } +} + +func logValueFromPB(v *otlpcommonv1.AnyValue) log.Value { + switch x := v.Value.(type) { + case *otlpcommonv1.AnyValue_StringValue: + return log.StringValue(v.GetStringValue()) + case *otlpcommonv1.AnyValue_DoubleValue: + return log.Float64Value(v.GetDoubleValue()) + case *otlpcommonv1.AnyValue_IntValue: + return log.Int64Value(v.GetIntValue()) + case *otlpcommonv1.AnyValue_BoolValue: + return log.BoolValue(v.GetBoolValue()) + case *otlpcommonv1.AnyValue_KvlistValue: + kvs := make([]log.KeyValue, 0, len(x.KvlistValue.GetValues())) + for _, kv := range x.KvlistValue.GetValues() { + kvs = append(kvs, logKeyValue(kv)) + } + return log.MapValue(kvs...) + case *otlpcommonv1.AnyValue_ArrayValue: + vals := make([]log.Value, 0, len(x.ArrayValue.GetValues())) + for _, v := range x.ArrayValue.GetValues() { + vals = append(vals, logValueFromPB(v)) + } + return log.SliceValue(vals...) + case *otlpcommonv1.AnyValue_BytesValue: + return log.BytesValue(x.BytesValue) + default: + slog.Error("unhandled otlpcommonv1.AnyValue -> log.Value conversion", "type", fmt.Sprintf("%T", x)) + return log.StringValue(fmt.Sprintf("UNHANDLED LOG VALUE TYPE: %v (%T)", x, x)) + } +} + +// Value transforms an attribute Value into an OTLP AnyValue. 
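+// Slice and map values are converted element by element, recursively, and
+// unsupported kinds fall back to the string "INVALID".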
+func logValueToPB(v log.Value) *otlpcommonv1.AnyValue { + av := new(otlpcommonv1.AnyValue) + switch v.Kind() { + case log.KindBool: + av.Value = &otlpcommonv1.AnyValue_BoolValue{ + BoolValue: v.AsBool(), + } + case log.KindInt64: + av.Value = &otlpcommonv1.AnyValue_IntValue{ + IntValue: v.AsInt64(), + } + case log.KindFloat64: + av.Value = &otlpcommonv1.AnyValue_DoubleValue{ + DoubleValue: v.AsFloat64(), + } + case log.KindString: + av.Value = &otlpcommonv1.AnyValue_StringValue{ + StringValue: v.AsString(), + } + case log.KindSlice: + array := &otlpcommonv1.ArrayValue{} + for _, e := range v.AsSlice() { + array.Values = append(array.Values, logValueToPB(e)) + } + av.Value = &otlpcommonv1.AnyValue_ArrayValue{ + ArrayValue: array, + } + case log.KindMap: + kvList := &otlpcommonv1.KeyValueList{} + for _, e := range v.AsMap() { + kvList.Values = append(kvList.Values, &otlpcommonv1.KeyValue{ + Key: e.Key, + Value: logValueToPB(e.Value), + }) + } + av.Value = &otlpcommonv1.AnyValue_KvlistValue{ + KvlistValue: kvList, + } + default: + av.Value = &otlpcommonv1.AnyValue_StringValue{ + StringValue: "INVALID", + } + } + return av +} diff --git a/go.work.sum b/go.work.sum index e09f064a7..7b3da5ca9 100644 --- a/go.work.sum +++ b/go.work.sum @@ -143,3 +143,4 @@ k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=