diff --git a/go.mod b/go.mod index fa58129ba735..9ef0a61b710d 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/Microsoft/go-winio v0.6.2 github.com/ProtonMail/go-crypto v1.3.0 github.com/aws/aws-sdk-go-v2/config v1.32.12 - github.com/compose-spec/compose-go/v2 v2.9.1 + github.com/compose-spec/compose-go/v2 v2.10.2 github.com/containerd/console v1.0.5 github.com/containerd/containerd/v2 v2.2.2 github.com/containerd/continuity v0.4.5 @@ -224,6 +224,7 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.40.0 // indirect go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v4 v4.0.0-rc.4 // indirect golang.org/x/net v0.51.0 // indirect golang.org/x/oauth2 v0.34.0 // indirect golang.org/x/time v0.14.0 // indirect diff --git a/go.sum b/go.sum index 0f41dd133386..8d275bc4920a 100644 --- a/go.sum +++ b/go.sum @@ -110,8 +110,8 @@ github.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg github.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= -github.com/compose-spec/compose-go/v2 v2.9.1 h1:8UwI+ujNU+9Ffkf/YgAm/qM9/eU7Jn8nHzWG721W4rs= -github.com/compose-spec/compose-go/v2 v2.9.1/go.mod h1:Oky9AZGTRB4E+0VbTPZTUu4Kp+oEMMuwZXZtPPVT1iE= +github.com/compose-spec/compose-go/v2 v2.10.2 h1:USa1NUbDcl/cjb8T9iwnuFsnO79H+2ho2L5SjFKz3uI= +github.com/compose-spec/compose-go/v2 v2.10.2/go.mod h1:ZU6zlcweCZKyiB7BVfCizQT9XmkEIMFE+PRZydVcsZg= github.com/containerd/cgroups/v3 v3.1.3 h1:eUNflyMddm18+yrDmZPn3jI7C5hJ9ahABE5q6dyLYXQ= github.com/containerd/cgroups/v3 v3.1.3/go.mod h1:PKZ2AcWmSBsY/tJUVhtS/rluX0b1uq1GmPO1ElCmbOw= github.com/containerd/console v1.0.5 h1:R0ymNeydRqH2DmakFNdmjR2k0t7UPuiOV/N/27/qqsc= @@ -664,6 +664,8 @@ 
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +go.yaml.in/yaml/v4 v4.0.0-rc.4 h1:UP4+v6fFrBIb1l934bDl//mmnoIZEDK0idg1+AIvX5U= +go.yaml.in/yaml/v4 v4.0.0-rc.4/go.mod h1:aZqd9kCMsGL7AuUv/m/PvWLdg5sjJsZ4oHDEnfPPfY0= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= diff --git a/vendor/github.com/compose-spec/compose-go/v2/cli/options.go b/vendor/github.com/compose-spec/compose-go/v2/cli/options.go index 13a38242f604..69ea56543685 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/cli/options.go +++ b/vendor/github.com/compose-spec/compose-go/v2/cli/options.go @@ -25,7 +25,7 @@ import ( "strings" "github.com/sirupsen/logrus" - "go.yaml.in/yaml/v3" + "go.yaml.in/yaml/v4" "github.com/compose-spec/compose-go/v2/consts" "github.com/compose-spec/compose-go/v2/dotenv" diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/extends.go b/vendor/github.com/compose-spec/compose-go/v2/loader/extends.go index c4fd0be205e3..d85e84bae384 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/loader/extends.go +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/extends.go @@ -27,7 +27,7 @@ import ( "github.com/compose-spec/compose-go/v2/types" ) -func ApplyExtends(ctx context.Context, dict map[string]any, opts *Options, tracker *cycleTracker, post ...PostProcessor) error { +func ApplyExtends(ctx context.Context, dict map[string]any, opts *Options, tracker *cycleTracker, post PostProcessor) error { a, ok := dict["services"] if 
!ok { return nil @@ -37,7 +37,7 @@ func ApplyExtends(ctx context.Context, dict map[string]any, opts *Options, track return fmt.Errorf("services must be a mapping") } for name := range services { - merged, err := applyServiceExtends(ctx, name, services, opts, tracker, post...) + merged, err := applyServiceExtends(ctx, name, services, opts, tracker, post) if err != nil { return err } @@ -47,7 +47,7 @@ func ApplyExtends(ctx context.Context, dict map[string]any, opts *Options, track return nil } -func applyServiceExtends(ctx context.Context, name string, services map[string]any, opts *Options, tracker *cycleTracker, post ...PostProcessor) (any, error) { +func applyServiceExtends(ctx context.Context, name string, services map[string]any, opts *Options, tracker *cycleTracker, post PostProcessor) (any, error) { s := services[name] if s == nil { return nil, nil @@ -81,7 +81,7 @@ func applyServiceExtends(ctx context.Context, name string, services map[string]a var ( base any - processor PostProcessor = NoopPostProcessor{} + processor = post ) if file != nil { @@ -114,16 +114,15 @@ func applyServiceExtends(ctx context.Context, name string, services map[string]a } source := deepClone(base).(map[string]any) - for _, processor := range post { - err = processor.Apply(map[string]any{ - "services": map[string]any{ - name: source, - }, - }) - if err != nil { - return nil, err - } + err = post.Apply(map[string]any{ + "services": map[string]any{ + name: source, + }, + }) + if err != nil { + return nil, err } + merged, err := override.ExtendService(source, service) if err != nil { return nil, err diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/full-example.yml b/vendor/github.com/compose-spec/compose-go/v2/loader/full-example.yml deleted file mode 100644 index 944b2d47ff61..000000000000 --- a/vendor/github.com/compose-spec/compose-go/v2/loader/full-example.yml +++ /dev/null @@ -1,461 +0,0 @@ -name: full_example_project_name -services: - - bar: - build: - 
dockerfile_inline: | - FROM alpine - RUN echo "hello" > /world.txt - - foo: - annotations: - - com.example.foo=bar - build: - context: ./dir - dockerfile: Dockerfile - args: - foo: bar - ssh: - - default - target: foo - network: foo - cache_from: - - foo - - bar - labels: [FOO=BAR] - additional_contexts: - foo: ./bar - secrets: - - source: secret1 - target: /run/secrets/secret1 - - source: secret2 - target: my_secret - uid: '103' - gid: '103' - mode: 0440 - tags: - - foo:v1.0.0 - - docker.io/username/foo:my-other-tag - - ${COMPOSE_PROJECT_NAME}:1.0.0 - platforms: - - linux/amd64 - - linux/arm64 - - - cap_add: - - ALL - - cap_drop: - - NET_ADMIN - - SYS_ADMIN - - cgroup_parent: m-executor-abcd - - # String or list - command: bundle exec thin -p 3000 - # command: ["bundle", "exec", "thin", "-p", "3000"] - - configs: - - config1 - - source: config2 - target: /my_config - uid: '103' - gid: '103' - mode: 0440 - - container_name: my-web-container - - depends_on: - - db - - redis - - deploy: - mode: replicated - replicas: 6 - labels: [FOO=BAR] - rollback_config: - parallelism: 3 - delay: 10s - failure_action: continue - monitor: 60s - max_failure_ratio: 0.3 - order: start-first - update_config: - parallelism: 3 - delay: 10s - failure_action: continue - monitor: 60s - max_failure_ratio: 0.3 - order: start-first - resources: - limits: - cpus: '0.001' - memory: 50M - reservations: - cpus: '0.0001' - memory: 20M - generic_resources: - - discrete_resource_spec: - kind: 'gpu' - value: 2 - - discrete_resource_spec: - kind: 'ssd' - value: 1 - restart_policy: - condition: on-failure - delay: 5s - max_attempts: 3 - window: 120s - placement: - constraints: [node=foo] - max_replicas_per_node: 5 - preferences: - - spread: node.labels.az - endpoint_mode: dnsrr - - device_cgroup_rules: - - "c 1:3 mr" - - "a 7:* rmw" - - devices: - - source: /dev/ttyUSB0 - target: /dev/ttyUSB0 - permissions: rwm - - # String or list - # dns: 8.8.8.8 - dns: - - 8.8.8.8 - - 9.9.9.9 - - # String or list - # 
dns_search: example.com - dns_search: - - dc1.example.com - - dc2.example.com - - domainname: foo.com - - # String or list - # entrypoint: /code/entrypoint.sh -p 3000 - entrypoint: ["/code/entrypoint.sh", "-p", "3000"] - - # String or list - # env_file: .env - env_file: - - ./example1.env - - path: ./example2.env - required: false - - # Mapping or list - # Mapping values can be strings, numbers or null - # Booleans are not allowed - must be quoted - environment: - BAZ: baz_from_service_def - QUX: - # environment: - # - RACK_ENV=development - # - SHOW=true - # - SESSION_SECRET - - # Items can be strings or numbers - expose: - - "3000" - - 8000 - - external_links: - - redis_1 - - project_db_1:mysql - - project_db_1:postgresql - - # Mapping or list - # Mapping values must be strings - # extra_hosts: - # somehost: "162.242.195.82" - # otherhost: "50.31.209.229" - extra_hosts: - - "otherhost:50.31.209.229" - - "somehost:162.242.195.82" - - hostname: foo - - healthcheck: - test: echo "hello world" - interval: 10s - timeout: 1s - retries: 5 - start_period: 15s - start_interval: 5s - - # Any valid image reference - repo, tag, id, sha - image: redis - # image: ubuntu:14.04 - # image: tutum/influxdb - # image: example-registry.com:4000/postgresql - # image: a4bc65fd - # image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d - - ipc: host - - uts: host - - # Mapping or list - # Mapping values can be strings, numbers or null - labels: - com.example.description: "Accounting webapp" - com.example.number: 42 - com.example.empty-label: - # labels: - # - "com.example.description=Accounting webapp" - # - "com.example.number=42" - # - "com.example.empty-label" - - label_file: - - ./example1.label - - ./example2.label - - links: - - db - - db:database - - redis - - logging: - driver: syslog - options: - syslog-address: "tcp://192.168.0.42:123" - - mac_address: 02:42:ac:11:65:43 - - # network_mode: "bridge" - # network_mode: "host" - # network_mode: 
"none" - # Use the network mode of an arbitrary container from another service - # network_mode: "service:db" - # Use the network mode of another container, specified by name or id - # network_mode: "container:some-container" - network_mode: "container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b" - - networks: - some-network: - aliases: - - alias1 - - alias3 - other-network: - ipv4_address: 172.16.238.10 - ipv6_address: 2001:3984:3989::10 - mac_address: 02:42:72:98:65:08 - other-other-network: - - pid: "host" - - ports: - - 3000 - - "3001-3005" - - "8000:8000" - - "9090-9091:8080-8081" - - "49100:22" - - "127.0.0.1:8001:8001" - - "127.0.0.1:5000-5010:5000-5010" - - privileged: true - - read_only: true - - restart: always - - secrets: - - source: secret1 - target: /run/secrets/secret1 - - source: secret2 - target: my_secret - uid: '103' - gid: '103' - mode: 0440 - - security_opt: - - label=level:s0:c100,c200 - - label=type:svirt_apache_t - - stdin_open: true - - stop_grace_period: 20s - - stop_signal: SIGUSR1 - storage_opt: - size: "20G" - sysctls: - net.core.somaxconn: 1024 - net.ipv4.tcp_syncookies: 0 - - # String or list - # tmpfs: /run - tmpfs: - - /run - - /tmp - - tty: true - - ulimits: - # Single number or mapping with soft + hard limits - nproc: 65535 - nofile: - soft: 20000 - hard: 40000 - - user: someone - - volumes: - # Just specify a path and let the Engine create a volume - - /var/lib/anonymous - # Specify an absolute path mapping - - /opt/data:/var/lib/data - # Path on the host, relative to the Compose file - - .:/code - - ./static:/var/www/html - # User-relative path - - ~/configs:/etc/configs:ro - # Named volume - - datavolume:/var/lib/volume - - type: bind - source: ./opt - target: /opt/cached - consistency: cached - - type: tmpfs - target: /opt/tmpfs - tmpfs: - size: 10000 - - working_dir: /code - x-bar: baz - x-foo: bar - -networks: - # Entries can be null, which specifies simply that a network - # called "{project 
name}_some-network" should be created and - # use the default driver - some-network: - - other-network: - driver: overlay - - driver_opts: - # Values can be strings or numbers - foo: "bar" - baz: 1 - - ipam: - driver: overlay - # driver_opts: - # # Values can be strings or numbers - # com.docker.network.enable_ipv6: "true" - # com.docker.network.numeric_value: 1 - config: - - subnet: 172.28.0.0/16 - ip_range: 172.28.5.0/24 - gateway: 172.28.5.254 - aux_addresses: - host1: 172.28.1.5 - host2: 172.28.1.6 - host3: 172.28.1.7 - - subnet: 2001:3984:3989::/64 - gateway: 2001:3984:3989::1 - - labels: - foo: bar - - external-network: - # Specifies that a pre-existing network called "external-network" - # can be referred to within this file as "external-network" - external: true - - other-external-network: - # Specifies that a pre-existing network called "my-cool-network" - # can be referred to within this file as "other-external-network" - external: - name: my-cool-network - x-bar: baz - x-foo: bar - -volumes: - # Entries can be null, which specifies simply that a volume - # called "{project name}_some-volume" should be created and - # use the default driver - some-volume: - - other-volume: - driver: flocker - - driver_opts: - # Values can be strings or numbers - foo: "bar" - baz: 1 - labels: - foo: bar - - another-volume: - name: "user_specified_name" - driver: vsphere - - driver_opts: - # Values can be strings or numbers - foo: "bar" - baz: 1 - - external-volume: - # Specifies that a pre-existing volume called "external-volume" - # can be referred to within this file as "external-volume" - external: true - - other-external-volume: - # Specifies that a pre-existing volume called "my-cool-volume" - # can be referred to within this file as "other-external-volume" - # This example uses the deprecated "volume.external.name" (replaced by "volume.name") - external: - name: my-cool-volume - - external-volume3: - # Specifies that a pre-existing volume called "this-is-volume3" - # 
can be referred to within this file as "external-volume3" - name: this-is-volume3 - external: true - x-bar: baz - x-foo: bar - -configs: - config1: - file: ./config_data - labels: - foo: bar - config2: - external: - name: my_config - config3: - external: true - config4: - name: foo - file: ~/config_data - x-bar: baz - x-foo: bar - -secrets: - secret1: - file: ./secret_data - labels: - foo: bar - secret2: - external: - name: my_secret - secret3: - external: true - secret4: - name: bar - environment: BAR - x-bar: baz - x-foo: bar - secret5: - file: /abs/secret_data -x-bar: baz -x-foo: bar -x-nested: - bar: baz - foo: bar diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/include.go b/vendor/github.com/compose-spec/compose-go/v2/loader/include.go index 3e49b8d88164..ff310447a773 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/loader/include.go +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/include.go @@ -21,11 +21,12 @@ import ( "fmt" "os" "path/filepath" - "reflect" "strings" "github.com/compose-spec/compose-go/v2/dotenv" interp "github.com/compose-spec/compose-go/v2/interpolation" + "github.com/compose-spec/compose-go/v2/override" + "github.com/compose-spec/compose-go/v2/tree" "github.com/compose-spec/compose-go/v2/types" ) @@ -50,7 +51,7 @@ func loadIncludeConfig(source any) ([]types.IncludeConfig, error) { return requires, err } -func ApplyInclude(ctx context.Context, workingDir string, environment types.Mapping, model map[string]any, options *Options, included []string) error { +func ApplyInclude(ctx context.Context, workingDir string, environment types.Mapping, model map[string]any, options *Options, included []string, processor PostProcessor) error { includeConfig, err := loadIncludeConfig(model["include"]) if err != nil { return err @@ -117,6 +118,9 @@ func ApplyInclude(ctx context.Context, workingDir string, environment types.Mapp } else { envFile := []string{} for _, f := range r.EnvFile { + if f == "/dev/null" { + continue 
+ } if !filepath.IsAbs(f) { f = filepath.Join(workingDir, f) s, err := os.Stat(f) @@ -151,7 +155,7 @@ func ApplyInclude(ctx context.Context, workingDir string, environment types.Mapp if err != nil { return err } - err = importResources(imported, model) + err = importResources(imported, model, processor) if err != nil { return err } @@ -161,29 +165,29 @@ func ApplyInclude(ctx context.Context, workingDir string, environment types.Mapp } // importResources import into model all resources defined by imported, and report error on conflict -func importResources(source map[string]any, target map[string]any) error { - if err := importResource(source, target, "services"); err != nil { +func importResources(source map[string]any, target map[string]any, processor PostProcessor) error { + if err := importResource(source, target, "services", processor); err != nil { return err } - if err := importResource(source, target, "volumes"); err != nil { + if err := importResource(source, target, "volumes", processor); err != nil { return err } - if err := importResource(source, target, "networks"); err != nil { + if err := importResource(source, target, "networks", processor); err != nil { return err } - if err := importResource(source, target, "secrets"); err != nil { + if err := importResource(source, target, "secrets", processor); err != nil { return err } - if err := importResource(source, target, "configs"); err != nil { + if err := importResource(source, target, "configs", processor); err != nil { return err } - if err := importResource(source, target, "models"); err != nil { + if err := importResource(source, target, "models", processor); err != nil { return err } return nil } -func importResource(source map[string]any, target map[string]any, key string) error { +func importResource(source map[string]any, target map[string]any, key string, processor PostProcessor) error { from := source[key] if from != nil { var to map[string]any @@ -193,13 +197,25 @@ func importResource(source 
map[string]any, target map[string]any, key string) er to = map[string]any{} } for name, a := range from.(map[string]any) { - if conflict, ok := to[name]; ok { - if reflect.DeepEqual(a, conflict) { - continue - } - return fmt.Errorf("%s.%s conflicts with imported resource", key, name) + conflict, ok := to[name] + if !ok { + to[name] = a + continue + } + err := processor.Apply(map[string]any{ + key: map[string]any{ + name: a, + }, + }) + if err != nil { + return err + } + + merged, err := override.MergeYaml(a, conflict, tree.NewPath(key, name)) + if err != nil { + return err } - to[name] = a + to[name] = merged } target[key] = to } diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/interpolate.go b/vendor/github.com/compose-spec/compose-go/v2/loader/interpolate.go index 491de5bdca2d..dc8dc7356d13 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/loader/interpolate.go +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/interpolate.go @@ -36,6 +36,8 @@ var interpolateTypeCastMapping = map[tree.Path]interp.Cast{ servicePath("cpus"): toFloat32, servicePath("cpu_shares"): toInt64, servicePath("init"): toBoolean, + servicePath("depends_on", tree.PathMatchAll, "required"): toBoolean, + servicePath("depends_on", tree.PathMatchAll, "restart"): toBoolean, servicePath("deploy", "replicas"): toInt, servicePath("deploy", "update_config", "parallelism"): toInt, servicePath("deploy", "update_config", "max_failure_ratio"): toFloat, diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/loader.go b/vendor/github.com/compose-spec/compose-go/v2/loader/loader.go index 99b15364dd3d..f73ad92e80db 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/loader/loader.go +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/loader.go @@ -43,7 +43,7 @@ import ( "github.com/compose-spec/compose-go/v2/validation" "github.com/go-viper/mapstructure/v2" "github.com/sirupsen/logrus" - "go.yaml.in/yaml/v3" + "go.yaml.in/yaml/v4" ) // Options supported by 
Load @@ -427,7 +427,7 @@ func loadYamlFile(ctx context.Context, file.Content = content } - processRawYaml := func(raw interface{}, processors ...PostProcessor) error { + processRawYaml := func(raw interface{}, processor PostProcessor) error { converted, err := convertToStringKeysRecursive(raw, "") if err != nil { return err @@ -447,21 +447,19 @@ func loadYamlFile(ctx context.Context, fixEmptyNotNull(cfg) if !opts.SkipExtends { - err = ApplyExtends(ctx, cfg, opts, ct, processors...) + err = ApplyExtends(ctx, cfg, opts, ct, processor) if err != nil { return err } } - for _, processor := range processors { - if err := processor.Apply(dict); err != nil { - return err - } + if err := processor.Apply(dict); err != nil { + return err } if !opts.SkipInclude { included = append(included, file.Filename) - err = ApplyInclude(ctx, workingDir, environment, cfg, opts, included) + err = ApplyInclude(ctx, workingDir, environment, cfg, opts, included, processor) if err != nil { return err } @@ -519,7 +517,7 @@ func loadYamlFile(ctx context.Context, } } } else { - if err := processRawYaml(file.Config); err != nil { + if err := processRawYaml(file.Config, NoopPostProcessor{}); err != nil { return nil, nil, err } } diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/reset.go b/vendor/github.com/compose-spec/compose-go/v2/loader/reset.go index ea98dd9b3c17..ed1fc0c3f211 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/loader/reset.go +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/reset.go @@ -22,7 +22,7 @@ import ( "strings" "github.com/compose-spec/compose-go/v2/tree" - "go.yaml.in/yaml/v3" + "go.yaml.in/yaml/v4" ) type ResetProcessor struct { diff --git a/vendor/github.com/compose-spec/compose-go/v2/override/extends.go b/vendor/github.com/compose-spec/compose-go/v2/override/extends.go index f47912ddffc0..de92fd29e77c 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/override/extends.go +++ 
b/vendor/github.com/compose-spec/compose-go/v2/override/extends.go @@ -19,7 +19,7 @@ package override import "github.com/compose-spec/compose-go/v2/tree" func ExtendService(base, override map[string]any) (map[string]any, error) { - yaml, err := mergeYaml(base, override, tree.NewPath("services.x")) + yaml, err := MergeYaml(base, override, tree.NewPath("services.x")) if err != nil { return nil, err } diff --git a/vendor/github.com/compose-spec/compose-go/v2/override/merge.go b/vendor/github.com/compose-spec/compose-go/v2/override/merge.go index 6fae6e5fe167..525299cd272e 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/override/merge.go +++ b/vendor/github.com/compose-spec/compose-go/v2/override/merge.go @@ -26,7 +26,7 @@ import ( // Merge applies overrides to a config model func Merge(right, left map[string]any) (map[string]any, error) { - merged, err := mergeYaml(right, left, tree.NewPath()) + merged, err := MergeYaml(right, left, tree.NewPath()) if err != nil { return nil, err } @@ -70,8 +70,8 @@ func init() { mergeSpecials["services.*.ulimits.*"] = mergeUlimit } -// mergeYaml merges map[string]any yaml trees handling special rules -func mergeYaml(e any, o any, p tree.Path) (any, error) { +// MergeYaml merges map[string]any yaml trees handling special rules +func MergeYaml(e any, o any, p tree.Path) (any, error) { for pattern, merger := range mergeSpecials { if p.Matches(pattern) { merged, err := merger(e, o, p) @@ -110,7 +110,7 @@ func mergeMappings(mapping map[string]any, other map[string]any, p tree.Path) (m continue } next := p.Next(k) - merged, err := mergeYaml(e, v, next) + merged, err := MergeYaml(e, v, next) if err != nil { return nil, err } diff --git a/vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json b/vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json index 15e818dc606b..462de285c642 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json +++ 
b/vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json @@ -121,6 +121,7 @@ "cache_from": {"type": "array", "items": {"type": "string"}, "description": "List of sources the image builder should use for cache resolution"}, "cache_to": {"type": "array", "items": {"type": "string"}, "description": "Cache destinations for the build cache."}, "no_cache": {"type": ["boolean", "string"], "description": "Do not use cache when building the image."}, + "no_cache_filter": {"$ref": "#/definitions/string_or_list", "description": "Do not use build cache for the specified stages."}, "additional_contexts": {"$ref": "#/definitions/list_or_dict", "description": "Additional build contexts to use, specified as a map of name to context path or URL."}, "network": {"type": "string", "description": "Network mode to use for the build. Options include 'default', 'none', 'host', or a network name."}, "provenance": {"type": ["string","boolean"], "description": "Add a provenance attestation"}, diff --git a/vendor/github.com/compose-spec/compose-go/v2/schema/schema.go b/vendor/github.com/compose-spec/compose-go/v2/schema/schema.go index 8aa7ccf627a8..a73eda245143 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/schema/schema.go +++ b/vendor/github.com/compose-spec/compose-go/v2/schema/schema.go @@ -19,6 +19,7 @@ package schema import ( // Enable support for embedded static resources _ "embed" + "encoding/json" "errors" "fmt" "slices" @@ -48,11 +49,11 @@ var Schema string // Validate uses the jsonschema to validate the configuration func Validate(config map[string]interface{}) error { compiler := jsonschema.NewCompiler() - json, err := jsonschema.UnmarshalJSON(strings.NewReader(Schema)) + shema, err := jsonschema.UnmarshalJSON(strings.NewReader(Schema)) if err != nil { return err } - err = compiler.AddResource("compose-spec.json", json) + err = compiler.AddResource("compose-spec.json", shema) if err != nil { return err } @@ -61,7 +62,21 @@ func Validate(config 
map[string]interface{}) error { Validate: durationFormatChecker, }) schema := compiler.MustCompile("compose-spec.json") - err = schema.Validate(config) + + // santhosh-tekuri doesn't allow derived types + // see https://github.com/santhosh-tekuri/jsonschema/pull/240 + marshaled, err := json.Marshal(config) + if err != nil { + return err + } + + var raw map[string]interface{} + err = json.Unmarshal(marshaled, &raw) + if err != nil { + return err + } + + err = schema.Validate(raw) var verr *jsonschema.ValidationError if ok := errors.As(err, &verr); ok { return validationError{getMostSpecificError(verr)} diff --git a/vendor/github.com/compose-spec/compose-go/v2/template/template.go b/vendor/github.com/compose-spec/compose-go/v2/template/template.go index 2d48188dd759..beb61ed803d8 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/template/template.go +++ b/vendor/github.com/compose-spec/compose-go/v2/template/template.go @@ -309,12 +309,12 @@ func withDefaultWhenPresence(substitution string, mapping Mapping, notEmpty bool return "", false, nil } name, defaultValue := partition(substitution, sep) - defaultValue, err := Substitute(defaultValue, mapping) - if err != nil { - return "", false, err - } value, ok := mapping(name) if ok && (!notEmpty || (notEmpty && value != "")) { + defaultValue, err := Substitute(defaultValue, mapping) + if err != nil { + return "", false, err + } return defaultValue, true, nil } return value, true, nil @@ -329,12 +329,12 @@ func withDefaultWhenAbsence(substitution string, mapping Mapping, emptyOrUnset b return "", false, nil } name, defaultValue := partition(substitution, sep) - defaultValue, err := Substitute(defaultValue, mapping) - if err != nil { - return "", false, err - } value, ok := mapping(name) if !ok || (emptyOrUnset && value == "") { + defaultValue, err := Substitute(defaultValue, mapping) + if err != nil { + return "", false, err + } return defaultValue, true, nil } return value, true, nil @@ -345,12 +345,12 @@ func 
withRequired(substitution string, mapping Mapping, sep string, valid func(s return "", false, nil } name, errorMessage := partition(substitution, sep) - errorMessage, err := Substitute(errorMessage, mapping) - if err != nil { - return "", false, err - } value, ok := mapping(name) if !ok || !valid(value) { + errorMessage, err := Substitute(errorMessage, mapping) + if err != nil { + return "", false, err + } return "", true, &MissingRequiredError{ Reason: errorMessage, Variable: name, diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/defaults.go b/vendor/github.com/compose-spec/compose-go/v2/transform/defaults.go index 5d305a1de62d..b82da69471a8 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/transform/defaults.go +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/defaults.go @@ -29,6 +29,7 @@ func init() { DefaultValues["services.*.ports.*"] = portDefaults DefaultValues["services.*.deploy.resources.reservations.devices.*"] = deviceRequestDefaults DefaultValues["services.*.gpus.*"] = deviceRequestDefaults + DefaultValues["services.*.volumes.*.bind"] = defaultVolumeBind } // RegisterDefaultValue registers a custom transformer for the given path pattern diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/volume.go b/vendor/github.com/compose-spec/compose-go/v2/transform/volume.go index b08e8b1aaa9a..6aa59cf15ba8 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/transform/volume.go +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/volume.go @@ -50,3 +50,14 @@ func cleanTarget(target string) string { } return path.Clean(target) } + +func defaultVolumeBind(data any, p tree.Path, _ bool) (any, error) { + bind, ok := data.(map[string]any) + if !ok { + return data, fmt.Errorf("%s: invalid type %T for service volume bind", p, data) + } + if _, ok := bind["create_host_path"]; !ok { + bind["create_host_path"] = true + } + return bind, nil +} diff --git 
a/vendor/github.com/compose-spec/compose-go/v2/types/build.go b/vendor/github.com/compose-spec/compose-go/v2/types/build.go new file mode 100644 index 000000000000..98931400754a --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/build.go @@ -0,0 +1,48 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +// BuildConfig is a type for build +type BuildConfig struct { + Context string `yaml:"context,omitempty" json:"context,omitempty"` + Dockerfile string `yaml:"dockerfile,omitempty" json:"dockerfile,omitempty"` + DockerfileInline string `yaml:"dockerfile_inline,omitempty" json:"dockerfile_inline,omitempty"` + Entitlements []string `yaml:"entitlements,omitempty" json:"entitlements,omitempty"` + Args MappingWithEquals `yaml:"args,omitempty" json:"args,omitempty"` + Provenance string `yaml:"provenance,omitempty" json:"provenance,omitempty"` + SBOM string `yaml:"sbom,omitempty" json:"sbom,omitempty"` + SSH SSHConfig `yaml:"ssh,omitempty" json:"ssh,omitempty"` + Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` + CacheFrom StringList `yaml:"cache_from,omitempty" json:"cache_from,omitempty"` + CacheTo StringList `yaml:"cache_to,omitempty" json:"cache_to,omitempty"` + NoCache bool `yaml:"no_cache,omitempty" json:"no_cache,omitempty"` + NoCacheFilter StringList `yaml:"no_cache_filter,omitempty" json:"no_cache_filter,omitempty"` + AdditionalContexts Mapping 
`yaml:"additional_contexts,omitempty" json:"additional_contexts,omitempty"` + Pull bool `yaml:"pull,omitempty" json:"pull,omitempty"` + ExtraHosts HostsList `yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"` + Isolation string `yaml:"isolation,omitempty" json:"isolation,omitempty"` + Network string `yaml:"network,omitempty" json:"network,omitempty"` + Target string `yaml:"target,omitempty" json:"target,omitempty"` + Secrets []ServiceSecretConfig `yaml:"secrets,omitempty" json:"secrets,omitempty"` + ShmSize UnitBytes `yaml:"shm_size,omitempty" json:"shm_size,omitempty"` + Tags StringList `yaml:"tags,omitempty" json:"tags,omitempty"` + Ulimits map[string]*UlimitsConfig `yaml:"ulimits,omitempty" json:"ulimits,omitempty"` + Platforms StringList `yaml:"platforms,omitempty" json:"platforms,omitempty"` + Privileged bool `yaml:"privileged,omitempty" json:"privileged,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/command.go b/vendor/github.com/compose-spec/compose-go/v2/types/command.go index 7e56ce238bcf..559dc3050d37 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/types/command.go +++ b/vendor/github.com/compose-spec/compose-go/v2/types/command.go @@ -34,7 +34,7 @@ import "github.com/mattn/go-shellwords" // preserved so that it can override any base value (e.g. container entrypoint). // // The different semantics between YAML and JSON are due to limitations with -// JSON marshaling + `omitempty` in the Go stdlib, while go.yaml.in/yaml/v3 gives +// JSON marshaling + `omitempty` in the Go stdlib, while go.yaml.in/yaml/v4 gives // us more flexibility via the yaml.IsZeroer interface. // // In the future, it might make sense to make fields of this type be @@ -58,7 +58,7 @@ func (s ShellCommand) IsZero() bool { // accurately if the `omitempty` struct tag is omitted/forgotten. 
// // A similar MarshalJSON() implementation is not needed because the Go stdlib -// already serializes nil slices to `null`, whereas go.yaml.in/yaml/v3 by default +// already serializes nil slices to `null`, whereas go.yaml.in/yaml/v4 by default // serializes nil slices to `[]`. func (s ShellCommand) MarshalYAML() (interface{}, error) { if s == nil { diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/derived.gen.go b/vendor/github.com/compose-spec/compose-go/v2/types/derived.gen.go index 3857b4bb2cb9..e284fa9f512e 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/types/derived.gen.go +++ b/vendor/github.com/compose-spec/compose-go/v2/types/derived.gen.go @@ -938,6 +938,24 @@ func deriveDeepCopy_6(dst, src *BuildConfig) { copy(dst.CacheTo, src.CacheTo) } dst.NoCache = src.NoCache + if src.NoCacheFilter == nil { + dst.NoCacheFilter = nil + } else { + if dst.NoCacheFilter != nil { + if len(src.NoCacheFilter) > len(dst.NoCacheFilter) { + if cap(dst.NoCacheFilter) >= len(src.NoCacheFilter) { + dst.NoCacheFilter = (dst.NoCacheFilter)[:len(src.NoCacheFilter)] + } else { + dst.NoCacheFilter = make([]string, len(src.NoCacheFilter)) + } + } else if len(src.NoCacheFilter) < len(dst.NoCacheFilter) { + dst.NoCacheFilter = (dst.NoCacheFilter)[:len(src.NoCacheFilter)] + } + } else { + dst.NoCacheFilter = make([]string, len(src.NoCacheFilter)) + } + copy(dst.NoCacheFilter, src.NoCacheFilter) + } if src.AdditionalContexts != nil { dst.AdditionalContexts = make(map[string]string, len(src.AdditionalContexts)) deriveDeepCopy_5(dst.AdditionalContexts, src.AdditionalContexts) diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/envfile.go b/vendor/github.com/compose-spec/compose-go/v2/types/envfile.go index 1348f132dd3e..a7d239ee8bbb 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/types/envfile.go +++ b/vendor/github.com/compose-spec/compose-go/v2/types/envfile.go @@ -16,32 +16,8 @@ package types -import ( - "encoding/json" -) - type EnvFile 
struct { Path string `yaml:"path,omitempty" json:"path,omitempty"` - Required bool `yaml:"required" json:"required"` + Required OptOut `yaml:"required,omitempty" json:"required,omitzero"` Format string `yaml:"format,omitempty" json:"format,omitempty"` } - -// MarshalYAML makes EnvFile implement yaml.Marshaler -func (e EnvFile) MarshalYAML() (interface{}, error) { - if e.Required { - return e.Path, nil - } - return map[string]any{ - "path": e.Path, - "required": e.Required, - }, nil -} - -// MarshalJSON makes EnvFile implement json.Marshaler -func (e *EnvFile) MarshalJSON() ([]byte, error) { - if e.Required { - return json.Marshal(e.Path) - } - // Pass as a value to avoid re-entering this method and use the default implementation - return json.Marshal(*e) -} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/project.go b/vendor/github.com/compose-spec/compose-go/v2/types/project.go index a0d363cfefe4..58330e8df74c 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/types/project.go +++ b/vendor/github.com/compose-spec/compose-go/v2/types/project.go @@ -32,7 +32,7 @@ import ( "github.com/compose-spec/compose-go/v2/utils" "github.com/distribution/reference" godigest "github.com/opencontainers/go-digest" - "go.yaml.in/yaml/v3" + "go.yaml.in/yaml/v4" "golang.org/x/sync/errgroup" ) diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/types.go b/vendor/github.com/compose-spec/compose-go/v2/types/types.go index b73d934019e9..fd4f35136a72 100644 --- a/vendor/github.com/compose-spec/compose-go/v2/types/types.go +++ b/vendor/github.com/compose-spec/compose-go/v2/types/types.go @@ -302,36 +302,6 @@ func (s ServiceConfig) GetPullPolicy() (string, time.Duration, error) { } } -// BuildConfig is a type for build -type BuildConfig struct { - Context string `yaml:"context,omitempty" json:"context,omitempty"` - Dockerfile string `yaml:"dockerfile,omitempty" json:"dockerfile,omitempty"` - DockerfileInline string `yaml:"dockerfile_inline,omitempty" 
json:"dockerfile_inline,omitempty"` - Entitlements []string `yaml:"entitlements,omitempty" json:"entitlements,omitempty"` - Args MappingWithEquals `yaml:"args,omitempty" json:"args,omitempty"` - Provenance string `yaml:"provenance,omitempty" json:"provenance,omitempty"` - SBOM string `yaml:"sbom,omitempty" json:"sbom,omitempty"` - SSH SSHConfig `yaml:"ssh,omitempty" json:"ssh,omitempty"` - Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` - CacheFrom StringList `yaml:"cache_from,omitempty" json:"cache_from,omitempty"` - CacheTo StringList `yaml:"cache_to,omitempty" json:"cache_to,omitempty"` - NoCache bool `yaml:"no_cache,omitempty" json:"no_cache,omitempty"` - AdditionalContexts Mapping `yaml:"additional_contexts,omitempty" json:"additional_contexts,omitempty"` - Pull bool `yaml:"pull,omitempty" json:"pull,omitempty"` - ExtraHosts HostsList `yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"` - Isolation string `yaml:"isolation,omitempty" json:"isolation,omitempty"` - Network string `yaml:"network,omitempty" json:"network,omitempty"` - Target string `yaml:"target,omitempty" json:"target,omitempty"` - Secrets []ServiceSecretConfig `yaml:"secrets,omitempty" json:"secrets,omitempty"` - ShmSize UnitBytes `yaml:"shm_size,omitempty" json:"shm_size,omitempty"` - Tags StringList `yaml:"tags,omitempty" json:"tags,omitempty"` - Ulimits map[string]*UlimitsConfig `yaml:"ulimits,omitempty" json:"ulimits,omitempty"` - Platforms StringList `yaml:"platforms,omitempty" json:"platforms,omitempty"` - Privileged bool `yaml:"privileged,omitempty" json:"privileged,omitempty"` - - Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` -} - // BlkioConfig define blkio config type BlkioConfig struct { Weight uint16 `yaml:"weight,omitempty" json:"weight,omitempty"` @@ -594,12 +564,20 @@ const ( type ServiceVolumeBind struct { SELinux string `yaml:"selinux,omitempty" json:"selinux,omitempty"` Propagation string `yaml:"propagation,omitempty" 
json:"propagation,omitempty"` - CreateHostPath bool `yaml:"create_host_path,omitempty" json:"create_host_path,omitempty"` + CreateHostPath OptOut `yaml:"create_host_path,omitempty" json:"create_host_path,omitzero"` Recursive string `yaml:"recursive,omitempty" json:"recursive,omitempty"` Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` } +// OptOut is a boolean which default value is 'true' +type OptOut bool + +func (o OptOut) IsZero() bool { + // Attribute can be omitted if value is true + return bool(o) +} + // SELinux represents the SELinux re-labeling options. const ( // SELinuxShared option indicates that the bind mount content is shared among multiple containers diff --git a/vendor/go.yaml.in/yaml/v4/.gitignore b/vendor/go.yaml.in/yaml/v4/.gitignore new file mode 100644 index 000000000000..ae2880f5f249 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/.gitignore @@ -0,0 +1,10 @@ +# Copyright 2025 The go-yaml Project Contributors +# SPDX-License-Identifier: Apache-2.0 + +/.cache/ +/.claude/ +/CLAUDE.md +/yts/testdata/ +/go-yaml +/note/ +/*.yaml diff --git a/vendor/go.yaml.in/yaml/v4/.typos.toml b/vendor/go.yaml.in/yaml/v4/.typos.toml new file mode 100644 index 000000000000..59f933115835 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/.typos.toml @@ -0,0 +1,33 @@ +# Copyright 2025 The go-yaml Project Contributors +# SPDX-License-Identifier: Apache-2.0 + +# This is the configuration file of typos (spell checker) +# https://github.com/crate-ci/typos + +[files] +# excluded file +extend-exclude = [ + "yts/testdata", # third-party test data +] + +# setting for Go files configuration +[type.go] +extend-ignore-re = [ + 'ba-dum-tss\W+', # this one can be found in test files + '"yYnNtTfFoO', # this one can be found in test files + 'ba\?r', # this one can be found in test files +] + +[type.go.extend-words] +# Here is a list of words we want to ignore in Go files +typ = "typ" # commonly used abbreviation for "type" in Go as "type" is a reserved identifier + 
+# setting for YAML files configuration +[type.yaml] +extend-ignore-re = [ + 'ba\?r', # this one can be found in test files +] + +[default.extend-words] +caf = "caf" # part of "café" shown as "caf\u00e9" in Unicode escape examples +deprecat = "deprecat" # Used as part of a command in a docs/ file diff --git a/vendor/go.yaml.in/yaml/v4/CONTRIBUTING.md b/vendor/go.yaml.in/yaml/v4/CONTRIBUTING.md new file mode 100644 index 000000000000..6aae23b57649 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/CONTRIBUTING.md @@ -0,0 +1,210 @@ +Contributing to go-yaml +======================= + +Thank you for your interest in contributing to go-yaml! + +This document provides guidelines and instructions for contributing to this +project. + + +## Code of Conduct + +By participating in this project, you agree to follow our Code of Conduct. + +We expect all contributors to: + +- Be respectful and inclusive +- Use welcoming and inclusive language +- Be collaborative and constructive +- Focus on what is best for both the Go and YAML communities + + +## How to Contribute + + +### Reporting Issues + +Before submitting an issue, please: + +- Check if the issue already exists in our issue tracker +- Use a clear and descriptive title +- Provide detailed steps to reproduce the issue +- Include relevant code samples and error messages +- Specify your Go version and operating system +- Use the `go-yaml` CLI tool described below + + +### Using the `go-yaml` CLI Tool + +This tool can be used to inspect both the internal stages and final results of +YAML processing with the go-yaml library. +It should be used when reporting most bugs. + +The `go-yaml` CLI tool uses the `go.yaml.in/yaml/v4` library to decode and +encode YAML. +Decoding YAML is a multi-stage process that involves tokens, events, and nodes. +The `go-yaml` CLI tool lets you see all of these intermediate stages of the +decoding process. +This is crucial for understanding what go-yaml is doing internally. 
+ +The `go-yaml` CLI tool can be built with the `make go-yaml` command or installed +with the `go install go.yaml.in/yaml/v4/cmd/go-yaml@latest` command. + +You can learn about all of its options with the `go-yaml -h` command. + +Here is an example of using it on a small piece of YAML: + +```bash +./go-yaml -t <<< ' +foo: &a1 bar +*a1: baz' +``` + + +### Coding Conventions + +- Follow standard Go coding conventions +- Use `make fmt` to format your code +- Write descriptive comments for non-obvious code +- Add tests for your work +- Keep line length to 80 characters +- Use meaningful variable and function names +- Start doc and comment sentences on a new line +- Test your changes with the `go-yaml` CLI tool when working on parsing logic + + +### Commit Conventions + +- No merge commits +- Commit subject line should: + - Start with a capital letter + - Not end with a period + - Be no more than 50 characters + + +### Pull Requests + +1. Fork the repository +1. Create a new branch for your changes +1. Make your changes following our coding conventions + - If you are not sure about the coding conventions, please ask + - Look at the existing code for examples +1. Write clear commit messages +1. Update tests and documentation +1. 
Submit a pull request + + +### Testing + +- Ensure all tests pass with `make test` +- Add new tests for new functionality +- Update existing tests when modifying functionality + + +## Development Process + +- This project makes use of a GNU makefile (`GNUmakefile`) for many dev tasks +- The makefile doesn't use your locally installed Go commands; it auto-installs + them, so that all results are deterministic +- Fork and clone the repository +- Make your changes +- Run tests, linters and formatters + - `make fmt` + - `make tidy` + - `make lint` + - `make test` + - You can use `make check` to run all of the above +- Submit a [Pull Request](https://github.com/yaml/go-yaml/pulls) + + +### Using Your Own Go with the Makefile + +We ask that you always test with the makefile installed Go before committing, +since it is deterministic and uses the exact same flow as the go-yaml CI. + +We also realize that many Go devs need to run their locally installed Go +commands for their development environment, and might want to use them with +the go-yaml makefile. + +If you need to use your own Go utils with the makefile, set `GO_YAML_PATH` to +the directory(s) containing them (either by exporting it or passing it to +`make`). + +Something like this: + +```bash +export GO_YAML_PATH=$(dirname "$(command -v go)") +make test +# or +make test GO_YAML_PATH=$(dirname "$(command -v go)") +``` + +**Note:** `GO-VERSION` and `GO_YAML_PATH` are mutually exclusive. +When `GO_YAML_PATH` is set, the makefile uses your own Go environment and +ignores any `GO-VERSION` setting. + + +### Using the Makefile Environment as a Shell + +Sometimes you might want to run your own shell commands using the same binaries +that the makefile installs. 
+ +To get a subshell with this environment, run one of: + +```bash +make shell +make bash +make zsh +make shell GO-VERSION=1.23.4 +``` + + + +## Makefile Targets + +The repository's makefile (`GNUmakefile`) provides a number of useful targets: + +- `make test` runs all tests including yaml-test-suite tests +- `make test-unit` runs just the unit tests +- `make test-internal` runs just the internal tests +- `make test-yts` runs just the yaml-test-suite tests +- `make test v=1 count=3` runs the tests with options +- `make test GO-VERSION=1.23.4` runs the tests with a specific Go version +- `make test GO_YAML_PATH=/path/to/go/bin` uses your own Go installation +- `make shell` opens a shell with the project's dependencies set up +- `make shell GO-VERSION=1.23.4` opens a shell with a specific Go version +- `make fmt` runs `golangci-lint fmt ./...` +- `make lint` runs `golangci-lint run` +- `make tidy` runs `go mod tidy` +- `make distclean` cleans the project completely + + +## Getting Help + +If you need help, you can: +- Open an [issue](https://github.com/yaml/go-yaml/issues) with your question +- Start a [discussion](https://github.com/yaml/go-yaml/discussions) +- Read through our [documentation](https://pkg.go.dev/go.yaml.in/yaml/v4) +- Check the [migration guide](docs/v3-to-v4-migration.md) if upgrading from v3 +- Join our [Slack channel](https://cloud-native.slack.com/archives/C08PPAT8PS7) + + +## We are a Work in Progress + +This project is very much a team effort. +We are just getting things rolling and trying to get the foundations in place. +There are lots of opinions and ideas about how to do things, even within the +core team. + +Once our process is more mature, we will likely change the rules here. +We'll make the new rules as a team. +For now, please stick to the rules as they are. + +This project is focused on serving the needs of both the Go and YAML +communities. +Sometimes those needs can be in conflict, but we'll try to find common ground. 
+ + +## Thank You + +Thank you for contributing to go-yaml! diff --git a/vendor/go.yaml.in/yaml/v4/GNUmakefile b/vendor/go.yaml.in/yaml/v4/GNUmakefile new file mode 100644 index 000000000000..fb46f814c737 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/GNUmakefile @@ -0,0 +1,191 @@ +# Copyright 2025 The go-yaml Project Contributors +# SPDX-License-Identifier: Apache-2.0 + +# Auto-install https://github.com/makeplus/makes at specific commit: +MAKES := .cache/makes +MAKES-LOCAL := .cache/local +MAKES-COMMIT ?= 4e48a743c3652b88adc4a257398d895a801e6d11 +$(shell [ -d $(MAKES) ] || ( \ + git clone -q https://github.com/makeplus/makes $(MAKES) && \ + git -C $(MAKES) reset -q --hard $(MAKES-COMMIT))) +ifneq ($(shell git -C $(MAKES) rev-parse HEAD), \ + $(shell git -C $(MAKES) rev-parse $(MAKES-COMMIT))) +$(error $(MAKES) is not at the correct commit: $(MAKES-COMMIT). \ + Remove $(MAKES) and try again.) +endif + +include $(MAKES)/init.mk +include $(MAKES)/shellcheck.mk + +# Auto-install go unless GO_YAML_PATH is set: +ifdef GO_YAML_PATH +override export PATH := $(GO_YAML_PATH):$(PATH) +else +GO-VERSION ?= 1.25.5 +endif +GO-VERSION-NEEDED := $(GO-VERSION) + +# yaml-test-suite info: +YTS-URL ?= https://github.com/yaml/yaml-test-suite +YTS-TAG ?= data-2022-01-17 +YTS-DIR := yts/testdata/$(YTS-TAG) + +CLI-BINARY := go-yaml + +# Pager for viewing documentation: +PAGER ?= less -FRX + +# Setup and include go.mk and shell.mk: + +# We need to limit `find` to avoid dirs like `.cache/` and any git worktrees, +# as this makes `make` operations very slow: +REPO-DIRS := $(shell find * -maxdepth 0 -type d \ + ! 
-exec test -f {}/.git \; -print) +GO-FILES := $(shell find $(REPO-DIRS) -name '*.go') + +ifndef GO-VERSION-NEEDED +GO-NO-DEP-GO := true +endif + +include $(MAKES)/go.mk + +# Set this from the `make` command to override: +GOLANGCI-LINT-VERSION ?= v2.8.0 +GOLANGCI-LINT-INSTALLER := \ + https://github.com/golangci/golangci-lint/raw/main/install.sh +GOLANGCI-LINT := $(LOCAL-BIN)/golangci-lint +GOLANGCI-LINT-VERSIONED := $(GOLANGCI-LINT)-$(GOLANGCI-LINT-VERSION) + +SHELL-DEPS += $(GOLANGCI-LINT-VERSIONED) + +ifdef GO-VERSION-NEEDED +GO-DEPS += $(GO) +else +SHELL-DEPS := $(filter-out $(GO),$(SHELL-DEPS)) +endif + +SHELL-NAME := makes go-yaml +include $(MAKES)/clean.mk +include $(MAKES)/shell.mk + +MAKES-CLEAN := $(CLI-BINARY) $(GOLANGCI-LINT) +MAKES-REALCLEAN := $(dir $(YTS-DIR)) + +SHELL-SCRIPTS = \ + util/common.bash \ + $(shell grep -rl '^.!/usr/bin/env bash' util | \ + grep -v '\.sw') + +COVER-TESTS := \ + . \ + ./cmd/... \ + ./internal/... \ + +# v=1 for verbose +MAKE := $(MAKE) --no-print-directory + +v ?= +cover ?= +fuzz ?= +time ?= 60s +opts ?= + +TEST-OPTS := \ +$(if $v, -v)\ +$(if $(cover), --cover)\ +$(if $(fuzz), --fuzz=FuzzEncodeFromJSON --fuzztime=$(time))\ +$(if $(opts), $(opts))\ + +# Test rules: +test: test-main test-internal test-cmd test-yts-all test-shell + @echo 'ALL TESTS PASS' + +check: + $(MAKE) fmt + $(MAKE) tidy + $(MAKE) lint + $(MAKE) test + +test-main: $(GO-DEPS) + go test .$(TEST-OPTS) + @echo 'ALL MAIN FILES PASS' + +test-cmd: $(GO-DEPS) + go test ./cmd/...$(TEST-OPTS) + @echo 'ALL CMD FILES PASS' + +test-internal: $(GO-DEPS) + go test ./internal/...$(TEST-OPTS) + @echo 'ALL INTERNAL FILES PASS' + +test-cover: $(GO-DEPS) + go test . 
$(COVER-TESTS) -vet=off --cover$(TEST-OPTS) + +test-yts: $(GO-DEPS) $(YTS-DIR) + go test ./yts$(TEST-OPTS) + +test-yts-all: $(GO-DEPS) $(YTS-DIR) + @echo 'Testing yaml-test-suite' + util/yaml-test-suite all + +test-yts-fail: $(GO-DEPS) $(YTS-DIR) + @echo 'Testing yaml-test-suite failures' + util/yaml-test-suite fail + +test-shell: $(SHELLCHECK) + shellcheck $(SHELL-SCRIPTS) + @echo 'ALL SHELL FILES PASS' + +test-count: $(GO-DEPS) + util/test-count + +yts-dir: $(YTS-DIR) + +get-test-data: $(YTS-DIR) + +# Install golangci-lint for GitHub Actions: +golangci-lint-install: $(GOLANGCI-LINT) + +fmt: $(GOLANGCI-LINT-VERSIONED) + $< fmt ./... + +lint: $(GOLANGCI-LINT-VERSIONED) + $< run ./... + +tidy: $(GO-DEPS) + go mod tidy + +cli: $(CLI-BINARY) + +$(CLI-BINARY): $(GO) + go build -o $@ ./cmd/$@ + +run-examples: $(GO) + @for dir in example/*/; do \ + (set -x; go run "$${dir}main.go") || \ + { echo "$$dir failed"; break; }; \ + done + +# CLI documentation (go doc) - view in terminal: +doc: $(GO-DEPS) + @go doc -all . | $(PAGER) + +# HTTP documentation server - opens browser: +doc-http: $(GO-DEPS) + go doc -http -all + +# Setup rules: +$(YTS-DIR): + git clone -q $(YTS-URL) $@ + git -C $@ checkout -q $(YTS-TAG) + +# Downloads golangci-lint binary and moves to versioned path +# (.cache/local/bin/golangci-lint-). +$(GOLANGCI-LINT-VERSIONED): $(GO-DEPS) + curl -sSfL $(GOLANGCI-LINT-INSTALLER) | \ + bash -s -- -b $(LOCAL-BIN) $(GOLANGCI-LINT-VERSION) + mv $(GOLANGCI-LINT) $@ + +# Moves golangci-lint- to golangci-lint for CI requirement +$(GOLANGCI-LINT): $(GOLANGCI-LINT-VERSIONED) + cp $< $@ diff --git a/vendor/go.yaml.in/yaml/v4/LICENSE b/vendor/go.yaml.in/yaml/v4/LICENSE new file mode 100644 index 000000000000..b0fa97112a67 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2025 - The go-yaml Project Contributors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.yaml.in/yaml/v4/NOTICE b/vendor/go.yaml.in/yaml/v4/NOTICE new file mode 100644 index 000000000000..15093073b93a --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/NOTICE @@ -0,0 +1,21 @@ +The following files were ported to Go from C files of libyaml, and thus are +still covered by their original MIT license, with the additional copyright +starting in 2011 when the project was ported over: + +- internal/libyaml/api.go +- internal/libyaml/emitter.go +- internal/libyaml/parser.go +- internal/libyaml/reader.go +- internal/libyaml/scanner.go +- internal/libyaml/writer.go +- internal/libyaml/yaml.go +- internal/libyaml/yamlprivate.go + +Copyright 2006-2010 Kirill Simonov +https://opensource.org/license/mit + +All the remaining project files are covered by the Apache license: + +Copyright 2011-2019 Canonical Ltd +Copyright 2025 The go-yaml Project Contributors +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/vendor/go.yaml.in/yaml/v4/README.md b/vendor/go.yaml.in/yaml/v4/README.md new file mode 100644 index 000000000000..aef27985a525 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/README.md @@ -0,0 +1,266 @@ +go.yaml.in/yaml +=============== + +YAML Support for the Go Language + + +## Introduction + +The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode +and decode [YAML](https://yaml.org/) values. + +It was originally developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go +port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to +parse and generate YAML data quickly and reliably. + + +## Project Status + +This project started as a fork of the extremely popular [go-yaml]( +https://github.com/go-yaml/yaml/) +project, and is being maintained by the official [YAML organization]( +https://github.com/yaml/). 
+ +The YAML team took over ongoing maintenance and development of the project after +discussion with go-yaml's author, @niemeyer, following his decision to +[label the project repository as "unmaintained"]( +https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025. + +We have put together a team of dedicated maintainers including representatives +of go-yaml's most important downstream projects. + +We will strive to earn the trust of the various go-yaml forks to switch back to +this repository as their upstream. + +Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if you +would like to contribute or be involved. + + +### Version Intentions + +Versions `v1`, `v2`, and `v3` will remain as **frozen legacy**. +They will receive **security-fixes only** so that existing consumers keep +working without breaking changes. + +All ongoing work, including new features and routine bug-fixes, will happen in +**`v4`**. +If you’re starting a new project or upgrading an existing one, please use the +`go.yaml.in/yaml/v4` import path. + + +## Compatibility + +The `yaml` package supports most of YAML 1.2, but preserves some behavior from +1.1 for backwards compatibility. + +Specifically, v3 of the `yaml` package: + +* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being + decoded into a typed bool value. + Otherwise they behave as a string. + Booleans in YAML 1.2 are `true`/`false` only. +* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than + `0o777` as specified in YAML 1.2, because most parsers still use the old + format. + Octals in the `0o777` format are supported though, so new files work. +* Does not support base-60 floats. + These are gone from YAML 1.2, and were actually never supported by this + package as it's clearly a poor choice. + + +## Installation and Usage + +The import path for the package is *go.yaml.in/yaml/v4*. 
+ +To install it, run: + +```bash +go get go.yaml.in/yaml/v4 +``` + + +## API Documentation + +See: + + +## API Stability + +The package API for yaml v3 will remain stable as described in [gopkg.in]( +https://gopkg.in). + + +## Example + +```go +package main + +import ( + "fmt" + "log" + + "go.yaml.in/yaml/v4" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[any]any) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + + +## Development and Testing with `make` + +This project's makefile (`GNUmakefile`) is set up to support all of the +project's testing, automation and development tasks in a completely +deterministic way. 
+ +Some `make` commands are: + +* `make test` +* `make lint tidy` +* `make test-shell` +* `make test v=1` +* `make test o='-foo --bar=baz'` # Add extra CLI options +* `make test GO-VERSION=1.2.34` +* `make test GO_YAML_PATH=/usr/local/go/bin` +* `make shell` # Start a shell with the local `go` environment +* `make shell GO-VERSION=1.2.34` +* `make distclean` # Remove all generated files including `.cache/` + + +### Dependency Auto-install + +By default, this makefile will not use your system's Go installation, or any +other system tools that it needs. + +The only things from your system that it relies on are: +* Linux or macOS +* GNU `make` (3.81+) +* `git` +* `bash` +* `curl` + +Everything else, including Go and Go utils, are installed and cached as they +are needed by the makefile (under `.cache/`). + +> **Note**: Use `make shell` to get a subshell with the same environment that +> the makefile set up for its commands. + + +### Using your own Go + +If you want to use your own Go installation and utils, export `GO_YAML_PATH` to +the directory containing the `go` binary. + +Use something like this: + +``` +export GO_YAML_PATH=$(dirname "$(command -v go)") +make +# or: +make GO_YAML_PATH=$(dirname "$(command -v go)") +``` + +> **Note:** `GO-VERSION` and `GO_YAML_PATH` are mutually exclusive. +> When `GO_YAML_PATH` is set, the Makefile uses your own Go installation and +> ignores any `GO-VERSION` setting. + + +## The `go-yaml` CLI Tool + +This repository includes a `go-yaml` CLI tool which can be used to understand +the internal stages and final results of YAML processing with the go-yaml +library. + +We strongly encourage you to show pertinent output from this command when +reporting and discussing issues. 
+ +```bash +make go-yaml +./go-yaml --help +./go-yaml <<< ' +foo: &a1 bar +*a1: baz +' -n # Show value on decoded Node structs (formatted in YAML) +``` + +You can also install it with: + +```bash +go install go.yaml.in/yaml/v4/cmd/go-yaml@latest +``` + + +## License + +The yaml package is licensed under the MIT and Apache License 2.0 licenses. +Please see the LICENSE file for details. diff --git a/vendor/go.yaml.in/yaml/v4/doc.go b/vendor/go.yaml.in/yaml/v4/doc.go new file mode 100644 index 000000000000..9e9ac7283bc3 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/doc.go @@ -0,0 +1,115 @@ +// +// Copyright (c) 2025 The go-yaml Project Contributors +// SPDX-License-Identifier: Apache-2.0 +// + +// Package yaml implements YAML 1.1/1.2 encoding and decoding for Go programs. +// +// # Quick Start +// +// For simple encoding and decoding, use [Unmarshal] and [Marshal]: +// +// type Config struct { +// Name string `yaml:"name"` +// Version string `yaml:"version"` +// } +// +// // Decode YAML to Go struct +// var config Config +// err := yaml.Unmarshal(yamlData, &config) +// +// // Encode Go struct to YAML +// data, err := yaml.Marshal(&config) +// +// For encoding/decoding with options, use [Load] and [Dump]: +// +// // Decode with strict field checking +// err := yaml.Load(data, &config, yaml.WithKnownFields()) +// +// // Encode with custom indent +// data, err := yaml.Dump(&config, yaml.WithIndent(2)) +// +// // Decode all documents from multi-document stream +// var docs []Config +// err := yaml.Load(multiDocYAML, &docs, yaml.WithAllDocuments()) +// +// // Encode multiple documents as multi-document stream +// docs := []Config{config1, config2} +// data, err := yaml.Dump(docs, yaml.WithAllDocuments()) +// +// # Streaming with Loader and Dumper +// +// For multi-document streams or when you need custom options, use [Loader] and [Dumper]: +// +// // Load multiple documents from a stream +// loader, err := yaml.NewLoader(reader) +// if err != nil { +// log.Fatal(err) +// } 
+// for { +// var doc any +// if err := loader.Load(&doc); err == io.EOF { +// break +// } else if err != nil { +// log.Fatal(err) +// } +// // Process document... +// } +// +// // Dump multiple documents to a stream +// dumper, err := yaml.NewDumper(writer, yaml.WithIndent(2)) +// if err != nil { +// log.Fatal(err) +// } +// dumper.Dump(&doc1) +// dumper.Dump(&doc2) +// dumper.Close() +// +// # Options System +// +// Configure YAML processing behavior with functional options: +// +// yaml.NewDumper(w, +// yaml.WithIndent(2), // Indentation spacing +// yaml.WithCompactSeqIndent(), // Compact sequences (defaults to true) +// yaml.WithLineWidth(80), // Line wrapping width +// yaml.WithUnicode(false), // Escape non-ASCII (override default true) +// yaml.WithKnownFields(), // Strict field checking (defaults to true) +// yaml.WithUniqueKeys(), // Prevent duplicate keys (defaults to true) +// yaml.WithSingleDocument(), // Single document mode +// ) +// +// Or use version-specific option presets for consistent formatting: +// +// yaml.NewDumper(w, yaml.V3) +// +// Options can be combined and later options override earlier ones: +// +// // Start with v3 defaults, then override indent +// yaml.NewDumper(w, +// yaml.V3, +// yaml.WithIndent(4), +// ) +// +// Load options from YAML configuration files: +// +// opts, err := yaml.OptsYAML(configYAML) +// dumper, err := yaml.NewDumper(w, opts) +// +// # YAML Compatibility +// +// This package supports most of YAML 1.2, but preserves some YAML 1.1 +// behavior for backward compatibility: +// +// - YAML 1.1 booleans (yes/no, on/off) are supported when decoding into +// typed bool values, otherwise treated as strings +// - Octals can use 0777 format (YAML 1.1) or 0o777 format (YAML 1.2) +// - Base-60 floats are not supported (removed in YAML 1.2) +// +// # Version Defaults +// +// [NewLoader] and [NewDumper] use v4 defaults (2-space indentation, compact +// sequences). 
The older [Marshal] and [Unmarshal] functions use v3 defaults +// for backward compatibility. Use the options system to select different +// version defaults if needed. +package yaml diff --git a/vendor/go.yaml.in/yaml/v4/dumper.go b/vendor/go.yaml.in/yaml/v4/dumper.go new file mode 100644 index 000000000000..3182892792f0 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/dumper.go @@ -0,0 +1,118 @@ +// Copyright 2025 The go-yaml Project Contributors +// SPDX-License-Identifier: Apache-2.0 + +// This file contains the Dumper API for writing YAML documents. +// +// Primary functions: +// - Dump: Encode value(s) to YAML (use WithAll for multi-doc) +// - NewDumper: Create a streaming dumper to io.Writer + +package yaml + +import ( + "bytes" + "errors" + "io" + "reflect" + + "go.yaml.in/yaml/v4/internal/libyaml" +) + +// Dump encodes a value to YAML with the given options. +// +// By default, Dump encodes a single value as a single YAML document. +// +// Use WithAllDocuments() to encode multiple values as a multi-document stream: +// +// docs := []Config{config1, config2, config3} +// yaml.Dump(docs, yaml.WithAllDocuments()) +// +// When WithAllDocuments is used, in must be a slice. +// Each element is encoded as a separate YAML document with "---" separators. +// +// See [Marshal] for details about the conversion of Go values to YAML. +func Dump(in any, opts ...Option) (out []byte, err error) { + defer handleErr(&err) + + o, err := libyaml.ApplyOptions(opts...) 
+ if err != nil { + return nil, err + } + + var buf bytes.Buffer + d, err := NewDumper(&buf, func(opts *libyaml.Options) error { + *opts = *o // Copy options + return nil + }) + if err != nil { + return nil, err + } + + if o.AllDocuments { + // Multi-document mode: in must be a slice + inVal := reflect.ValueOf(in) + if inVal.Kind() != reflect.Slice { + return nil, &LoadErrors{Errors: []*libyaml.ConstructError{{ + Err: errors.New("yaml: WithAllDocuments requires a slice input"), + }}} + } + + // Dump each element as a separate document + for i := 0; i < inVal.Len(); i++ { + if err := d.Dump(inVal.Index(i).Interface()); err != nil { + return nil, err + } + } + } else { + // Single-document mode + if err := d.Dump(in); err != nil { + return nil, err + } + } + + if err := d.Close(); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// A Dumper writes YAML values to an output stream with configurable options. +type Dumper struct { + encoder *libyaml.Representer + opts *libyaml.Options +} + +// NewDumper returns a new Dumper that writes to w with the given options. +// +// The Dumper should be closed after use to flush all data to w. +func NewDumper(w io.Writer, opts ...Option) (*Dumper, error) { + o, err := libyaml.ApplyOptions(opts...) + if err != nil { + return nil, err + } + return &Dumper{ + encoder: libyaml.NewRepresenter(w, o), + opts: o, + }, nil +} + +// Dump writes the YAML encoding of v to the stream. +// +// If multiple values are dumped to the stream, the second and subsequent +// documents will be preceded with a "---" document separator. +// +// See the documentation for [Marshal] for details about the conversion of Go +// values to YAML. +func (d *Dumper) Dump(v any) (err error) { + defer handleErr(&err) + d.encoder.MarshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Close closes the Dumper by writing any remaining data. +// It does not write a stream terminating string "...". 
+func (d *Dumper) Close() (err error) { + defer handleErr(&err) + d.encoder.Finish() + return nil +} diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/README.md b/vendor/go.yaml.in/yaml/v4/internal/libyaml/README.md new file mode 100644 index 000000000000..62dbeac3e447 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/README.md @@ -0,0 +1,533 @@ +# internal/libyaml + +This package provides low-level YAML processing functionality through a 3-stage +pipeline: Scanner → Parser → Emitter. +It implements the libyaml C library functionality in Go. + +## Directory Overview + +The `internal/libyaml` package implements the core YAML processing stages: + +1. **Scanner** - Tokenizes YAML text into tokens +2. **Parser** - Converts tokens into events following YAML grammar rules +3. **Emitter** - Serializes events back into YAML text + +## File Organization + +### Main Source Files + +- **scanner.go** - YAML scanner/tokenizer implementation +- **parser.go** - YAML parser (tokens → events) +- **emitter.go** - YAML emitter (events → YAML output) +- **api.go** - Public API for Parser and Emitter types +- **yaml.go** - Core types and constants (Event, Token, enums) +- **reader.go** - Input handling and encoding detection +- **writer.go** - Output handling +- **yamlprivate.go** - Internal types and helper functions + +### Test Files + +- **scanner_test.go** - Scanner tests +- **parser_test.go** - Parser tests +- **emitter_test.go** - Emitter tests +- **api_test.go** - API tests +- **yaml_test.go** - Utility function tests +- **reader_test.go** - Reader tests +- **writer_test.go** - Writer tests +- **yamlprivate_test.go** - Character classification tests +- **loader_test.go** - Data loader scalar resolution tests +- **yamldatatest_test.go** - YAML test data loading framework +- **yamldatatest_loader.go** - YAML test data loader with scalar type resolution (exported for reuse) + +### Test Data Files (in `testdata/`) + +- **scanner.yaml** - Scanner test cases +- 
**parser.yaml** - Parser test cases +- **emitter.yaml** - Emitter test cases +- **api.yaml** - API test cases +- **yaml.yaml** - Utility function test cases +- **reader.yaml** - Reader test cases +- **writer.yaml** - Writer test cases +- **yamlprivate.yaml** - Character classification test cases +- **loader.yaml** - Data loader scalar resolution test cases + +## Processing Pipeline + +### 1. Scanner (scanner.go) + +The scanner converts YAML text into tokens. + +**Input**: Raw YAML text (string or []byte) +**Output**: Stream of tokens + +**Token types include**: +- `SCALAR_TOKEN` - Plain, quoted, or block scalar values +- `KEY_TOKEN`, `VALUE_TOKEN` - Mapping key/value indicators +- `BLOCK_MAPPING_START_TOKEN`, `FLOW_MAPPING_START_TOKEN` - Mapping delimiters +- `BLOCK_SEQUENCE_START_TOKEN`, `FLOW_SEQUENCE_START_TOKEN` - Sequence delimiters +- `ANCHOR_TOKEN`, `ALIAS_TOKEN` - Anchor definitions and references +- `TAG_TOKEN` - Type tags +- `DOCUMENT_START_TOKEN`, `DOCUMENT_END_TOKEN` - Document boundaries + +**Responsibilities**: +- Character encoding detection (UTF-8, UTF-16LE, UTF-16BE) +- Line break normalization +- Indentation tracking +- Quote and escape sequence handling + +### 2. Parser (parser.go) + +The parser converts tokens into events following YAML grammar rules. 
+ +**Input**: Stream of tokens from Scanner +**Output**: Stream of events + +**Event types include**: +- `STREAM_START_EVENT`, `STREAM_END_EVENT` - Stream boundaries +- `DOCUMENT_START_EVENT`, `DOCUMENT_END_EVENT` - Document boundaries +- `SCALAR_EVENT` - Scalar values +- `MAPPING_START_EVENT`, `MAPPING_END_EVENT` - Mapping boundaries +- `SEQUENCE_START_EVENT`, `SEQUENCE_END_EVENT` - Sequence boundaries +- `ALIAS_EVENT` - Anchor references + +**Responsibilities**: +- Implementing YAML grammar and validation +- Managing document directives (%YAML, %TAG) +- Resolving anchors and aliases +- Tracking implicit vs explicit markers +- Style preservation (plain, single-quoted, double-quoted, literal, folded) + +### 3. Emitter (emitter.go) + +The emitter converts events back into YAML text. + +**Input**: Stream of events +**Output**: YAML text + +**Responsibilities**: +- Style selection (plain/quoted scalars, block/flow collections) +- Formatting control (canonical mode, indentation, line width) +- Character encoding +- Anchor and tag serialization +- Document marker generation (---, ...) + +**Configuration options**: +- `Canonical` - Emit in canonical YAML form +- `Indent` - Indentation width (2-9 spaces) +- `Width` - Line width (-1 for unlimited) +- `Unicode` - Enable Unicode character output +- `LineBreak` - Line break style (LN, CR, CRLN) + +## Testing Framework + +### Test Architecture + +The testing framework uses a data-driven approach: + +1. **Test data** is stored in YAML files in the `testdata/` directory +2. **Test logic** is implemented in Go files (`*_test.go`) +3. 
**One-to-one pairing**: Each `testdata/foo.yaml` has a corresponding `foo_test.go` + +**Benefits**: +- Easy to add new test cases without writing Go code +- Test data is human-readable and self-documenting +- Test logic is reusable across many test cases +- Test data is separated from test code for clarity +- Tests can become a common suite for multiple YAML frameworks + +### Test Data Files + +Each YAML file contains test cases for a specific component: + +- **scanner.yaml** - Scanner/tokenization tests + - Token sequence verification + - Token property validation (value, style) + - Error detection + +- **parser.yaml** - Parser/event generation tests + - Event sequence verification + - Event property validation (anchor, tag, value, directives) + - Error detection + +- **emitter.yaml** - Emitter/serialization tests + - Event-to-YAML conversion + - Configuration options testing + - Roundtrip testing (parse → emit) + - Writer integration + +- **api.yaml** - API constructor and method tests + - Constructor validation + - Method behavior and state changes + - Panic conditions + - Cleanup verification + +- **yaml.yaml** - Utility function tests + - Enum String() methods + - Style accessor methods + +- **reader.yaml** - Reader/input handling tests + - Encoding detection (UTF-8, UTF-16LE, UTF-16BE) + - Buffer management + - Error handling + +- **writer.yaml** - Writer/output handling tests + - Buffer flushing + - Output handlers (string, io.Writer) + - Error conditions + +- **yamlprivate.yaml** - Character classification tests + - Character type predicates (isAlpha, isDigit, isHex, etc.) 
+ - Character conversion functions (asDigit, asHex, width) + - Unicode handling + +- **loader.yaml** - Data loader scalar resolution tests + - Numeric type resolution (integers, floats) + - Boolean and null value handling + - String vs numeric type disambiguation + - Mixed-type collections + +### Test Framework Implementation + +The test framework is implemented in `yamldatatest_loader.go` and `yamldatatest_test.go`: + +**Core functions**: +- `LoadYAML(data []byte) (interface{}, error)` - Parses YAML using libyaml parser with scalar type resolution (exported) +- `UnmarshalStruct(target interface{}, data map[string]interface{}) error` - Populates structs (exported) +- `LoadTestCases(filename string) ([]TestCase, error)` - Loads and parses test YAML files +- `coerceScalar(value string) interface{}` - Resolves scalar strings to appropriate Go types (int, float64, bool, nil, string) + +**Core types**: +- `TestCase` struct - Umbrella structure containing fields for all test types + - Uses `interface{}` for flexible field types + - Post-processing converts generic fields to specific types + +**Post-processing**: +After loading, the framework processes test data: +- Converts `Want` (interface{}) to `WantEvents`, `WantTokens`, or `WantSpecs` based on test type +- Converts `Want` (interface{}) to `WantContains` (handles both scalar and sequence) +- Converts `Checks` to field validation specifications + +### Test Types + +#### Scanner Tests + +**scan-tokens** - Verify token sequence + +```yaml +- scan-tokens: + name: Simple scalar + yaml: |- + hello + want: + - STREAM_START_TOKEN + - SCALAR_TOKEN + - STREAM_END_TOKEN +``` + +**scan-tokens-detailed** - Verify token properties + +```yaml +- scan-tokens-detailed: + name: Single quoted scalar + yaml: |- + 'hello world' + want: + - STREAM_START_TOKEN + - SCALAR_TOKEN: + style: SINGLE_QUOTED_SCALAR_STYLE + value: hello world + - STREAM_END_TOKEN +``` + +**scan-error** - Verify error detection + +```yaml +- scan-error: + name: 
Invalid character + yaml: "\x01" +``` + +#### Parser Tests + +**parse-events** - Verify event sequence + +```yaml +- parse-events: + name: Simple mapping + yaml: | + key: value + want: + - STREAM_START_EVENT + - DOCUMENT_START_EVENT + - MAPPING_START_EVENT + - SCALAR_EVENT + - SCALAR_EVENT + - MAPPING_END_EVENT + - DOCUMENT_END_EVENT + - STREAM_END_EVENT +``` + +**parse-events-detailed** - Verify event properties + +```yaml +- parse-events-detailed: + name: Anchor and alias + yaml: | + - &anchor value + - *anchor + want: + - STREAM_START_EVENT + - DOCUMENT_START_EVENT + - SEQUENCE_START_EVENT + - SCALAR_EVENT: + anchor: anchor + value: value + - ALIAS_EVENT: + anchor: anchor + - SEQUENCE_END_EVENT + - DOCUMENT_END_EVENT + - STREAM_END_EVENT +``` + +**parse-error** - Verify error detection + +```yaml +- parse-error: + name: Error state + yaml: | + key: : invalid +``` + +#### Emitter Tests + +**emit** - Emit events and verify output contains expected strings + +```yaml +- emit: + name: Simple scalar + data: + - STREAM_START_EVENT: + encoding: UTF8_ENCODING + - DOCUMENT_START_EVENT: + implicit: true + - SCALAR_EVENT: + value: hello + implicit: true + style: PLAIN_SCALAR_STYLE + - DOCUMENT_END_EVENT: + implicit: true + - STREAM_END_EVENT + want: hello +``` + +**emit-config** - Emit with configuration + +```yaml +- emit-config: + name: Custom indent + conf: + indent: 4 + data: + - STREAM_START_EVENT: + encoding: UTF8_ENCODING + - DOCUMENT_START_EVENT: + implicit: true + - MAPPING_START_EVENT: + implicit: true + style: BLOCK_MAPPING_STYLE + # ... more events + want: key +``` + +**roundtrip** - Parse → emit, verify output + +```yaml +- roundtrip: + name: Roundtrip + yaml: | + key: value + list: + - item1 + - item2 + want: + - key + - value + - item1 +``` + +**emit-writer** - Emit to io.Writer + +```yaml +- emit-writer: + name: Writer + data: + - STREAM_START_EVENT: + encoding: UTF8_ENCODING + # ... 
more events + want: test +``` + +#### API Tests + +**api-new** - Test constructors + +```yaml +- api-new: + name: New parser + with: NewParser + test: + - nil: [raw-buffer, false] + - cap: [raw-buffer, 512] + - nil: [buffer, false] + - cap: [buffer, 1536] +``` + +**api-method** - Test methods and field state + +```yaml +- api-method: + name: Parser set input string + with: NewParser + byte: true + call: [SetInputString, 'key: value'] + test: + - eq: [input, 'key: value'] + - eq: [input-pos, 0] + - nil: [read-handler, false] +``` + +**api-panic** - Test methods that should panic + +```yaml +- api-panic: + name: Parser set input string twice + with: NewParser + byte: true + init: [SetInputString, first] + call: [SetInputString, second] + want: must set the input source only once +``` + +**api-delete** - Test cleanup + +```yaml +- api-delete: + name: Parser delete + with: NewParser + byte: true + init: [SetInputString, test] + test: + - len: [input, 0] + - len: [buffer, 0] +``` + +**api-new-event** - Test event constructors + +```yaml +- api-new-event: + name: New stream start event + call: [NewStreamStartEvent, UTF8_ENCODING] + test: + - eq: [Type, STREAM_START_EVENT] + - eq: [encoding, UTF8_ENCODING] +``` + +#### Utility Tests + +**enum-string** - Test String() methods of enums + +```yaml +- enum-string: + name: Scalar style plain + enum: [ScalarStyle, PLAIN_SCALAR_STYLE] + want: Plain +``` + +**style-accessor** - Test style accessor methods + +```yaml +- style-accessor: + name: Event scalar style + test: [ScalarStyle, DOUBLE_QUOTED_SCALAR_STYLE] +``` + +#### Loader Tests + +**scalar-resolution** - Test scalar type resolution + +```yaml +- scalar-resolution: + name: Positive integer + yaml: "42" + want: 42 + +- scalar-resolution: + name: Negative float + yaml: "-2.5" + want: -2.5 +``` + +**Resolution order**: +1. Boolean (true, false) +2. Null (null keyword only) +3. Hexadecimal integer (0x prefix) +4. Float (contains .) +5. Decimal integer +6. 
String (fallback) + +### Common Keys in Test YAML Files + +Test cases use a **type-as-key** format where the test type is the map key: + +```yaml +- test-type: + name: Test case name + # ... other fields +``` + +**Common fields**: +- **name** - Test case name (title case convention) +- **yaml** - Input YAML string to test +- **want** - Expected result (format varies by test type) + - For api-panic: string containing expected panic message substring + - For scan-error/parse-error: boolean (defaults to true if omitted; set to false if no error expected) + - For enum-string: string representing expected String() output + - For other types: varies (may be sequence or scalar) +- **data** - For emitter tests: list of event specifications to emit +- **conf** - For emitter config tests: emitter configuration options +- **with** - For API tests: constructor name (NewParser, NewEmitter) +- **call** - For API tests: method call [MethodName, arg1, arg2, ...] +- **init** - For API panic tests: setup method call before main method +- **byte** - For API tests: boolean flag to convert string args to []byte +- **test** - For API tests: list of field validation checks in format `operator: [field, value]` where operator is one of: nil, cap, len, eq, gte, len-gt. +- **test** - For style-accessor tests: array of [Method, STYLE] where Method is the accessor method (e.g., ScalarStyle) and STYLE is the style constant (e.g., DOUBLE_QUOTED_SCALAR_STYLE). +- **enum** - For enum tests: array of [Type, Value] where Type is the enum type (e.g., ScalarStyle) and Value is the constant (e.g., PLAIN_SCALAR_STYLE) + +**Note on scalar type resolution**: Unquoted scalar values in test data are automatically resolved to appropriate Go types (int, float64, bool, nil) by the `LoadYAML` function. Quoted scalars remain as strings. 
+ +### Running Tests + +```bash +# Run all tests in the package +go test ./internal/libyaml + +# Run specific test file +go test ./internal/libyaml -run TestScanner +go test ./internal/libyaml -run TestParser +go test ./internal/libyaml -run TestEmitter +go test ./internal/libyaml -run TestAPI +go test ./internal/libyaml -run TestYAML +go test ./internal/libyaml -run TestLoader + +# Run specific test case (using subtest name) +go test ./internal/libyaml -run TestScanner/Block_sequence +go test ./internal/libyaml -run TestParser/Anchor_and_alias +go test ./internal/libyaml -run TestEmitter/Flow_mapping +go test ./internal/libyaml -run TestLoader/Scientific_notation_lowercase_e + +# Run with verbose output +go test -v ./internal/libyaml + +# Run with coverage +go test -cover ./internal/libyaml +``` diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/api.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/api.go new file mode 100644 index 000000000000..f0807f5b0699 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/api.go @@ -0,0 +1,733 @@ +// Copyright 2006-2010 Kirill Simonov +// Copyright 2011-2019 Canonical Ltd +// Copyright 2025 The go-yaml Project Contributors +// SPDX-License-Identifier: Apache-2.0 AND MIT + +// High-level API helpers for parser and emitter initialization and +// configuration. +// Provides convenience functions for token insertion and stream management. + +package libyaml + +import ( + "io" +) + +func (parser *Parser) insertToken(pos int, token *Token) { + // fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. 
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// NewParser creates a new parser object. +func NewParser() Parser { + return Parser{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } +} + +// Delete a parser object. +func (parser *Parser) Delete() { + *parser = Parser{} +} + +// String read handler. +func yamlStringReadHandler(parser *Parser, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yamlReaderReadHandler(parser *Parser, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// SetInputString sets a string input. +func (parser *Parser) SetInputString(input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yamlStringReadHandler + parser.input = input + parser.input_pos = 0 +} + +// SetInputReader sets a file input. +func (parser *Parser) SetInputReader(r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yamlReaderReadHandler + parser.input_reader = r +} + +// SetEncoding sets the source encoding. 
+func (parser *Parser) SetEncoding(encoding Encoding) { + if parser.encoding != ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// GetPendingComments returns the parser's comment queue for CLI access. +func (parser *Parser) GetPendingComments() []Comment { + return parser.comments +} + +// GetCommentsHead returns the current position in the comment queue. +func (parser *Parser) GetCommentsHead() int { + return parser.comments_head +} + +// NewEmitter creates a new emitter object. +func NewEmitter() Emitter { + return Emitter{ + buffer: make([]byte, output_buffer_size), + states: make([]EmitterState, 0, initial_stack_size), + events: make([]Event, 0, initial_queue_size), + best_width: -1, + } +} + +// Delete an emitter object. +func (emitter *Emitter) Delete() { + *emitter = Emitter{} +} + +// String write handler. +func yamlStringWriteHandler(emitter *Emitter, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yamlWriterWriteHandler uses emitter.output_writer to write the +// emitted text. +func yamlWriterWriteHandler(emitter *Emitter, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// SetOutputString sets a string output. +func (emitter *Emitter) SetOutputString(output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yamlStringWriteHandler + emitter.output_buffer = output_buffer +} + +// SetOutputWriter sets a file output. +func (emitter *Emitter) SetOutputWriter(w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yamlWriterWriteHandler + emitter.output_writer = w +} + +// SetEncoding sets the output encoding. 
+func (emitter *Emitter) SetEncoding(encoding Encoding) { + if emitter.encoding != ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// SetCanonical sets the canonical output style. +func (emitter *Emitter) SetCanonical(canonical bool) { + emitter.canonical = canonical +} + +// SetIndent sets the indentation increment. +func (emitter *Emitter) SetIndent(indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.BestIndent = indent +} + +// SetWidth sets the preferred line width. +func (emitter *Emitter) SetWidth(width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// SetUnicode sets if unescaped non-ASCII characters are allowed. +func (emitter *Emitter) SetUnicode(unicode bool) { + emitter.unicode = unicode +} + +// SetLineBreak sets the preferred line break character. +func (emitter *Emitter) SetLineBreak(line_break LineBreak) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. 
+// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// NewStreamStartEvent creates a new STREAM-START event. +func NewStreamStartEvent(encoding Encoding) Event { + return Event{ + Type: STREAM_START_EVENT, + encoding: encoding, + } +} + +// NewStreamEndEvent creates a new STREAM-END event. +func NewStreamEndEvent() Event { + return Event{ + Type: STREAM_END_EVENT, + } +} + +// NewDocumentStartEvent creates a new DOCUMENT-START event. +func NewDocumentStartEvent(version_directive *VersionDirective, tag_directives []TagDirective, implicit bool) Event { + return Event{ + Type: DOCUMENT_START_EVENT, + versionDirective: version_directive, + tagDirectives: tag_directives, + Implicit: implicit, + } +} + +// NewDocumentEndEvent creates a new DOCUMENT-END event. +func NewDocumentEndEvent(implicit bool) Event { + return Event{ + Type: DOCUMENT_END_EVENT, + Implicit: implicit, + } +} + +// NewAliasEvent creates a new ALIAS event. 
+func NewAliasEvent(anchor []byte) Event { + return Event{ + Type: ALIAS_EVENT, + Anchor: anchor, + } +} + +// NewScalarEvent creates a new SCALAR event. +func NewScalarEvent(anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style ScalarStyle) Event { + return Event{ + Type: SCALAR_EVENT, + Anchor: anchor, + Tag: tag, + Value: value, + Implicit: plain_implicit, + quoted_implicit: quoted_implicit, + Style: Style(style), + } +} + +// NewSequenceStartEvent creates a new SEQUENCE-START event. +func NewSequenceStartEvent(anchor, tag []byte, implicit bool, style SequenceStyle) Event { + return Event{ + Type: SEQUENCE_START_EVENT, + Anchor: anchor, + Tag: tag, + Implicit: implicit, + Style: Style(style), + } +} + +// NewSequenceEndEvent creates a new SEQUENCE-END event. +func NewSequenceEndEvent() Event { + return Event{ + Type: SEQUENCE_END_EVENT, + } +} + +// NewMappingStartEvent creates a new MAPPING-START event. +func NewMappingStartEvent(anchor, tag []byte, implicit bool, style MappingStyle) Event { + return Event{ + Type: MAPPING_START_EVENT, + Anchor: anchor, + Tag: tag, + Implicit: implicit, + Style: Style(style), + } +} + +// NewMappingEndEvent creates a new MAPPING-END event. +func NewMappingEndEvent() Event { + return Event{ + Type: MAPPING_END_EVENT, + } +} + +// Delete an event object. +func (e *Event) Delete() { + *e = Event{} +} + +///* +// * Create a document object. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. +// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if 
(!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. 
+// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/composer.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/composer.go new file mode 100644 index 000000000000..57dd7f1e6ed7 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/composer.go @@ -0,0 +1,362 @@ +// Copyright 2011-2019 Canonical Ltd +// Copyright 2025 The go-yaml Project Contributors +// SPDX-License-Identifier: Apache-2.0 + +// Composer stage: Builds a node tree from a libyaml event stream. +// Handles document structure, anchors, and comment attachment. + +package libyaml + +import ( + "fmt" + "io" +) + +// Composer produces a node tree out of a libyaml event stream. +type Composer struct { + Parser Parser + event Event + doc *Node + anchors map[string]*Node + doneInit bool + Textless bool + streamNodes bool // enable stream node emission + returnStream bool // flag to return stream node next + atStreamEnd bool // at stream end + encoding Encoding // stream encoding from STREAM_START +} + +// NewComposer creates a new composer from a byte slice. 
+func NewComposer(b []byte) *Composer { + p := Composer{ + Parser: NewParser(), + } + if len(b) == 0 { + b = []byte{'\n'} + } + p.Parser.SetInputString(b) + return &p +} + +// NewComposerFromReader creates a new composer from an io.Reader. +func NewComposerFromReader(r io.Reader) *Composer { + p := Composer{ + Parser: NewParser(), + } + p.Parser.SetInputReader(r) + return &p +} + +func (c *Composer) init() { + if c.doneInit { + return + } + c.anchors = make(map[string]*Node) + // Peek to get the encoding from STREAM_START_EVENT + if c.peek() == STREAM_START_EVENT { + c.encoding = c.event.GetEncoding() + } + c.expect(STREAM_START_EVENT) + c.doneInit = true + + // If stream nodes are enabled, prepare to return the first stream node + if c.streamNodes { + c.returnStream = true + } +} + +func (c *Composer) Destroy() { + if c.event.Type != NO_EVENT { + c.event.Delete() + } + c.Parser.Delete() +} + +// SetStreamNodes enables or disables stream node emission. +func (c *Composer) SetStreamNodes(enable bool) { + c.streamNodes = enable +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (c *Composer) expect(e EventType) { + if c.event.Type == NO_EVENT { + if err := c.Parser.Parse(&c.event); err != nil { + c.fail(err) + } + } + if c.event.Type == STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if c.event.Type != e { + c.fail(fmt.Errorf("expected %s event but got %s", e, c.event.Type)) + } + c.event.Delete() + c.event.Type = NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into c.event and returns the event type. +func (c *Composer) peek() EventType { + if c.event.Type != NO_EVENT { + return c.event.Type + } + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). 
+ if err := c.Parser.Parse(&c.event); err != nil { + c.fail(err) + } + return c.event.Type +} + +func (c *Composer) fail(err error) { + Fail(err) +} + +func (c *Composer) anchor(n *Node, anchor []byte) { + if anchor != nil { + n.Anchor = string(anchor) + c.anchors[n.Anchor] = n + } +} + +// Parse parses the next YAML node from the event stream. +func (c *Composer) Parse() *Node { + c.init() + + // Handle stream nodes if enabled + if c.streamNodes { + // Check for stream end first + if c.peek() == STREAM_END_EVENT { + // If we haven't returned the final stream node yet, return it now + if !c.atStreamEnd { + c.atStreamEnd = true + return c.createStreamNode() + } + // Already returned final stream node + return nil + } + + // Check if we should return a stream node before the next document + if c.returnStream { + c.returnStream = false + n := c.createStreamNode() + // Capture directives from upcoming document + c.captureDirectives(n) + return n + } + } + + switch c.peek() { + case SCALAR_EVENT: + return c.scalar() + case ALIAS_EVENT: + return c.alias() + case MAPPING_START_EVENT: + return c.mapping() + case SEQUENCE_START_EVENT: + return c.sequence() + case DOCUMENT_START_EVENT: + return c.document() + case STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer (when not using stream nodes). + return nil + case TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") + default: + panic("internal error: attempted to parse unknown event (please report): " + c.event.Type.String()) + } +} + +func (c *Composer) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" 
{ + // Normalize tag to short form (e.g., tag:yaml.org,2002:str -> !!str) + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + // Delegate to resolver to determine tag from value + tag, _ = resolve("", value) + } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !c.Textless { + n.Line = c.event.StartMark.Line + 1 + n.Column = c.event.StartMark.Column + 1 + n.HeadComment = string(c.event.HeadComment) + n.LineComment = string(c.event.LineComment) + n.FootComment = string(c.event.FootComment) + } + return n +} + +func (c *Composer) parseChild(parent *Node) *Node { + child := c.Parse() + parent.Content = append(parent.Content, child) + return child +} + +func (c *Composer) document() *Node { + n := c.node(DocumentNode, "", "", "") + c.doc = n + c.expect(DOCUMENT_START_EVENT) + c.parseChild(n) + if c.peek() == DOCUMENT_END_EVENT { + n.FootComment = string(c.event.FootComment) + } + c.expect(DOCUMENT_END_EVENT) + + // If stream nodes enabled, prepare to return a stream node next + if c.streamNodes { + c.returnStream = true + } + + return n +} + +func (c *Composer) createStreamNode() *Node { + n := &Node{ + Kind: StreamNode, + Encoding: c.encoding, + } + if !c.Textless && c.event.Type != NO_EVENT { + n.Line = c.event.StartMark.Line + 1 + n.Column = c.event.StartMark.Column + 1 + } + return n +} + +// captureDirectives captures version and tag directives from upcoming DOCUMENT_START. 
+func (c *Composer) captureDirectives(n *Node) { + if c.peek() == DOCUMENT_START_EVENT { + if vd := c.event.GetVersionDirective(); vd != nil { + n.Version = &StreamVersionDirective{ + Major: vd.Major(), + Minor: vd.Minor(), + } + } + if tds := c.event.GetTagDirectives(); len(tds) > 0 { + n.TagDirectives = make([]StreamTagDirective, len(tds)) + for i, td := range tds { + n.TagDirectives[i] = StreamTagDirective{ + Handle: td.GetHandle(), + Prefix: td.GetPrefix(), + } + } + } + } +} + +func (c *Composer) alias() *Node { + n := c.node(AliasNode, "", "", string(c.event.Anchor)) + n.Alias = c.anchors[n.Value] + if n.Alias == nil { + msg := fmt.Sprintf("unknown anchor '%s' referenced", n.Value) + Fail(&ParserError{ + Message: msg, + Mark: Mark{ + Line: n.Line, + Column: n.Column, + }, + }) + } + c.expect(ALIAS_EVENT) + return n +} + +func (c *Composer) scalar() *Node { + parsedStyle := c.event.ScalarStyle() + var nodeStyle Style + switch { + case parsedStyle&DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + nodeValue := string(c.event.Value) + nodeTag := string(c.event.Tag) + var defaultTag string + if nodeStyle != 0 { + defaultTag = strTag + } + n := c.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + c.anchor(n, c.event.Anchor) + c.expect(SCALAR_EVENT) + return n +} + +func (c *Composer) sequence() *Node { + n := c.node(SequenceNode, seqTag, string(c.event.Tag), "") + if c.event.SequenceStyle()&FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + c.anchor(n, c.event.Anchor) + c.expect(SEQUENCE_START_EVENT) + for c.peek() != SEQUENCE_END_EVENT { + c.parseChild(n) + } + n.LineComment = string(c.event.LineComment) + n.FootComment = string(c.event.FootComment) + c.expect(SEQUENCE_END_EVENT) + return n 
+} + +func (c *Composer) mapping() *Node { + n := c.node(MappingNode, mapTag, string(c.event.Tag), "") + block := true + if c.event.MappingStyle()&FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + c.anchor(n, c.event.Anchor) + c.expect(MAPPING_START_EVENT) + for c.peek() != MAPPING_END_EVENT { + k := c.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. + if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := c.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if c.peek() == TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(c.event.FootComment) + } + c.expect(TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(c.event.LineComment) + n.FootComment = string(c.event.FootComment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + c.expect(MAPPING_END_EVENT) + return n +} + +func Fail(err error) { + panic(&YAMLError{err}) +} + +func failf(format string, args ...any) { + panic(&YAMLError{fmt.Errorf("yaml: "+format, args...)}) +} diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/constructor.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/constructor.go new file mode 100644 index 000000000000..66f209a59853 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/constructor.go @@ -0,0 +1,1187 @@ +// Copyright 2011-2019 Canonical Ltd +// Copyright 2025 The go-yaml Project Contributors +// SPDX-License-Identifier: Apache-2.0 + +// Constructor stage: Converts YAML nodes to Go values. +// Handles type resolution, custom unmarshalers, and struct field mapping. 
+ +package libyaml + +import ( + "encoding" + "encoding/base64" + "errors" + "fmt" + "math" + "reflect" + "strings" + "sync" + "time" +) + +// -------------------------------------------------------------------------- +// Interfaces and types needed by constructor + +// constructor interface may be implemented by types to customize their +// behavior when being constructed from a YAML document. +type constructor interface { + UnmarshalYAML(value *Node) error +} + +type obsoleteConstructor interface { + UnmarshalYAML(construct func(any) error) error +} + +// Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. +type Marshaler interface { + MarshalYAML() (any, error) +} + +// IsZeroer is used to check whether an object is zero to determine whether +// it should be omitted when marshaling with the ,omitempty flag. One notable +// implementation is time.Time. +type IsZeroer interface { + IsZero() bool +} + +// handleErr recovers from panics caused by yaml errors +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(*YAMLError); ok { + *err = e.Err + } else { + panic(v) + } + } +} + +// -------------------------------------------------------------------------- +// Struct field information + +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int + + // InlineConstructors holds indexes to inlined fields that + // contain constructor values. + InlineConstructors [][]int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. 
+ Inline []int +} + +var ( + structMap = make(map[reflect.Type]*structInfo) + fieldMapMutex sync.RWMutex + constructorType reflect.Type +) + +func init() { + var v constructor + constructorType = reflect.ValueOf(&v).Elem().Type() +} + +// hasConstructYAMLMethod checks if a type has an UnmarshalYAML method +// that looks like it implements yaml.Unmarshaler (from root package). +// This is needed because we can't directly check for the interface type +// since it's in a different package that we can't import. +func hasConstructYAMLMethod(t reflect.Type) bool { + method, found := t.MethodByName("UnmarshalYAML") + if !found { + return false + } + + // Check signature: func(*T) UnmarshalYAML(*Node) error + mtype := method.Type + if mtype.NumIn() != 2 || mtype.NumOut() != 1 { + return false + } + + // First param is receiver (already checked by MethodByName) + // Second param should be a pointer to a Node-like struct + paramType := mtype.In(1) + if paramType.Kind() != reflect.Ptr { + return false + } + + elemType := paramType.Elem() + if elemType.Kind() != reflect.Struct || elemType.Name() != "Node" { + return false + } + + // Return type should be error + retType := mtype.Out(0) + if retType.Kind() != reflect.Interface || retType.Name() != "error" { + return false + } + + return true +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineConstructors := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && !strings.Contains(string(field.Tag), ":") { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := 
strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, fmt.Errorf("unsupported flag %q in tag %q of type %s", flag, tag, st) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Pointer: + ftype := field.Type + for ftype.Kind() == reflect.Pointer { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + // Check for both libyaml.constructor and yaml.Unmarshaler (by method name) + if reflect.PointerTo(ftype).Implements(constructorType) || hasConstructYAMLMethod(reflect.PointerTo(ftype)) { + inlineConstructors = append(inlineConstructors, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineConstructors { + inlineConstructors = append(inlineConstructors, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
+ } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineConstructors: inlineConstructors, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// isZero reports whether v represents the zero value for its type. +// If v implements the IsZeroer interface, IsZero() is called. +// Otherwise, zero is determined by checking type-specific conditions. +// This is used to determine omitempty behavior when marshaling. 
+func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Pointer || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Pointer: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} + +type Constructor struct { + doc *Node + aliases map[*Node]bool + TypeErrors []*ConstructError + + stringMapType reflect.Type + generalMapType reflect.Type + + KnownFields bool + UniqueKeys bool + constructCount int + aliasCount int + aliasDepth int + + mergedFields map[any]bool +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]any{}) + generalMapType = reflect.TypeOf(map[any]any{}) + ifaceType = generalMapType.Elem() +) + +func NewConstructor(opts *Options) *Constructor { + return &Constructor{ + stringMapType: stringMapType, + generalMapType: generalMapType, + KnownFields: opts.KnownFields, + UniqueKeys: opts.UniqueKeys, + aliases: make(map[*Node]bool), + } +} + +// Construct decodes YAML input into the provided output value. +// The out parameter must be a pointer to the value to decode into. +// Returns a [LoadErrors] if type mismatches occur during decoding. 
func Construct(in []byte, out any, opts *Options) error {
	d := NewConstructor(opts)
	p := NewComposer(in)
	defer p.Destroy()
	node := p.Parse()
	if node != nil {
		v := reflect.ValueOf(out)
		if v.Kind() == reflect.Pointer && !v.IsNil() {
			v = v.Elem()
		}
		d.Construct(node, v)
	}
	if len(d.TypeErrors) > 0 {
		return &LoadErrors{Errors: d.TypeErrors}
	}
	return nil
}

// tagError records a "cannot construct X into Y" error for node n,
// quoting a (truncated) scalar value when the tag is not a collection tag.
func (c *Constructor) tagError(n *Node, tag string, out reflect.Value) {
	if n.Tag != "" {
		tag = n.Tag
	}
	value := n.Value
	if tag != seqTag && tag != mapTag {
		if len(value) > 10 {
			value = " `" + value[:7] + "...`"
		} else {
			value = " `" + value + "`"
		}
	}
	c.TypeErrors = append(c.TypeErrors, &ConstructError{
		Err:    fmt.Errorf("cannot construct %s%s into %s", shortTag(tag), value, out.Type()),
		Line:   n.Line,
		Column: n.Column,
	})
}

// callConstructor invokes a node-based UnmarshalYAML implementation and
// folds any error it returns into c.TypeErrors.
func (c *Constructor) callConstructor(n *Node, u constructor) (good bool) {
	err := u.UnmarshalYAML(n)
	switch e := err.(type) {
	case nil:
		return true
	case *LoadErrors:
		c.TypeErrors = append(c.TypeErrors, e.Errors...)
		return false
	default:
		c.TypeErrors = append(c.TypeErrors, &ConstructError{
			Err:    err,
			Line:   n.Line,
			Column: n.Column,
		})
		return false
	}
}

// callObsoleteConstructor invokes the legacy func-based UnmarshalYAML form.
// Errors produced while decoding inside the callback are captured, removed
// from c.TypeErrors, and handed to the implementation as a *LoadErrors.
func (c *Constructor) callObsoleteConstructor(n *Node, u obsoleteConstructor) (good bool) {
	terrlen := len(c.TypeErrors)
	err := u.UnmarshalYAML(func(v any) (err error) {
		defer handleErr(&err)
		c.Construct(n, reflect.ValueOf(v))
		if len(c.TypeErrors) > terrlen {
			issues := c.TypeErrors[terrlen:]
			c.TypeErrors = c.TypeErrors[:terrlen]
			return &LoadErrors{issues}
		}
		return nil
	})
	switch e := err.(type) {
	case nil:
		return true
	case *LoadErrors:
		c.TypeErrors = append(c.TypeErrors, e.Errors...)
		return false
	default:
		c.TypeErrors = append(c.TypeErrors, &ConstructError{
			Err:    err,
			Line:   n.Line,
			Column: n.Column,
		})
		return false
	}
}

// isTextUnmarshaler reports whether out (after pointer dereferencing)
// implements encoding.TextUnmarshaler.
func isTextUnmarshaler(out reflect.Value) bool {
	// Dereference pointers to check the underlying type,
	// similar to how prepare() handles Constructor checks.
	for out.Kind() == reflect.Pointer {
		if out.IsNil() {
			// Create a new instance to check the type
			out = reflect.New(out.Type().Elem()).Elem()
		} else {
			out = out.Elem()
		}
	}
	if out.CanAddr() {
		_, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
		return ok
	}
	return false
}

// prepare initializes and dereferences pointers and calls UnmarshalYAML
// if a value is found to implement it.
// It returns the initialized and dereferenced out value, whether
// construction was already done by UnmarshalYAML, and if so whether
// its types constructed appropriately.
//
// If n holds a null value, prepare returns before doing anything.
func (c *Constructor) prepare(n *Node, out reflect.Value) (newout reflect.Value, constructed, good bool) {
	if n.ShortTag() == nullTag {
		return out, false, false
	}
	again := true
	for again {
		again = false
		if out.Kind() == reflect.Pointer {
			if out.IsNil() {
				out.Set(reflect.New(out.Type().Elem()))
			}
			out = out.Elem()
			again = true
		}
		if out.CanAddr() {
			// Try yaml.Unmarshaler (from root package) first
			if called, good := c.tryCallYAMLConstructor(n, out); called {
				return out, true, good
			}

			outi := out.Addr().Interface()
			// Check for libyaml.constructor
			if u, ok := outi.(constructor); ok {
				good = c.callConstructor(n, u)
				return out, true, good
			}
			if u, ok := outi.(obsoleteConstructor); ok {
				good = c.callObsoleteConstructor(n, u)
				return out, true, good
			}
		}
	}
	return out, false, false
}

// fieldByIndex walks a (possibly nested, inline) struct field index path,
// allocating intermediate nil pointers as needed. Returns the zero Value
// when n is a null node.
func (c *Constructor) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) {
	if n.ShortTag() == nullTag {
		return reflect.Value{}
	}
	for _, num := range index {
		for {
			if v.Kind() == reflect.Pointer {
				if v.IsNil() {
					v.Set(reflect.New(v.Type().Elem()))
				}
				v = v.Elem()
				continue
			}
			break
		}
		v = v.Field(num)
	}
	return v
}

const (
	// 400,000 decode operations is ~500kb of dense object declarations, or
	// ~5kb of dense object declarations with 10000% alias expansion
	alias_ratio_range_low = 400000

	// 4,000,000 decode operations is ~5MB of dense object declarations, or
	// ~4.5MB of dense object declarations with 10% alias expansion
	alias_ratio_range_high = 4000000

	// alias_ratio_range is the range over which we scale allowed alias ratios
	alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
)

// allowedAliasRatio returns the maximum fraction of decode operations that
// may originate from alias expansion, scaled by document size. This bounds
// "billion laughs"-style alias amplification attacks.
func allowedAliasRatio(constructCount int) float64 {
	switch {
	case constructCount <= alias_ratio_range_low:
		// allow 99% to come from alias expansion for small-to-medium documents
		return 0.99
	case constructCount >= alias_ratio_range_high:
		// allow 10% to come from alias expansion for very large documents
		return 0.10
	default:
		// scale smoothly from 99% down to 10% over the range.
		// this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
		// 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
		return 0.99 - 0.89*(float64(constructCount-alias_ratio_range_low)/alias_ratio_range)
	}
}

// constructorAdapter is an interface that wraps the root package's Unmarshaler interface.
// This allows the constructor to call constructors that expect *yaml.Node instead of *libyaml.Node.
type constructorAdapter interface {
	CallRootConstructor(n *Node) error
}

// tryCallYAMLConstructor checks if the value has an UnmarshalYAML method that takes
// a *yaml.Node (from the root package) and calls it if found.
// This handles the case where user types implement yaml.Unmarshaler instead of libyaml.constructor.
+func (c *Constructor) tryCallYAMLConstructor(n *Node, out reflect.Value) (called bool, good bool) { + if !out.CanAddr() { + return false, false + } + + addr := out.Addr() + // Check for UnmarshalYAML method + method := addr.MethodByName("UnmarshalYAML") + if !method.IsValid() { + return false, false + } + + // Check method signature: func(*yaml.Node) error + mtype := method.Type() + if mtype.NumIn() != 1 || mtype.NumOut() != 1 { + return false, false + } + + // Check if parameter is a pointer to a Node-like struct + paramType := mtype.In(0) + if paramType.Kind() != reflect.Ptr { + return false, false + } + + elemType := paramType.Elem() + if elemType.Kind() != reflect.Struct { + return false, false + } + + // Check if it's the same underlying type as our Node + // Both yaml.Node and libyaml.Node have the same structure + if elemType.Name() != "Node" { + return false, false + } + + // Call the method with a converted node + // Since yaml.Node and libyaml.Node have the same structure, + // we can convert using unsafe pointer cast + nodeValue := reflect.NewAt(elemType, reflect.ValueOf(n).UnsafePointer()) + + results := method.Call([]reflect.Value{nodeValue}) + err := results[0].Interface() + + if err == nil { + return true, true + } + + switch e := err.(type) { + case *LoadErrors: + c.TypeErrors = append(c.TypeErrors, e.Errors...) 
+ return true, false + default: + c.TypeErrors = append(c.TypeErrors, &ConstructError{ + Err: e.(error), + Line: n.Line, + Column: n.Column, + }) + return true, false + } +} + +func (c *Constructor) Construct(n *Node, out reflect.Value) (good bool) { + c.constructCount++ + if c.aliasDepth > 0 { + c.aliasCount++ + } + if c.aliasCount > 100 && c.constructCount > 1000 && float64(c.aliasCount)/float64(c.constructCount) > allowedAliasRatio(c.constructCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + + // When out type implements [encoding.TextUnmarshaler], ensure the node is + // a scalar. Otherwise, for example, constructing a YAML mapping into + // a struct having no exported fields, but implementing TextUnmarshaler + // would silently succeed, but do nothing. + // + // Note that this matches the behavior of both encoding/json and encoding/json/v2. + if n.Kind != ScalarNode && isTextUnmarshaler(out) { + err := fmt.Errorf("cannot construct %s into %s (TextUnmarshaler)", shortTag(n.Tag), out.Type()) + c.TypeErrors = append(c.TypeErrors, &ConstructError{ + Err: err, + Line: n.Line, + Column: n.Column, + }) + return false + } + switch n.Kind { + case DocumentNode: + return c.document(n, out) + case AliasNode: + return c.alias(n, out) + } + out, constructed, good := c.prepare(n, out) + if constructed { + return good + } + switch n.Kind { + case ScalarNode: + good = c.scalar(n, out) + case MappingNode: + good = c.mapping(n, out) + case SequenceNode: + good = c.sequence(n, out) + case 0: + if n.IsZero() { + return c.null(out) + } + fallthrough + default: + failf("cannot construct node with unknown kind %d", n.Kind) + } + return good +} + +func (c *Constructor) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + c.doc = n + c.Construct(n.Content[0], out) + return true + } + return false +} + +func (c *Constructor) alias(n *Node, out reflect.Value) 
(good bool) { + if c.aliases[n] { + // TODO this could actually be allowed in some circumstances. + failf("anchor '%s' value contains itself", n.Value) + } + c.aliases[n] = true + c.aliasDepth++ + good = c.Construct(n.Alias, out) + c.aliasDepth-- + delete(c.aliases, n) + return good +} + +func (c *Constructor) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Pointer, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (c *Constructor) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved any + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + return c.null(out) + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be constructed into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. 
+ text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + c.TypeErrors = append(c.TypeErrors, &ConstructError{ + Err: err, + Line: n.Line, + Column: n.Column, + }) + return false + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Slice: + // allow decoding !!binary-tagged value into []byte specifically + if out.Type().Elem().Kind() == reflect.Uint8 { + if tag == binaryTag { + out.SetBytes([]byte(resolved.(string))) + return true + } + } + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. + isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } else if isDuration && resolved == 0 { + out.SetInt(0) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 { + intVal := int64(resolved) + if !out.OverflowInt(intVal) { + out.SetInt(intVal) + return true + } + } + case float64: + if !isDuration && resolved >= math.MinInt64 && resolved <= math.MaxInt64 { + intVal := int64(resolved) + // Verify conversion is lossless (handles floating-point precision) + if float64(intVal) == resolved && !out.OverflowInt(intVal) { + out.SetInt(intVal) + return true + } + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && 
!out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(resolved) { + out.SetUint(resolved) + return true + } + case float64: + if resolved >= 0 && resolved <= math.MaxUint64 { + uintVal := uint64(resolved) + // Verify conversion is lossless (handles floating-point precision) + if float64(uintVal) == resolved && !out.OverflowUint(uintVal) { + out.SetUint(uintVal) + return true + } + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to construct into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Pointer: + panic("yaml internal error: please report the issue") + } + c.tagError(n, tag, out) + return false +} + +func settableValueOf(i any) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (c *Constructor) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + 
out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. + iface = out + out = settableValueOf(make([]any, l)) + default: + c.tagError(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := c.Construct(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (c *Constructor) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if c.UniqueKeys { + nerrs := len(c.TypeErrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + c.TypeErrors = append(c.TypeErrors, &ConstructError{ + Err: fmt.Errorf("mapping key %#v already defined at line %d", nj.Value, ni.Line), + Line: nj.Line, + Column: nj.Column, + }) + } + } + } + if len(c.TypeErrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return c.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(c.stringMapType) + } else { + out = reflect.MakeMap(c.generalMapType) + } + iface.Set(out) + default: + c.tagError(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := c.stringMapType + generalMapType := c.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + c.stringMapType = outt + } else if outt.Key() == ifaceType { + c.generalMapType = outt + } + } + + mergedFields := c.mergedFields + c.mergedFields = nil + + var mergeNode *Node + + mapIsNew := false + if out.IsNil() { + 
out.Set(reflect.MakeMap(outt)) + mapIsNew = true + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + mergeNode = n.Content[i+1] + continue + } + k := reflect.New(kt).Elem() + if c.Construct(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + if c.getPossiblyUnhashableKey(mergedFields, ki) { + continue + } + c.setPossiblyUnhashableKey(mergedFields, ki, true) + } + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("cannot use '%#v' as a map key; try decoding into yaml.Node", k.Interface()) + } + e := reflect.New(et).Elem() + if c.Construct(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { + out.SetMapIndex(k, e) + } + } + } + + c.mergedFields = mergedFields + if mergeNode != nil { + c.merge(n, mergeNode, out) + } + + c.stringMapType = stringMapType + c.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { + return false + } + } + return true +} + +func (c *Constructor) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineConstructors { + field := c.fieldByIndex(n, out, index) + c.prepare(n, field) + } + + mergedFields := c.mergedFields + c.mergedFields = nil + var mergeNode *Node + var doneFields []bool + if c.UniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + if 
isMerge(ni) { + mergeNode = n.Content[i+1] + continue + } + if !c.Construct(ni, name) { + continue + } + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { + if c.UniqueKeys { + if doneFields[info.Id] { + c.TypeErrors = append(c.TypeErrors, &ConstructError{ + Err: fmt.Errorf("field %s already set in type %s", name.String(), out.Type()), + Line: ni.Line, + Column: ni.Column, + }) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = c.fieldByIndex(n, out, info.Inline) + } + c.Construct(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + c.Construct(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if c.KnownFields { + c.TypeErrors = append(c.TypeErrors, &ConstructError{ + Err: fmt.Errorf("field %s not found in type %s", name.String(), out.Type()), + Line: ni.Line, + Column: ni.Column, + }) + } + } + + c.mergedFields = mergedFields + if mergeNode != nil { + c.merge(n, mergeNode, out) + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (c *Constructor) setPossiblyUnhashableKey(m map[any]bool, key any, value bool) { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + m[key] = value +} + +func (c *Constructor) getPossiblyUnhashableKey(m map[any]bool, key any) bool { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + return m[key] +} + +func (c *Constructor) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := c.mergedFields + if mergedFields == nil { + c.mergedFields = make(map[any]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := 
reflect.New(ifaceType).Elem() + if c.Construct(parent.Content[i], k) { + c.setPossiblyUnhashableKey(c.mergedFields, k.Interface(), true) + } + } + } + + switch merge.Kind { + case MappingNode: + c.Construct(merge, out) + case AliasNode: + if merge.Alias != nil && merge.Alias.Kind != MappingNode { + failWantMap() + } + c.Construct(merge, out) + case SequenceNode: + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + c.Construct(ni, out) + } + default: + failWantMap() + } + + c.mergedFields = mergedFields +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && shortTag(n.Tag) == mergeTag +} diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/doc.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/doc.go new file mode 100644 index 000000000000..7f8690575d2f --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/doc.go @@ -0,0 +1,8 @@ +// Copyright 2025 The go-yaml Project Contributors +// SPDX-License-Identifier: Apache-2.0 + +// Package libyaml contains internal helpers for working with YAML +// +// It's a reworked version of the original libyaml package from go-yaml v2/v3, +// adapted to work with Go specifications +package libyaml diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/emitter.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/emitter.go new file mode 100644 index 000000000000..7ea83e89cdbd --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/emitter.go @@ -0,0 +1,2083 @@ +// Copyright 2006-2010 Kirill Simonov +// Copyright 2011-2019 Canonical Ltd +// Copyright 2025 The go-yaml Project Contributors +// SPDX-License-Identifier: Apache-2.0 AND MIT + +// Emitter stage: Generates YAML output from events. +// Handles formatting, indentation, line wrapping, and output buffering. 

package libyaml

import (
	"bytes"
	"fmt"
)

// Flush the buffer if needed. The buffer is flushed whenever fewer than
// 5 spare bytes remain — presumably headroom for the widest UTF-8
// sequence plus a line break (TODO confirm against write/putLineBreak).
func (emitter *Emitter) flushIfNeeded() error {
	if emitter.buffer_pos+5 >= len(emitter.buffer) {
		return emitter.flush()
	}
	return nil
}

// Put a single byte to the output buffer, flushing first if the
// buffer is nearly full. Advances the current column.
func (emitter *Emitter) put(value byte) error {
	if emitter.buffer_pos+5 >= len(emitter.buffer) {
		if err := emitter.flush(); err != nil {
			return err
		}
	}
	emitter.buffer[emitter.buffer_pos] = value
	emitter.buffer_pos++
	emitter.column++
	return nil
}

// Put a line break to the output buffer using the configured break style
// (CR, LF, or CRLF), resetting the column and bumping the line counter.
func (emitter *Emitter) putLineBreak() error {
	if emitter.buffer_pos+5 >= len(emitter.buffer) {
		if err := emitter.flush(); err != nil {
			return err
		}
	}
	switch emitter.line_break {
	case CR_BREAK:
		emitter.buffer[emitter.buffer_pos] = '\r'
		emitter.buffer_pos += 1
	case LN_BREAK:
		emitter.buffer[emitter.buffer_pos] = '\n'
		emitter.buffer_pos += 1
	case CRLN_BREAK:
		emitter.buffer[emitter.buffer_pos+0] = '\r'
		emitter.buffer[emitter.buffer_pos+1] = '\n'
		emitter.buffer_pos += 2
	default:
		panic("unknown line break setting")
	}
	// An empty line (break at column 0) marks open space above the next content.
	if emitter.column == 0 {
		emitter.space_above = true
	}
	emitter.column = 0
	emitter.line++
	// [Go] Do this here and below and drop from everywhere else (see commented lines).
	emitter.indention = true
	return nil
}

// Copy a single character (up to 4 bytes of UTF-8) from a string into the
// buffer, advancing *i past the character.
func (emitter *Emitter) write(s []byte, i *int) error {
	if emitter.buffer_pos+5 >= len(emitter.buffer) {
		if err := emitter.flush(); err != nil {
			return err
		}
	}
	p := emitter.buffer_pos
	// width() gives the UTF-8 byte length from the leading byte; the
	// fallthrough chain copies exactly that many bytes.
	w := width(s[*i])
	switch w {
	case 4:
		emitter.buffer[p+3] = s[*i+3]
		fallthrough
	case 3:
		emitter.buffer[p+2] = s[*i+2]
		fallthrough
	case 2:
		emitter.buffer[p+1] = s[*i+1]
		fallthrough
	case 1:
		emitter.buffer[p+0] = s[*i+0]
	default:
		panic("unknown character width")
	}
	emitter.column++
	emitter.buffer_pos += w
	*i += w
	return nil
}

// Write a whole string into buffer, one character at a time.
func (emitter *Emitter) writeAll(s []byte) error {
	for i := 0; i < len(s); {
		if err := emitter.write(s, &i); err != nil {
			return err
		}
	}
	return nil
}

// Copy a line break character from a string into buffer. A '\n' is
// normalized through putLineBreak; any other break character is copied
// verbatim while still resetting column/line bookkeeping.
func (emitter *Emitter) writeLineBreak(s []byte, i *int) error {
	if s[*i] == '\n' {
		if err := emitter.putLineBreak(); err != nil {
			return err
		}
		*i++
	} else {
		if err := emitter.write(s, i); err != nil {
			return err
		}
		if emitter.column == 0 {
			emitter.space_above = true
		}
		emitter.column = 0
		emitter.line++
		// [Go] Do this here and above and drop from everywhere else (see commented lines).
		emitter.indention = true
	}
	return nil
}

// Emit an event. Events are queued until enough lookahead has
// accumulated (see needMoreEvents), then fed through the state machine.
func (emitter *Emitter) Emit(event *Event) error {
	emitter.events = append(emitter.events, *event)
	for !emitter.needMoreEvents() {
		event := &emitter.events[emitter.events_head]
		if err := emitter.analyzeEvent(event); err != nil {
			return err
		}
		if err := emitter.stateMachine(event); err != nil {
			return err
		}
		event.Delete()
		emitter.events_head++
	}
	return nil
}

// Check if we need to accumulate more events before emitting.
//
// We accumulate extra
// - 1 event for DOCUMENT-START
// - 2 events for SEQUENCE-START
// - 3 events for MAPPING-START
func (emitter *Emitter) needMoreEvents() bool {
	if emitter.events_head == len(emitter.events) {
		return true
	}
	var accumulate int
	switch emitter.events[emitter.events_head].Type {
	case DOCUMENT_START_EVENT:
		accumulate = 1
	case SEQUENCE_START_EVENT:
		accumulate = 2
	case MAPPING_START_EVENT:
		accumulate = 3
	default:
		return false
	}
	if len(emitter.events)-emitter.events_head > accumulate {
		return false
	}
	// Not enough lookahead yet — but if the queued events already close
	// the structure (nesting level returns to zero), emitting can proceed.
	var level int
	for i := emitter.events_head; i < len(emitter.events); i++ {
		switch emitter.events[i].Type {
		case STREAM_START_EVENT, DOCUMENT_START_EVENT, SEQUENCE_START_EVENT, MAPPING_START_EVENT:
			level++
		case STREAM_END_EVENT, DOCUMENT_END_EVENT, SEQUENCE_END_EVENT, MAPPING_END_EVENT:
			level--
		}
		if level == 0 {
			return false
		}
	}
	return true
}

// Append a directive to the directives stack. A duplicate handle is an
// error unless allow_duplicates is set (used for the default directives).
func (emitter *Emitter) appendTagDirective(value *TagDirective, allow_duplicates bool) error {
	for i := 0; i < len(emitter.tag_directives); i++ {
		if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
			if allow_duplicates {
				return nil
			}
			return EmitterError{
				Message: "duplicate %TAG directive",
			}
		}
	}

	// [Go] Do we actually need to copy this given garbage collection
	// and the lack of deallocating destructors?
	tag_copy := TagDirective{
		handle: make([]byte, len(value.handle)),
		prefix: make([]byte, len(value.prefix)),
	}
	copy(tag_copy.handle, value.handle)
	copy(tag_copy.prefix, value.prefix)
	emitter.tag_directives = append(emitter.tag_directives, tag_copy)
	return nil
}

// Increase the indentation level.
func (emitter *Emitter) increaseIndentCompact(flow, indentless bool, compact_seq bool) error {
	// The previous indent is pushed so the caller can restore it when the
	// nested node ends.
	emitter.indents = append(emitter.indents, emitter.indent)
	if emitter.indent < 0 {
		// Top level: flow style starts indented, block style at column 0.
		if flow {
			emitter.indent = emitter.BestIndent
		} else {
			emitter.indent = 0
		}
	} else if !indentless {
		// [Go] This was changed so that indentations are more regular.
		if emitter.states[len(emitter.states)-1] == EMIT_BLOCK_SEQUENCE_ITEM_STATE {
			// The first indent inside a sequence will just skip the "- " indicator.
			emitter.indent += 2
		} else {
			// Everything else aligns to the chosen indentation.
			emitter.indent = emitter.BestIndent * ((emitter.indent + emitter.BestIndent) / emitter.BestIndent)
			if compact_seq {
				// The value compact_seq passed in is almost always set to `false` when this function is called,
				// except when we are dealing with sequence nodes. So this gets triggered to subtract 2 only when we
				// are increasing the indent to account for sequence nodes, which will be correct because we need to
				// subtract 2 to account for the - at the beginning of the sequence node.
				emitter.indent = emitter.indent - 2
			}
		}
	}
	return nil
}

// State dispatcher.
func (emitter *Emitter) stateMachine(event *Event) error {
	switch emitter.state {
	default:
		// Any unrecognized state falls out of the switch into the panic below.
	case EMIT_STREAM_START_STATE:
		return emitter.emitStreamStart(event)

	case EMIT_FIRST_DOCUMENT_START_STATE:
		return emitter.emitDocumentStart(event, true)

	case EMIT_DOCUMENT_START_STATE:
		return emitter.emitDocumentStart(event, false)

	case EMIT_DOCUMENT_CONTENT_STATE:
		return emitter.emitDocumentContent(event)

	case EMIT_DOCUMENT_END_STATE:
		return emitter.emitDocumentEnd(event)

	case EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
		return emitter.emitFlowSequenceItem(event, true, false)

	case EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE:
		return emitter.emitFlowSequenceItem(event, false, true)

	case EMIT_FLOW_SEQUENCE_ITEM_STATE:
		return emitter.emitFlowSequenceItem(event, false, false)

	case EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
		return emitter.emitFlowMappingKey(event, true, false)

	case EMIT_FLOW_MAPPING_TRAIL_KEY_STATE:
		return emitter.emitFlowMappingKey(event, false, true)

	case EMIT_FLOW_MAPPING_KEY_STATE:
		return emitter.emitFlowMappingKey(event, false, false)

	case EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
		return emitter.emitFlowMappingValue(event, true)

	case EMIT_FLOW_MAPPING_VALUE_STATE:
		return emitter.emitFlowMappingValue(event, false)

	case EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
		return emitter.emitBlockSequenceItem(event, true)

	case EMIT_BLOCK_SEQUENCE_ITEM_STATE:
		return emitter.emitBlockSequenceItem(event, false)

	case EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
		return emitter.emitBlockMappingKey(event, true)

	case EMIT_BLOCK_MAPPING_KEY_STATE:
		return emitter.emitBlockMappingKey(event, false)

	case EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
		return emitter.emitBlockMappingValue(event, true)

	case EMIT_BLOCK_MAPPING_VALUE_STATE:
		return emitter.emitBlockMappingValue(event, false)

	case EMIT_END_STATE:
		return EmitterError{
			Message: "expected nothing after STREAM-END",
		}
	}
	panic("invalid emitter state")
}

// Expect STREAM-START. Resolves defaulted settings (encoding, indent,
// width, line breaks), resets position bookkeeping, and writes a BOM
// for non-UTF-8 encodings.
func (emitter *Emitter) emitStreamStart(event *Event) error {
	if event.Type != STREAM_START_EVENT {
		return EmitterError{
			Message: "expected STREAM-START",
		}
	}
	if emitter.encoding == ANY_ENCODING {
		emitter.encoding = event.encoding
		if emitter.encoding == ANY_ENCODING {
			emitter.encoding = UTF8_ENCODING
		}
	}
	// Clamp indent to the supported 2..9 range; default is 2.
	if emitter.BestIndent < 2 || emitter.BestIndent > 9 {
		emitter.BestIndent = 2
	}
	if emitter.best_width >= 0 && emitter.best_width <= emitter.BestIndent*2 {
		emitter.best_width = 80
	}
	if emitter.best_width < 0 {
		emitter.best_width = 1<<31 - 1 // effectively unlimited line width
	}
	if emitter.line_break == ANY_BREAK {
		emitter.line_break = LN_BREAK
	}

	emitter.indent = -1
	emitter.line = 0
	emitter.column = 0
	emitter.whitespace = true
	emitter.indention = true
	emitter.space_above = true
	emitter.foot_indent = -1

	if emitter.encoding != UTF8_ENCODING {
		if err := emitter.writeBom(); err != nil {
			return err
		}
	}
	emitter.state = EMIT_FIRST_DOCUMENT_START_STATE
	return nil
}

// Expect DOCUMENT-START or STREAM-END.
func (emitter *Emitter) emitDocumentStart(event *Event, first bool) error {
	if event.Type == DOCUMENT_START_EVENT {

		if event.versionDirective != nil {
			if err := emitter.analyzeVersionDirective(event.versionDirective); err != nil {
				return err
			}
		}

		for i := 0; i < len(event.tagDirectives); i++ {
			tag_directive := &event.tagDirectives[i]
			if err := emitter.analyzeTagDirective(tag_directive); err != nil {
				return err
			}
			if err := emitter.appendTagDirective(tag_directive, false); err != nil {
				return err
			}
		}

		// Default directives never clash with explicit ones (duplicates allowed).
		for i := 0; i < len(default_tag_directives); i++ {
			tag_directive := &default_tag_directives[i]
			if err := emitter.appendTagDirective(tag_directive, true); err != nil {
				return err
			}
		}

		// implicit means the "---" marker may be omitted; any directive,
		// canonical mode, or a non-first document forces it back on.
		implicit := event.Implicit
		if !first || emitter.canonical {
			implicit = false
		}

		if emitter.OpenEnded && (event.versionDirective != nil || len(event.tagDirectives) > 0) {
			if err := emitter.writeIndicator([]byte("..."), true, false, false); err != nil {
				return err
			}
			if err := emitter.writeIndent(); err != nil {
				return err
			}
		}

		if event.versionDirective != nil {
			implicit = false
			if err := emitter.writeIndicator([]byte("%YAML"), true, false, false); err != nil {
				return err
			}
			if err := emitter.writeIndicator([]byte("1.1"), true, false, false); err != nil {
				return err
			}
			if err := emitter.writeIndent(); err != nil {
				return err
			}
		}

		if len(event.tagDirectives) > 0 {
			implicit = false
			for i := 0; i < len(event.tagDirectives); i++ {
				tag_directive := &event.tagDirectives[i]
				if err := emitter.writeIndicator([]byte("%TAG"), true, false, false); err != nil {
					return err
				}
				if err := emitter.writeTagHandle(tag_directive.handle); err != nil {
					return err
				}
				if err := emitter.writeTagContent(tag_directive.prefix, true); err != nil {
					return err
				}
				if err := emitter.writeIndent(); err != nil {
					return err
				}
			}
		}

		if emitter.checkEmptyDocument() {
			implicit = false
		}
		if !implicit {
			if err := emitter.writeIndent(); err != nil {
				return err
			}
			if err := emitter.writeIndicator([]byte("---"), true, false, false); err != nil {
				return err
			}
			// NOTE(review): `|| true` makes this branch unconditional; kept
			// as-is since it matches upstream behavior.
			if emitter.canonical || true {
				if err := emitter.writeIndent(); err != nil {
					return err
				}
			}
		}

		if len(emitter.HeadComment) > 0 {
			if err := emitter.processHeadComment(); err != nil {
				return err
			}
			if err := emitter.putLineBreak(); err != nil {
				return err
			}
		}

		emitter.state = EMIT_DOCUMENT_CONTENT_STATE
		return nil
	}

	if event.Type == STREAM_END_EVENT {
		if emitter.OpenEnded {
			if err := emitter.writeIndicator([]byte("..."), true, false, false); err != nil {
				return err
			}
			if err := emitter.writeIndent(); err != nil {
				return err
			}
		}
		if err := emitter.flush(); err != nil {
			return err
		}
		emitter.state = EMIT_END_STATE
		return nil
	}

	return EmitterError{
		Message: "expected DOCUMENT-START or STREAM-END",
	}
}

// increaseIndent preserves the original signature and delegates to
// increaseIndentCompact without compact-sequence indentation
func (emitter *Emitter) increaseIndent(flow, indentless bool) error {
	return emitter.increaseIndentCompact(flow, indentless, false)
}

// processLineComment preserves the original signature and delegates to
// processLineCommentLinebreak passing false for linebreak
func (emitter *Emitter) processLineComment() error {
	return emitter.processLineCommentLinebreak(false)
}

// Expect the root node.
func (emitter *Emitter) emitDocumentContent(event *Event) error {
	emitter.states = append(emitter.states, EMIT_DOCUMENT_END_STATE)

	if err := emitter.processHeadComment(); err != nil {
		return err
	}
	if err := emitter.emitNode(event, true, false, false, false); err != nil {
		return err
	}
	if err := emitter.processLineComment(); err != nil {
		return err
	}
	if err := emitter.processFootComment(); err != nil {
		return err
	}
	return nil
}

// Expect DOCUMENT-END.
// emitDocumentEnd finishes the current document: it flushes foot comments,
// writes an explicit "..." marker when the end is not implicit, flushes the
// output buffer, and resets the tag directives for the next document.
func (emitter *Emitter) emitDocumentEnd(event *Event) error {
	if event.Type != DOCUMENT_END_EVENT {
		return EmitterError{
			Message: "expected DOCUMENT-END",
		}
	}
	// [Go] Force document foot separation.
	emitter.foot_indent = 0
	if err := emitter.processFootComment(); err != nil {
		return err
	}
	emitter.foot_indent = -1
	if err := emitter.writeIndent(); err != nil {
		return err
	}
	if !event.Implicit {
		// [Go] Allocate the slice elsewhere.
		if err := emitter.writeIndicator([]byte("..."), true, false, false); err != nil {
			return err
		}
		if err := emitter.writeIndent(); err != nil {
			return err
		}
	}
	if err := emitter.flush(); err != nil {
		return err
	}
	emitter.state = EMIT_DOCUMENT_START_STATE
	// Tag directives are per-document; keep the backing array, drop entries.
	emitter.tag_directives = emitter.tag_directives[:0]
	return nil
}

// Expect a flow item node.
// emitFlowSequenceItem emits one item of a flow sequence ("[a, b, c]").
// first marks the first item (opens "["); trail marks the state entered after
// an item that carried comments, where the separating "," was written early.
func (emitter *Emitter) emitFlowSequenceItem(event *Event, first, trail bool) error {
	if first {
		if err := emitter.writeIndicator([]byte{'['}, true, true, false); err != nil {
			return err
		}
		if err := emitter.increaseIndent(true, false); err != nil {
			return err
		}
		emitter.flow_level++
	}

	if event.Type == SEQUENCE_END_EVENT {
		// Canonical output gets a trailing comma before "]" unless one was
		// already written by a trail state.
		if emitter.canonical && !first && !trail {
			if err := emitter.writeIndicator([]byte{','}, false, false, false); err != nil {
				return err
			}
		}
		emitter.flow_level--
		// Pop the flow indent.
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		if emitter.column == 0 || emitter.canonical && !first {
			if err := emitter.writeIndent(); err != nil {
				return err
			}
		}
		if err := emitter.writeIndicator([]byte{']'}, false, false, false); err != nil {
			return err
		}
		if err := emitter.processLineComment(); err != nil {
			return err
		}
		if err := emitter.processFootComment(); err != nil {
			return err
		}
		// Pop back to the enclosing state.
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]

		return nil
	}

	if !first && !trail {
		if err := emitter.writeIndicator([]byte{','}, false, false, false); err != nil {
			return err
		}
	}

	if err := emitter.processHeadComment(); err != nil {
		return err
	}
	if emitter.column == 0 {
		if err := emitter.writeIndent(); err != nil {
			return err
		}
	}

	// Break the line in canonical mode or when past the preferred width.
	if emitter.canonical || emitter.column > emitter.best_width {
		if err := emitter.writeIndent(); err != nil {
			return err
		}
	}
	// Items carrying comments must take the "trail" path so the separating
	// comma is written before the comments, not after.
	if len(emitter.LineComment)+len(emitter.FootComment)+len(emitter.TailComment) > 0 {
		emitter.states = append(emitter.states, EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE)
	} else {
		emitter.states = append(emitter.states, EMIT_FLOW_SEQUENCE_ITEM_STATE)
	}
	if err := emitter.emitNode(event, false, true, false, false); err != nil {
		return err
	}
	if len(emitter.LineComment)+len(emitter.FootComment)+len(emitter.TailComment) > 0 {
		if err := emitter.writeIndicator([]byte{','}, false, false, false); err != nil {
			return err
		}
	}
	if err := emitter.processLineComment(); err != nil {
		return err
	}
	if err := emitter.processFootComment(); err != nil {
		return err
	}
	return nil
}

// Expect a flow key node.
// emitFlowMappingKey emits one key of a flow mapping ("{k: v, ...}").
// first marks the first key (opens "{"); trail marks the state entered after
// an entry that carried comments, where the separating "," was written early.
// Simple keys are emitted inline; complex keys get an explicit "?" indicator.
func (emitter *Emitter) emitFlowMappingKey(event *Event, first, trail bool) error {
	if first {
		if err := emitter.writeIndicator([]byte{'{'}, true, true, false); err != nil {
			return err
		}
		if err := emitter.increaseIndent(true, false); err != nil {
			return err
		}
		emitter.flow_level++
	}

	if event.Type == MAPPING_END_EVENT {
		// A trailing comma is needed before "}" in canonical mode, or when
		// pending comments will be written inside the braces.
		if (emitter.canonical || len(emitter.HeadComment)+len(emitter.FootComment)+len(emitter.TailComment) > 0) && !first && !trail {
			if err := emitter.writeIndicator([]byte{','}, false, false, false); err != nil {
				return err
			}
		}
		if err := emitter.processHeadComment(); err != nil {
			return err
		}
		emitter.flow_level--
		// Pop the flow indent.
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		if emitter.canonical && !first {
			if err := emitter.writeIndent(); err != nil {
				return err
			}
		}
		if err := emitter.writeIndicator([]byte{'}'}, false, false, false); err != nil {
			return err
		}
		if err := emitter.processLineComment(); err != nil {
			return err
		}
		if err := emitter.processFootComment(); err != nil {
			return err
		}
		// Pop back to the enclosing state.
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]
		return nil
	}

	if !first && !trail {
		if err := emitter.writeIndicator([]byte{','}, false, false, false); err != nil {
			return err
		}
	}

	if err := emitter.processHeadComment(); err != nil {
		return err
	}

	if emitter.column == 0 {
		if err := emitter.writeIndent(); err != nil {
			return err
		}
	}

	// Break the line in canonical mode or when past the preferred width.
	if emitter.canonical || emitter.column > emitter.best_width {
		if err := emitter.writeIndent(); err != nil {
			return err
		}
	}

	if !emitter.canonical && emitter.checkSimpleKey() {
		emitter.states = append(emitter.states, EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
		return emitter.emitNode(event, false, false, true, true)
	}
	if err := emitter.writeIndicator([]byte{'?'}, true, false, false); err != nil {
		return err
	}
	emitter.states = append(emitter.states, EMIT_FLOW_MAPPING_VALUE_STATE)
	return emitter.emitNode(event, false, false, true, false)
}

// Expect a flow value node.
// emitFlowMappingValue writes the ":" indicator (compactly when the key was
// simple) and then emits the value node, handling any attached comments.
func (emitter *Emitter) emitFlowMappingValue(event *Event, simple bool) error {
	if simple {
		if err := emitter.writeIndicator([]byte{':'}, false, false, false); err != nil {
			return err
		}
	} else {
		if emitter.canonical || emitter.column > emitter.best_width {
			if err := emitter.writeIndent(); err != nil {
				return err
			}
		}
		if err := emitter.writeIndicator([]byte{':'}, true, false, false); err != nil {
			return err
		}
	}
	// Values carrying comments route through the "trail key" state so the
	// next separating comma is written before the comments.
	if len(emitter.LineComment)+len(emitter.FootComment)+len(emitter.TailComment) > 0 {
		emitter.states = append(emitter.states, EMIT_FLOW_MAPPING_TRAIL_KEY_STATE)
	} else {
		emitter.states = append(emitter.states, EMIT_FLOW_MAPPING_KEY_STATE)
	}
	if err := emitter.emitNode(event, false, false, true, false); err != nil {
		return err
	}
	if len(emitter.LineComment)+len(emitter.FootComment)+len(emitter.TailComment) > 0 {
		if err := emitter.writeIndicator([]byte{','}, false, false, false); err != nil {
			return err
		}
	}
	if err := emitter.processLineComment(); err != nil {
		return err
	}
	if err := emitter.processFootComment(); err != nil {
		return err
	}
	return nil
}

// Expect a block item node.
// emitBlockSequenceItem emits one "- item" entry of a block sequence; first
// marks the first item, which establishes the sequence's indentation level.
func (emitter *Emitter) emitBlockSequenceItem(event *Event, first bool) error {
	if first {
		// emitter.mapping context tells us if we are currently in a mapping context.
		// emitter.column tells us which column we are in the yaml output. 0 is the first char of the column.
		// emitter.indentation tells us if the last character was an indentation character.
		// emitter.compact_sequence_indent tells us if '- ' is considered part of the indentation for sequence elements.
		// So, `seq` means that we are in a mapping context, and we are either at the first char of the column or
		// the last character was not an indentation character, and we consider '- ' part of the indentation
		// for sequence elements.
		seq := emitter.mapping_context && (emitter.column == 0 || !emitter.indention) &&
			emitter.CompactSequenceIndent
		if err := emitter.increaseIndentCompact(false, false, seq); err != nil {
			return err
		}
	}
	if event.Type == SEQUENCE_END_EVENT {
		// Pop the indent and return to the enclosing state.
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]
		return nil
	}
	if err := emitter.processHeadComment(); err != nil {
		return err
	}
	if err := emitter.writeIndent(); err != nil {
		return err
	}
	if err := emitter.writeIndicator([]byte{'-'}, true, false, true); err != nil {
		return err
	}
	emitter.states = append(emitter.states, EMIT_BLOCK_SEQUENCE_ITEM_STATE)
	if err := emitter.emitNode(event, false, true, false, false); err != nil {
		return err
	}
	if err := emitter.processLineComment(); err != nil {
		return err
	}
	if err := emitter.processFootComment(); err != nil {
		return err
	}
	return nil
}

// Expect a block key node.
// emitBlockMappingKey emits one key of a block mapping. Simple keys are
// written inline ("key:"); complex keys get the explicit "?" indicator. A
// line comment arriving with the key is stashed in key_line_comment so the
// value state can place it correctly.
func (emitter *Emitter) emitBlockMappingKey(event *Event, first bool) error {
	if first {
		if err := emitter.increaseIndent(false, false); err != nil {
			return err
		}
	}
	if err := emitter.processHeadComment(); err != nil {
		return err
	}
	if event.Type == MAPPING_END_EVENT {
		// Pop the indent and return to the enclosing state.
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]
		return nil
	}
	if err := emitter.writeIndent(); err != nil {
		return err
	}
	if len(emitter.LineComment) > 0 {
		// [Go] A line comment was provided for the key. That's unusual as the
		// scanner associates line comments with the value. Either way,
		// save the line comment and render it appropriately later.
		emitter.key_line_comment = emitter.LineComment
		emitter.LineComment = nil
	}
	if emitter.checkSimpleKey() {
		emitter.states = append(emitter.states, EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
		if err := emitter.emitNode(event, false, false, true, true); err != nil {
			return err
		}

		if event.Type == ALIAS_EVENT {
			// make sure there's a space after the alias
			return emitter.put(' ')
		}

		return nil
	}
	if err := emitter.writeIndicator([]byte{'?'}, true, false, true); err != nil {
		return err
	}
	emitter.states = append(emitter.states, EMIT_BLOCK_MAPPING_VALUE_STATE)
	return emitter.emitNode(event, false, false, true, false)
}

// Expect a block value node.
// emitBlockMappingValue writes the ":" indicator (compactly when the key was
// simple) and emits the value node. It also resolves where a line comment
// stashed with the key (key_line_comment) should be rendered relative to the
// value.
func (emitter *Emitter) emitBlockMappingValue(event *Event, simple bool) error {
	if simple {
		if err := emitter.writeIndicator([]byte{':'}, false, false, false); err != nil {
			return err
		}
	} else {
		if err := emitter.writeIndent(); err != nil {
			return err
		}
		if err := emitter.writeIndicator([]byte{':'}, true, false, true); err != nil {
			return err
		}
	}
	if len(emitter.key_line_comment) > 0 {
		// [Go] Line comments are generally associated with the value, but when there's
		// no value on the same line as a mapping key they end up attached to the
		// key itself.
		if event.Type == SCALAR_EVENT {
			if len(emitter.LineComment) == 0 {
				// A scalar is coming and it has no line comments by itself yet,
				// so just let it handle the line comment as usual. If it has a
				// line comment, we can't have both so the one from the key is lost.
				emitter.LineComment = emitter.key_line_comment
				emitter.key_line_comment = nil
			}
		} else if event.SequenceStyle() != FLOW_SEQUENCE_STYLE && (event.Type == MAPPING_START_EVENT || event.Type == SEQUENCE_START_EVENT) {
			// An indented block follows, so write the comment right now.
			// Swap the key comment in, emit it, then swap back so the
			// value's own line comment (if any) is preserved.
			emitter.LineComment, emitter.key_line_comment = emitter.key_line_comment, emitter.LineComment
			if err := emitter.processLineComment(); err != nil {
				return err
			}
			emitter.LineComment, emitter.key_line_comment = emitter.key_line_comment, emitter.LineComment
		}
	}
	emitter.states = append(emitter.states, EMIT_BLOCK_MAPPING_KEY_STATE)
	if err := emitter.emitNode(event, false, false, true, false); err != nil {
		return err
	}
	if err := emitter.processLineComment(); err != nil {
		return err
	}
	if err := emitter.processFootComment(); err != nil {
		return err
	}
	return nil
}

// silentNilEvent reports whether the event is an implicit empty scalar that
// produces no visible output outside canonical mode.
func (emitter *Emitter) silentNilEvent(event *Event) bool {
	return event.Type == SCALAR_EVENT && event.Implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0
}

// Expect a node.
+func (emitter *Emitter) emitNode(event *Event, + root bool, sequence bool, mapping bool, simple_key bool, +) error { + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.Type { + case ALIAS_EVENT: + return emitter.emitAlias(event) + case SCALAR_EVENT: + return emitter.emitScalar(event) + case SEQUENCE_START_EVENT: + return emitter.emitSequenceStart(event) + case MAPPING_START_EVENT: + return emitter.emitMappingStart(event) + default: + return EmitterError{ + Message: fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.Type), + } + } +} + +// Expect ALIAS. +func (emitter *Emitter) emitAlias(event *Event) error { + if err := emitter.processAnchor(); err != nil { + return err + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return nil +} + +// requiredQuoteStyle returns the appropriate quote style based on the +// emitter's quotePreference setting. +func (emitter *Emitter) requiredQuoteStyle() ScalarStyle { + if emitter.quotePreference == QuoteDouble { + return DOUBLE_QUOTED_SCALAR_STYLE + } + return SINGLE_QUOTED_SCALAR_STYLE +} + +// Expect SCALAR. +func (emitter *Emitter) emitScalar(event *Event) error { + if err := emitter.selectScalarStyle(event); err != nil { + return err + } + if err := emitter.processAnchor(); err != nil { + return err + } + if err := emitter.processTag(); err != nil { + return err + } + if err := emitter.increaseIndent(true, false); err != nil { + return err + } + if err := emitter.processScalar(); err != nil { + return err + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return nil +} + +// Expect SEQUENCE-START. 
+func (emitter *Emitter) emitSequenceStart(event *Event) error { + if err := emitter.processAnchor(); err != nil { + return err + } + if err := emitter.processTag(); err != nil { + return err + } + if emitter.flow_level > 0 || emitter.canonical || event.SequenceStyle() == FLOW_SEQUENCE_STYLE || + emitter.checkEmptySequence() { + emitter.state = EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return nil +} + +// Expect MAPPING-START. +func (emitter *Emitter) emitMappingStart(event *Event) error { + if err := emitter.processAnchor(); err != nil { + return err + } + if err := emitter.processTag(); err != nil { + return err + } + if emitter.flow_level > 0 || emitter.canonical || event.MappingStyle() == FLOW_MAPPING_STYLE || + emitter.checkEmptyMapping() { + emitter.state = EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return nil +} + +// Check if the document content is an empty scalar. +func (emitter *Emitter) checkEmptyDocument() bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func (emitter *Emitter) checkEmptySequence() bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].Type == SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].Type == SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func (emitter *Emitter) checkEmptyMapping() bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].Type == MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].Type == MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. 
// checkSimpleKey reports whether the next queued event can be rendered as a
// simple (inline, "key:") mapping key: single-line, non-empty collection-free
// content whose rendered width fits within 128 characters.
func (emitter *Emitter) checkSimpleKey() bool {
	length := 0
	switch emitter.events[emitter.events_head].Type {
	case ALIAS_EVENT:
		length += len(emitter.anchor_data.anchor)
	case SCALAR_EVENT:
		// Multi-line scalars can never be simple keys.
		if emitter.scalar_data.multiline {
			return false
		}
		length += len(emitter.anchor_data.anchor) +
			len(emitter.tag_data.handle) +
			len(emitter.tag_data.suffix) +
			len(emitter.scalar_data.value)
	case SEQUENCE_START_EVENT:
		// Only an empty sequence ("[]") qualifies.
		if !emitter.checkEmptySequence() {
			return false
		}
		length += len(emitter.anchor_data.anchor) +
			len(emitter.tag_data.handle) +
			len(emitter.tag_data.suffix)
	case MAPPING_START_EVENT:
		// Only an empty mapping ("{}") qualifies.
		if !emitter.checkEmptyMapping() {
			return false
		}
		length += len(emitter.anchor_data.anchor) +
			len(emitter.tag_data.handle) +
			len(emitter.tag_data.suffix)
	default:
		return false
	}
	return length <= 128
}

// Determine an acceptable scalar style.
// selectScalarStyle narrows the event's requested style to one the analyzed
// scalar content actually permits, falling back (ultimately to double-quoted)
// when the requested style cannot represent the value. The result is stored
// in emitter.scalar_data.style.
func (emitter *Emitter) selectScalarStyle(event *Event) error {
	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
	if no_tag && !event.Implicit && !event.quoted_implicit {
		return EmitterError{
			Message: "neither tag nor implicit flags are specified",
		}
	}

	style := event.ScalarStyle()
	if style == ANY_SCALAR_STYLE {
		style = PLAIN_SCALAR_STYLE
	}
	// Canonical output always double-quotes scalars.
	if emitter.canonical {
		style = DOUBLE_QUOTED_SCALAR_STYLE
	}
	// A multi-line value cannot appear as a simple key unquoted.
	if emitter.simple_key_context && emitter.scalar_data.multiline {
		style = DOUBLE_QUOTED_SCALAR_STYLE
	}

	if style == PLAIN_SCALAR_STYLE {
		// Plain is only valid where the analysis allowed it for the current
		// (flow vs block) context.
		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
			style = emitter.requiredQuoteStyle()
		}
		// An empty scalar needs quotes in flow or simple-key position.
		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
			style = emitter.requiredQuoteStyle()
		}
		// Without a tag, a non-implicit scalar must be quoted to round-trip.
		if no_tag && !event.Implicit {
			style = emitter.requiredQuoteStyle()
		}
	}
	if style == SINGLE_QUOTED_SCALAR_STYLE {
		if !emitter.scalar_data.single_quoted_allowed {
			style = DOUBLE_QUOTED_SCALAR_STYLE
		}
	}
	if style == LITERAL_SCALAR_STYLE || style == FOLDED_SCALAR_STYLE {
		// Block scalar styles are unavailable in flow context or as keys.
		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
			style = DOUBLE_QUOTED_SCALAR_STYLE
		}
	}

	// A non-plain untagged scalar that is not quoted-implicit needs the "!"
	// non-specific tag to preserve its resolved type.
	if no_tag && !event.quoted_implicit && style != PLAIN_SCALAR_STYLE {
		emitter.tag_data.handle = []byte{'!'}
	}
	emitter.scalar_data.style = style
	return nil
}

// Write an anchor.
// processAnchor emits "&anchor" (or "*anchor" for an alias) when anchor data
// is present; it is a no-op otherwise.
func (emitter *Emitter) processAnchor() error {
	if emitter.anchor_data.anchor == nil {
		return nil
	}
	c := []byte{'&'}
	if emitter.anchor_data.alias {
		c[0] = '*'
	}
	if err := emitter.writeIndicator(c, true, false, false); err != nil {
		return err
	}
	return emitter.writeAnchor(emitter.anchor_data.anchor)
}

// Write a tag.
// processTag emits the node's tag, either in shorthand form ("!handle!suffix")
// when a handle was resolved, or verbatim ("!<suffix>") otherwise. A no-op
// when no tag data is present.
func (emitter *Emitter) processTag() error {
	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
		return nil
	}
	if len(emitter.tag_data.handle) > 0 {
		if err := emitter.writeTagHandle(emitter.tag_data.handle); err != nil {
			return err
		}
		if len(emitter.tag_data.suffix) > 0 {
			if err := emitter.writeTagContent(emitter.tag_data.suffix, false); err != nil {
				return err
			}
		}
	} else {
		// [Go] Allocate these slices elsewhere.
		if err := emitter.writeIndicator([]byte("!<"), true, false, false); err != nil {
			return err
		}
		if err := emitter.writeTagContent(emitter.tag_data.suffix, false); err != nil {
			return err
		}
		if err := emitter.writeIndicator([]byte{'>'}, false, false, false); err != nil {
			return err
		}
	}
	return nil
}

// Write a scalar.
// processScalar writes the analyzed scalar value using the style chosen by
// selectScalarStyle. Splitting across lines is suppressed in simple-key
// context. Panics on an unknown style, which indicates a programming error.
func (emitter *Emitter) processScalar() error {
	switch emitter.scalar_data.style {
	case PLAIN_SCALAR_STYLE:
		return emitter.writePlainScalar(emitter.scalar_data.value, !emitter.simple_key_context)

	case SINGLE_QUOTED_SCALAR_STYLE:
		return emitter.writeSingleQuotedScalar(emitter.scalar_data.value, !emitter.simple_key_context)

	case DOUBLE_QUOTED_SCALAR_STYLE:
		return emitter.writeDoubleQuotedScalar(emitter.scalar_data.value, !emitter.simple_key_context)

	case LITERAL_SCALAR_STYLE:
		return emitter.writeLiteralScalar(emitter.scalar_data.value)

	case FOLDED_SCALAR_STYLE:
		return emitter.writeFoldedScalar(emitter.scalar_data.value)
	}
	panic("unknown scalar style")
}

// Write a head comment.
// processHeadComment flushes any pending tail comment first (recording its
// indent as the foot indent), then writes and clears the head comment.
func (emitter *Emitter) processHeadComment() error {
	if len(emitter.TailComment) > 0 {
		if err := emitter.writeIndent(); err != nil {
			return err
		}
		if err := emitter.writeComment(emitter.TailComment); err != nil {
			return err
		}
		emitter.TailComment = emitter.TailComment[:0]
		emitter.foot_indent = emitter.indent
		if emitter.foot_indent < 0 {
			emitter.foot_indent = 0
		}
	}

	if len(emitter.HeadComment) == 0 {
		return nil
	}
	if err := emitter.writeIndent(); err != nil {
		return err
	}
	if err := emitter.writeComment(emitter.HeadComment); err != nil {
		return err
	}
	emitter.HeadComment = emitter.HeadComment[:0]
	return nil
}

// Write a line comment.
// processLineCommentLinebreak writes the pending line comment (preceded by a
// space when needed) and clears it. When there is no comment and linebreak is
// true, a bare line break is emitted instead.
func (emitter *Emitter) processLineCommentLinebreak(linebreak bool) error {
	if len(emitter.LineComment) == 0 {
		// The next 3 lines are needed to resolve an issue with leading newlines
		// See https://github.com/go-yaml/yaml/issues/755
		// When linebreak is set to true, put_break will be called and will add
		// the needed newline.
		if linebreak {
			if err := emitter.putLineBreak(); err != nil {
				return err
			}
		}
		return nil
	}
	if !emitter.whitespace {
		if err := emitter.put(' '); err != nil {
			return err
		}
	}
	if err := emitter.writeComment(emitter.LineComment); err != nil {
		return err
	}
	emitter.LineComment = emitter.LineComment[:0]
	return nil
}

// Write a foot comment.
// processFootComment writes and clears the pending foot comment, recording
// its indent so the following content is separated from it.
func (emitter *Emitter) processFootComment() error {
	if len(emitter.FootComment) == 0 {
		return nil
	}
	if err := emitter.writeIndent(); err != nil {
		return err
	}
	if err := emitter.writeComment(emitter.FootComment); err != nil {
		return err
	}
	emitter.FootComment = emitter.FootComment[:0]
	emitter.foot_indent = emitter.indent
	if emitter.foot_indent < 0 {
		emitter.foot_indent = 0
	}
	return nil
}

// Check if a %YAML directive is valid.
// Only YAML 1.1 is accepted by this emitter.
func (emitter *Emitter) analyzeVersionDirective(version_directive *VersionDirective) error {
	if version_directive.major != 1 || version_directive.minor != 1 {
		return EmitterError{
			Message: "incompatible %YAML directive",
		}
	}
	return nil
}

// Check if a %TAG directive is valid.
// The handle must be non-empty, start and end with '!', and contain only
// alphanumeric characters in between; the prefix must be non-empty.
func (emitter *Emitter) analyzeTagDirective(tag_directive *TagDirective) error {
	handle := tag_directive.handle
	prefix := tag_directive.prefix
	if len(handle) == 0 {
		return EmitterError{
			Message: "tag handle must not be empty",
		}
	}
	if handle[0] != '!' {
		return EmitterError{
			Message: "tag handle must start with '!'",
		}
	}
	if handle[len(handle)-1] != '!' {
		return EmitterError{
			Message: "tag handle must end with '!'",
		}
	}
	// Step by UTF-8 rune width over the interior of the handle.
	for i := 1; i < len(handle)-1; i += width(handle[i]) {
		if !isAlpha(handle, i) {
			return EmitterError{
				Message: "tag handle must contain alphanumerical characters only",
			}
		}
	}
	if len(prefix) == 0 {
		return EmitterError{
			Message: "tag prefix must not be empty",
		}
	}
	return nil
}

// Check if an anchor is valid.
// analyzeAnchor validates an anchor (or alias, when alias is true) name and
// records it in emitter.anchor_data for the node being emitted.
func (emitter *Emitter) analyzeAnchor(anchor []byte, alias bool) error {
	if len(anchor) == 0 {
		problem := "anchor value must not be empty"
		if alias {
			problem = "alias value must not be empty"
		}
		return EmitterError{
			Message: problem,
		}
	}
	// Step by UTF-8 rune width and reject any disallowed character.
	for i := 0; i < len(anchor); i += width(anchor[i]) {
		if !isAnchorChar(anchor, i) {
			problem := "anchor value must contain valid characters only"
			if alias {
				problem = "alias value must contain valid characters only"
			}
			return EmitterError{
				Message: problem,
			}
		}
	}
	emitter.anchor_data.anchor = anchor
	emitter.anchor_data.alias = alias
	return nil
}

// Check if a tag is valid.
// analyzeTag records the tag in emitter.tag_data, splitting it into a
// handle/suffix pair when a registered tag directive's prefix matches;
// otherwise the whole tag is kept as a verbatim suffix.
func (emitter *Emitter) analyzeTag(tag []byte) error {
	if len(tag) == 0 {
		return EmitterError{
			Message: "tag value must not be empty",
		}
	}
	for i := 0; i < len(emitter.tag_directives); i++ {
		tag_directive := &emitter.tag_directives[i]
		if bytes.HasPrefix(tag, tag_directive.prefix) {
			emitter.tag_data.handle = tag_directive.handle
			emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
			return nil
		}
	}
	emitter.tag_data.suffix = tag
	return nil
}

// Check if a scalar is valid.
// analyzeScalar scans the scalar value once, character by character, and
// records in emitter.scalar_data which output styles (plain in flow/block,
// single-quoted, block literal/folded) can represent it faithfully.
func (emitter *Emitter) analyzeScalar(value []byte) error {
	var block_indicators,
		flow_indicators,
		line_breaks,
		special_characters,
		tab_characters,

		leading_space,
		leading_break,
		trailing_space,
		trailing_break,
		break_space,
		space_break,

		preceded_by_whitespace,
		followed_by_whitespace,
		previous_space,
		previous_break bool

	emitter.scalar_data.value = value

	// The empty scalar: plain is allowed only in block context, and block
	// (literal/folded) styles cannot express it at all.
	if len(value) == 0 {
		emitter.scalar_data.multiline = false
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = true
		emitter.scalar_data.single_quoted_allowed = true
		emitter.scalar_data.block_allowed = false
		return nil
	}

	// A value starting with "---" or "..." would be mistaken for a document
	// marker if left unquoted.
	if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
		block_indicators = true
		flow_indicators = true
	}

	preceded_by_whitespace = true
	for i, w := 0, 0; i < len(value); i += w {
		w = width(value[i])
		followed_by_whitespace = i+w >= len(value) || isBlank(value, i+w)

		if i == 0 {
			// Indicator characters at the start of the value.
			switch value[i] {
			case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
				flow_indicators = true
				block_indicators = true
			case '?', ':':
				flow_indicators = true
				if followed_by_whitespace {
					block_indicators = true
				}
			case '-':
				if followed_by_whitespace {
					flow_indicators = true
					block_indicators = true
				}
			}
		} else {
			// Indicator characters in the middle of the value.
			switch value[i] {
			case ',', '?', '[', ']', '{', '}':
				flow_indicators = true
			case ':':
				flow_indicators = true
				if followed_by_whitespace {
					block_indicators = true
				}
			case '#':
				if preceded_by_whitespace {
					flow_indicators = true
					block_indicators = true
				}
			}
		}

		if value[i] == '\t' {
			tab_characters = true
		} else if !isPrintable(value, i) || !isASCII(value, i) && !emitter.unicode {
			special_characters = true
		}
		if isSpace(value, i) {
			if i == 0 {
				leading_space = true
			}
			if i+width(value[i]) == len(value) {
				trailing_space = true
			}
			if previous_break {
				break_space = true
			}
			previous_space = true
			previous_break = false
		} else if isLineBreak(value, i) {
			line_breaks = true
			if i == 0 {
				leading_break = true
			}
			if i+width(value[i]) == len(value) {
				trailing_break = true
			}
			if previous_space {
				space_break = true
			}
			previous_space = false
			previous_break = true
		} else {
			previous_space = false
			previous_break = false
		}

		// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
		preceded_by_whitespace = isBlankOrZero(value, i)
	}

	emitter.scalar_data.multiline = line_breaks
	emitter.scalar_data.flow_plain_allowed = true
	emitter.scalar_data.block_plain_allowed = true
	emitter.scalar_data.single_quoted_allowed = true
	emitter.scalar_data.block_allowed = true

	// Translate the collected facts into style permissions.
	if leading_space || leading_break || trailing_space || trailing_break {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
	}
	if trailing_space {
		emitter.scalar_data.block_allowed = false
	}
	if break_space {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
		emitter.scalar_data.single_quoted_allowed = false
	}
	if space_break || tab_characters || special_characters {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
		emitter.scalar_data.single_quoted_allowed = false
	}
	if space_break || special_characters {
		emitter.scalar_data.block_allowed = false
	}
	if line_breaks {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
	}
	if flow_indicators {
		emitter.scalar_data.flow_plain_allowed = false
	}
	if block_indicators {
		emitter.scalar_data.block_plain_allowed = false
	}
	return nil
}

// Check if the event data is valid.
// analyzeEvent resets the per-node analysis data, captures the event's
// comments on the emitter, and runs the anchor/tag/scalar analyzers
// appropriate for the event's type. Non-node events are accepted unchanged.
func (emitter *Emitter) analyzeEvent(event *Event) error {
	emitter.anchor_data.anchor = nil
	emitter.tag_data.handle = nil
	emitter.tag_data.suffix = nil
	emitter.scalar_data.value = nil

	// Carry the event's comments over to the emitter so the emit states can
	// render them around the node.
	if len(event.HeadComment) > 0 {
		emitter.HeadComment = event.HeadComment
	}
	if len(event.LineComment) > 0 {
		emitter.LineComment = event.LineComment
	}
	if len(event.FootComment) > 0 {
		emitter.FootComment = event.FootComment
	}
	if len(event.TailComment) > 0 {
		emitter.TailComment = event.TailComment
	}

	switch event.Type {
	case ALIAS_EVENT:
		if err := emitter.analyzeAnchor(event.Anchor, true); err != nil {
			return err
		}

	case SCALAR_EVENT:
		if len(event.Anchor) > 0 {
			if err := emitter.analyzeAnchor(event.Anchor, false); err != nil {
				return err
			}
		}
		// The tag is only analyzed when it will actually be emitted.
		if len(event.Tag) > 0 && (emitter.canonical || (!event.Implicit && !event.quoted_implicit)) {
			if err := emitter.analyzeTag(event.Tag); err != nil {
				return err
			}
		}
		if err := emitter.analyzeScalar(event.Value); err != nil {
			return err
		}

	case SEQUENCE_START_EVENT:
		if len(event.Anchor) > 0 {
			if err := emitter.analyzeAnchor(event.Anchor, false); err != nil {
				return err
			}
		}
		if len(event.Tag) > 0 && (emitter.canonical || !event.Implicit) {
			if err := emitter.analyzeTag(event.Tag); err != nil {
				return err
			}
		}

	case MAPPING_START_EVENT:
		if len(event.Anchor) > 0 {
			if err := emitter.analyzeAnchor(event.Anchor, false); err != nil {
				return err
			}
		}
		if len(event.Tag) > 0 && (emitter.canonical || !event.Implicit) {
			if err := emitter.analyzeTag(event.Tag); err != nil {
				return err
			}
		}
	}
	return nil
}

// Write the BOM character.
// writeBom writes the UTF-8 byte order mark into the output buffer.
func (emitter *Emitter) writeBom() error {
	if err := emitter.flushIfNeeded(); err != nil {
		return err
	}
	pos := emitter.buffer_pos
	emitter.buffer[pos+0] = '\xEF'
	emitter.buffer[pos+1] = '\xBB'
	emitter.buffer[pos+2] = '\xBF'
	emitter.buffer_pos += 3
	return nil
}

// writeIndent moves the output position to the current indent column,
// inserting a line break when the cursor is already past it (plus a second
// break for foot-comment separation), then pads with spaces.
func (emitter *Emitter) writeIndent() error {
	indent := emitter.indent
	if indent < 0 {
		indent = 0
	}
	if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
		if err := emitter.putLineBreak(); err != nil {
			return err
		}
	}
	// A matching foot_indent forces an extra blank line after a foot comment.
	if emitter.foot_indent == indent {
		if err := emitter.putLineBreak(); err != nil {
			return err
		}
	}
	for emitter.column < indent {
		if err := emitter.put(' '); err != nil {
			return err
		}
	}
	emitter.whitespace = true
	// NOTE(review): emitter.indention is not set to true here — presumably
	// putLineBreak/put maintain it; verify against those helpers.
	emitter.space_above = false
	emitter.foot_indent = -1
	return nil
}

// writeIndicator writes a syntax indicator (e.g. "-", ":", "[", "---"),
// inserting a leading space when need_whitespace is set and the cursor is not
// already on whitespace; is_whitespace/is_indention describe how the
// indicator affects the trailing whitespace/indentation flags.
func (emitter *Emitter) writeIndicator(indicator []byte, need_whitespace, is_whitespace, is_indention bool) error {
	if need_whitespace && !emitter.whitespace {
		if err := emitter.put(' '); err != nil {
			return err
		}
	}
	if err := emitter.writeAll(indicator); err != nil {
		return err
	}
	emitter.whitespace = is_whitespace
	emitter.indention = (emitter.indention && is_indention)
	emitter.OpenEnded = false
	return nil
}

// writeAnchor writes an anchor/alias name verbatim.
func (emitter *Emitter) writeAnchor(value []byte) error {
	if err := emitter.writeAll(value); err != nil {
		return err
	}
	emitter.whitespace = false
	emitter.indention = false
	return nil
}

// writeTagHandle writes a tag handle, space-separated from prior content.
func (emitter *Emitter) writeTagHandle(value []byte) error {
	if !emitter.whitespace {
		if err := emitter.put(' '); err != nil {
			return err
		}
	}
	if err := emitter.writeAll(value); err != nil {
		return err
	}
	emitter.whitespace = false
	emitter.indention = false
	return nil
}

// writeTagContent writes a tag suffix or prefix, percent-encoding every byte
// that is not alphanumeric or in the URI-safe punctuation set.
func (emitter *Emitter) writeTagContent(value []byte, need_whitespace bool) error {
	if need_whitespace && !emitter.whitespace {
		if err := emitter.put(' '); err != nil {
			return err
		}
	}
	for i := 0; i < len(value); {
		var must_write bool
		switch value[i] {
		case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
			must_write = true
		default:
			must_write = isAlpha(value, i)
		}
		if must_write {
			if err := emitter.write(value, &i); err != nil {
				return err
			}
		} else {
			// Percent-encode each byte of the (possibly multi-byte) rune.
			w := width(value[i])
			for k := 0; k < w; k++ {
				octet := value[i]
				i++
				if err := emitter.put('%'); err != nil {
					return err
				}

				c := octet >> 4
				if c < 10 {
					c += '0'
				} else {
					c += 'A' - 10
				}
				if err := emitter.put(c); err != nil {
					return err
				}

				c = octet & 0x0f
				if c < 10 {
					c += '0'
				} else {
					c += 'A' - 10
				}
				if err := emitter.put(c); err != nil {
					return err
				}
			}
		}
	}
	emitter.whitespace = false
	emitter.indention = false
	return nil
}

// writePlainScalar writes an unquoted scalar; when allow_breaks is set, long
// lines may be folded at spaces once past the preferred width.
func (emitter *Emitter) writePlainScalar(value []byte, allow_breaks bool) error {
	if len(value) > 0 && !emitter.whitespace {
		if err := emitter.put(' '); err != nil {
			return err
		}
	}

	spaces := false
	breaks := false
	for i := 0; i < len(value); {
		if isSpace(value, i) {
			// Fold at a single space once past best_width, but never at
			// consecutive spaces (folding would change the value).
			if allow_breaks && !spaces && emitter.column > emitter.best_width && !isSpace(value, i+1) {
				if err := emitter.writeIndent(); err != nil {
					return err
				}
				i += width(value[i])
			} else {
				if err := emitter.write(value, &i); err != nil {
					return err
				}
			}
			spaces = true
		} else if isLineBreak(value, i) {
			// The first '\n' of a run needs a preceding break for folding.
			if !breaks && value[i] == '\n' {
				if err := emitter.putLineBreak(); err != nil {
					return err
				}
			}
			if err := emitter.writeLineBreak(value, &i); err != nil {
				return err
			}
			breaks = true
		} else {
			if breaks {
				if err := emitter.writeIndent(); err != nil {
					return err
				}
			}
			if err := emitter.write(value, &i); err != nil {
				return err
			}
			emitter.indention = false
			spaces = false
			breaks = false
		}
	}

	if len(value) > 0 {
		emitter.whitespace = false
	}
	emitter.indention = false
	// A plain scalar at the document root leaves the document open-ended.
	if emitter.root_context {
		emitter.OpenEnded = true
	}

	return nil
}

// writeSingleQuotedScalar writes a scalar in single quotes, doubling embedded
// "'" characters; when allow_breaks is set, long lines may fold at interior
// single spaces.
func (emitter *Emitter) writeSingleQuotedScalar(value []byte, allow_breaks bool) error {
	if err := emitter.writeIndicator([]byte{'\''}, true, false, false); err != nil {
		return err
	}

	spaces := false
	breaks := false
	for i := 0; i < len(value); {
		if isSpace(value, i) {
			// Fold only at an interior single space (never first/last byte).
			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !isSpace(value, i+1) {
				if err := emitter.writeIndent(); err != nil {
					return err
				}
				i += width(value[i])
			} else {
				if err := emitter.write(value, &i); err != nil {
					return err
				}
			}
			spaces = true
		} else if isLineBreak(value, i) {
			if !breaks && value[i] == '\n' {
				if err := emitter.putLineBreak(); err != nil {
					return err
				}
			}
			if err := emitter.writeLineBreak(value, &i); err != nil {
				return err
			}
			breaks = true
		} else {
			if breaks {
				if err := emitter.writeIndent(); err != nil {
					return err
				}
			}
			// Escape "'" by doubling it.
			if value[i] == '\'' {
				if err := emitter.put('\''); err != nil {
					return err
				}
			}
			if err := emitter.write(value, &i); err != nil {
				return err
			}
			emitter.indention = false
			spaces = false
			breaks = false
		}
	}
	if err := emitter.writeIndicator([]byte{'\''}, false, false, false); err != nil {
		return err
	}
	emitter.whitespace = false
	emitter.indention = false
	return nil
}

// writeDoubleQuotedScalar writes a scalar in double quotes, escaping
// non-printable, non-ASCII (when unicode output is disabled), BOM, break,
// '"', and '\\' characters. (Definition continues beyond this chunk.)
func (emitter *Emitter) writeDoubleQuotedScalar(value []byte, allow_breaks bool) error {
	spaces := false
	if err := emitter.writeIndicator([]byte{'"'}, true, false, false); err != nil {
		return err
	}

	for i := 0; i < len(value); {
		if !isPrintable(value, i) || (!emitter.unicode && !isASCII(value, i)) ||
			isBOM(value, i) || isLineBreak(value, i) ||
			value[i] == '"' || value[i] == '\\' {

			octet := value[i]

			// Decode the leading byte of the UTF-8 sequence.
			var w int
			var v rune
			switch {
			case octet&0x80 == 0x00:
				w, v = 1, rune(octet&0x7F)
			case octet&0xE0 == 0xC0:
				w, v = 2, rune(octet&0x1F)
			case octet&0xF0 == 0xE0:
				w, v = 3,
rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if err := emitter.put('\\'); err != nil { + return err + } + + var err error + switch v { + case 0x00: + err = emitter.put('0') + case 0x07: + err = emitter.put('a') + case 0x08: + err = emitter.put('b') + case 0x09: + err = emitter.put('t') + case 0x0A: + err = emitter.put('n') + case 0x0b: + err = emitter.put('v') + case 0x0c: + err = emitter.put('f') + case 0x0d: + err = emitter.put('r') + case 0x1b: + err = emitter.put('e') + case 0x22: + err = emitter.put('"') + case 0x5c: + err = emitter.put('\\') + case 0x85: + err = emitter.put('N') + case 0xA0: + err = emitter.put('_') + case 0x2028: + err = emitter.put('L') + case 0x2029: + err = emitter.put('P') + default: + if v <= 0xFF { + err = emitter.put('x') + w = 2 + } else if v <= 0xFFFF { + err = emitter.put('u') + w = 4 + } else { + err = emitter.put('U') + w = 8 + } + for k := (w - 1) * 4; err == nil && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + err = emitter.put(digit + '0') + } else { + err = emitter.put(digit + 'A' - 10) + } + } + } + if err != nil { + return err + } + spaces = false + } else if isSpace(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if err := emitter.writeIndent(); err != nil { + return err + } + if isSpace(value, i+1) { + if err := emitter.put('\\'); err != nil { + return err + } + } + i += width(value[i]) + } else if err := emitter.write(value, &i); err != nil { + return err + } + spaces = true + } else { + if err := emitter.write(value, &i); err != nil { + return err + } + spaces = false + } + } + if err := emitter.writeIndicator([]byte{'"'}, false, false, false); err != nil { + return err + } + emitter.whitespace = false + emitter.indention = false + return nil +} + +func (emitter *Emitter) 
writeBlockScalarHints(value []byte) error { + if isSpace(value, 0) { + // https://github.com/yaml/go-yaml/issues/65 + // isLineBreak(value, 0) removed as the linebreak will only write the indentation value. + indent_hint := []byte{'0' + byte(emitter.BestIndent)} + if err := emitter.writeIndicator(indent_hint, false, false, false); err != nil { + return err + } + } + + emitter.OpenEnded = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !isLineBreak(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.OpenEnded = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if isLineBreak(value, i) { + chomp_hint[0] = '+' + emitter.OpenEnded = true + } + } + } + if chomp_hint[0] != 0 { + if err := emitter.writeIndicator(chomp_hint[:], false, false, false); err != nil { + return err + } + } + return nil +} + +func (emitter *Emitter) writeLiteralScalar(value []byte) error { + if err := emitter.writeIndicator([]byte{'|'}, true, false, false); err != nil { + return err + } + if err := emitter.writeBlockScalarHints(value); err != nil { + return err + } + if err := emitter.processLineCommentLinebreak(true); err != nil { + return err + } + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if isLineBreak(value, i) { + if err := emitter.writeLineBreak(value, &i); err != nil { + return err + } + breaks = true + } else { + if breaks { + if err := emitter.writeIndent(); err != nil { + return err + } + } + if err := emitter.write(value, &i); err != nil { + return err + } + emitter.indention = false + breaks = false + } + } + + return nil +} + +func (emitter *Emitter) writeFoldedScalar(value []byte) error { + if err := emitter.writeIndicator([]byte{'>'}, true, false, false); err != nil { + return err + } + if err := emitter.writeBlockScalarHints(value); err != nil { + return err + } + if err := 
emitter.processLineCommentLinebreak(true); err != nil { + return err + } + + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if isLineBreak(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for isLineBreak(value, k) { + k += width(value[k]) + } + if !isBlankOrZero(value, k) { + if err := emitter.putLineBreak(); err != nil { + return err + } + } + } + if err := emitter.writeLineBreak(value, &i); err != nil { + return err + } + breaks = true + } else { + if breaks { + if err := emitter.writeIndent(); err != nil { + return err + } + leading_spaces = isBlank(value, i) + } + if !breaks && isSpace(value, i) && !isSpace(value, i+1) && emitter.column > emitter.best_width { + if err := emitter.writeIndent(); err != nil { + return err + } + i += width(value[i]) + } else { + if err := emitter.write(value, &i); err != nil { + return err + } + } + emitter.indention = false + breaks = false + } + } + return nil +} + +func (emitter *Emitter) writeComment(comment []byte) error { + breaks := false + pound := false + for i := 0; i < len(comment); { + if isLineBreak(comment, i) { + if err := emitter.writeLineBreak(comment, &i); err != nil { + return err + } + breaks = true + pound = false + } else { + if breaks { + if err := emitter.writeIndent(); err != nil { + return err + } + } + if !pound { + if comment[i] != '#' { + if err := emitter.put('#'); err != nil { + return err + } + if err := emitter.put(' '); err != nil { + return err + } + } + pound = true + } + if err := emitter.write(comment, &i); err != nil { + return err + } + emitter.indention = false + breaks = false + } + } + if !breaks { + if err := emitter.putLineBreak(); err != nil { + return err + } + } + + emitter.whitespace = true + return nil +} diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/errors.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/errors.go new file mode 100644 index 000000000000..8b279bfdaa72 --- /dev/null +++ 
b/vendor/go.yaml.in/yaml/v4/internal/libyaml/errors.go @@ -0,0 +1,171 @@ +// Copyright 2025 The go-yaml Project Contributors +// SPDX-License-Identifier: Apache-2.0 + +// Error types for YAML parsing and emitting. +// Provides structured error reporting with line/column information. + +package libyaml + +import ( + "errors" + "fmt" + "strings" +) + +type MarkedYAMLError struct { + // optional context + ContextMark Mark + ContextMessage string + + Mark Mark + Message string +} + +func (e MarkedYAMLError) Error() string { + var builder strings.Builder + builder.WriteString("yaml: ") + if len(e.ContextMessage) > 0 { + fmt.Fprintf(&builder, "%s at %s: ", e.ContextMessage, e.ContextMark) + } + if len(e.ContextMessage) == 0 || e.ContextMark != e.Mark { + fmt.Fprintf(&builder, "%s: ", e.Mark) + } + builder.WriteString(e.Message) + return builder.String() +} + +type ParserError MarkedYAMLError + +func (e ParserError) Error() string { + return MarkedYAMLError(e).Error() +} + +type ScannerError MarkedYAMLError + +func (e ScannerError) Error() string { + return MarkedYAMLError(e).Error() +} + +type ReaderError struct { + Offset int + Value int + Err error +} + +func (e ReaderError) Error() string { + return fmt.Sprintf("yaml: offset %d: %s", e.Offset, e.Err) +} + +func (e ReaderError) Unwrap() error { + return e.Err +} + +type EmitterError struct { + Message string +} + +func (e EmitterError) Error() string { + return fmt.Sprintf("yaml: %s", e.Message) +} + +type WriterError struct { + Err error +} + +func (e WriterError) Error() string { + return fmt.Sprintf("yaml: %s", e.Err) +} + +func (e WriterError) Unwrap() error { + return e.Err +} + +// ConstructError represents a single, non-fatal error that occurred during +// the constructing of a YAML document into a Go value. 
+type ConstructError struct { + Err error + Line int + Column int +} + +func (e *ConstructError) Error() string { + return fmt.Sprintf("line %d: %s", e.Line, e.Err.Error()) +} + +func (e *ConstructError) Unwrap() error { + return e.Err +} + +// LoadErrors is returned when one or more fields cannot be properly decoded. +type LoadErrors struct { + Errors []*ConstructError +} + +func (e *LoadErrors) Error() string { + var b strings.Builder + b.WriteString("yaml: construct errors:") + for _, err := range e.Errors { + b.WriteString("\n ") + b.WriteString(err.Error()) + } + return b.String() +} + +// As implements errors.As for Go versions prior to 1.20 that don't support +// the Unwrap() []error interface. It allows [LoadErrors] to match against +// *ConstructError targets by returning the first error in the list. +func (e *LoadErrors) As(target any) bool { + switch t := target.(type) { + case **ConstructError: + if len(e.Errors) == 0 { + return false + } + *t = e.Errors[0] + return true + case **TypeError: + var msgs []string + for _, err := range e.Errors { + msgs = append(msgs, err.Error()) + } + *t = &TypeError{Errors: msgs} + return true + } + return false +} + +// Is implements errors.Is for Go versions prior to 1.20 that don't support +// the Unwrap() []error interface. It checks if any wrapped error matches +// the target error. +func (e *LoadErrors) Is(target error) bool { + for _, err := range e.Errors { + if errors.Is(err, target) { + return true + } + } + return false +} + +// TypeError is an obsolete error type retained for compatibility. +// +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +// +// Deprecated: Use [LoadErrors] instead. 
+type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// YAMLError is an internal error wrapper type. +type YAMLError struct { + Err error +} + +func (e *YAMLError) Error() string { + return e.Err.Error() +} diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/node.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/node.go new file mode 100644 index 000000000000..48b2582251bb --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/node.go @@ -0,0 +1,363 @@ +// Copyright 2011-2019 Canonical Ltd +// Copyright 2025 The go-yaml Project Contributors +// SPDX-License-Identifier: Apache-2.0 + +// Node types and constants for YAML tree representation. +// Defines Kind, Style, and Node structure for intermediate YAML representation. + +package libyaml + +import ( + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// Tag constants for YAML types +const ( + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" +) + +const longTagPrefix = "tag:yaml.org,2002:" + +var ( + longTags = make(map[string]string) + shortTags = make(map[string]string) +) + +func init() { + for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { + ltag := longTag(stag) + longTags[stag] = ltag + shortTags[ltag] = stag + } +} + +func shortTag(tag string) string { + if strings.HasPrefix(tag, longTagPrefix) { + if stag, ok := shortTags[tag]; ok { + return stag + } + return "!!" 
+ tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + if ltag, ok := longTags[tag]; ok { + return ltag + } + return longTagPrefix + tag[2:] + } + return tag +} + +// Kind represents the type of YAML node +type Kind uint32 + +const ( + DocumentNode Kind = 1 << iota + SequenceNode + MappingNode + ScalarNode + AliasNode + StreamNode +) + +// Style represents the formatting style of a YAML node +type Style uint32 + +const ( + TaggedStyle Style = 1 << iota + DoubleQuotedStyle + SingleQuotedStyle + LiteralStyle + FoldedStyle + FlowStyle +) + +// StreamVersionDirective represents a YAML %YAML version directive for stream nodes. +type StreamVersionDirective struct { + Major int + Minor int +} + +// StreamTagDirective represents a YAML %TAG directive for stream nodes. +type StreamTagDirective struct { + Handle string + Prefix string +} + +// Node represents an element in the YAML document hierarchy. While documents +// are typically encoded and decoded into higher level types, such as structs +// and maps, Node is an intermediate representation that allows detailed +// control over the content being decoded or encoded. +// +// It's worth noting that although Node offers access into details such as +// line numbers, columns, and comments, the content when re-encoded will not +// have its original textual representation preserved. An effort is made to +// render the data pleasantly, and to preserve comments near the data they +// describe, though. +// +// Values that make use of the Node type interact with the yaml package in the +// same way any other type would do, by encoding and decoding yaml data +// directly or indirectly into them. 
+// +// For example: +// +// var person struct { +// Name string +// Address yaml.Node +// } +// err := yaml.Unmarshal(data, &person) +// +// Or by itself: +// +// var person Node +// err := yaml.Unmarshal(data, &person) +type Node struct { + // Kind defines whether the node is a document, a mapping, a sequence, + // a scalar value, or an alias to another node. The specific data type of + // scalar nodes may be obtained via the ShortTag and LongTag methods. + Kind Kind + + // Style allows customizing the appearance of the node in the tree. + Style Style + + // Tag holds the YAML tag defining the data type for the value. + // When decoding, this field will always be set to the resolved tag, + // even when it wasn't explicitly provided in the YAML content. + // When encoding, if this field is unset the value type will be + // implied from the node properties, and if it is set, it will only + // be serialized into the representation if TaggedStyle is used or + // the implicit tag diverges from the provided one. + Tag string + + // Value holds the unescaped and unquoted representation of the value. + Value string + + // Anchor holds the anchor name for this node, which allows aliases to point to it. + Anchor string + + // Alias holds the node that this alias points to. Only valid when Kind is AliasNode. + Alias *Node + + // Content holds contained nodes for documents, mappings, and sequences. + Content []*Node + + // HeadComment holds any comments in the lines preceding the node and + // not separated by an empty line. + HeadComment string + + // LineComment holds any comments at the end of the line where the node is in. + LineComment string + + // FootComment holds any comments following the node and before empty lines. + FootComment string + + // Line and Column hold the node position in the decoded YAML text. + // These fields are not respected when encoding the node. 
+ Line int + Column int + + // StreamNode-specific fields (only valid when Kind == StreamNode) + + // Encoding holds the stream encoding (UTF-8, UTF-16LE, UTF-16BE). + // Only valid for StreamNode. + Encoding Encoding + + // Version holds the YAML version directive (%YAML). + // Only valid for StreamNode. + Version *StreamVersionDirective + + // TagDirectives holds the %TAG directives. + // Only valid for StreamNode. + TagDirectives []StreamTagDirective +} + +// IsZero returns whether the node has all of its fields unset. +func (n *Node) IsZero() bool { + return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil && + n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 && + n.Encoding == 0 && n.Version == nil && n.TagDirectives == nil +} + +// LongTag returns the long form of the tag that indicates the data type for +// the node. If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. +func (n *Node) LongTag() string { + return longTag(n.ShortTag()) +} + +// ShortTag returns the short form of the YAML tag that indicates data type for +// the node. If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. +func (n *Node) ShortTag() string { + if n.indicatedString() { + return strTag + } + if n.Tag == "" || n.Tag == "!" { + switch n.Kind { + case MappingNode: + return mapTag + case SequenceNode: + return seqTag + case AliasNode: + if n.Alias != nil { + return n.Alias.ShortTag() + } + case ScalarNode: + return strTag + case 0: + // Special case to make the zero value convenient. 
+ if n.IsZero() { + return nullTag + } + } + return "" + } + return shortTag(n.Tag) +} + +func (n *Node) indicatedString() bool { + return n.Kind == ScalarNode && + (shortTag(n.Tag) == strTag || + (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) +} + +// shouldUseLiteralStyle determines if a string should use literal style. +// It returns true if the string contains newlines AND meets additional criteria: +// - is at least 2 characters long +// - contains at least one non-whitespace character +func shouldUseLiteralStyle(s string) bool { + if !strings.Contains(s, "\n") || len(s) < 2 { + return false + } + // Must contain at least one non-whitespace character + for _, r := range s { + if !unicode.IsSpace(r) { + return true + } + } + return false +} + +// SetString is a convenience function that sets the node to a string value +// and defines its style in a pleasant way depending on its content. +func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if shouldUseLiteralStyle(n.Value) { + n.Style = LiteralStyle + } +} + +// Decode decodes the node and stores its data into the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (n *Node) Decode(v any) (err error) { + d := NewConstructor(DefaultOptions) + defer handleErr(&err) + out := reflect.ValueOf(v) + if out.Kind() == reflect.Pointer && !out.IsNil() { + out = out.Elem() + } + d.Construct(n, out) + if len(d.TypeErrors) > 0 { + return &LoadErrors{Errors: d.TypeErrors} + } + return nil +} + +// Load decodes the node and stores its data into the value pointed to by v, +// applying the given options. +// +// This method is useful when you need to preserve options like WithKnownFields() +// inside custom UnmarshalYAML implementations. 
+// +// Maps and pointers (to a struct, string, int, etc) are accepted as v +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary. The v parameter +// must not be nil. +// +// See the documentation of the package-level Load function for details +// about YAML to Go conversion and tag options. +func (n *Node) Load(v any, opts ...Option) (err error) { + defer handleErr(&err) + o, err := ApplyOptions(opts...) + if err != nil { + return err + } + d := NewConstructor(o) + out := reflect.ValueOf(v) + if out.Kind() == reflect.Pointer && !out.IsNil() { + out = out.Elem() + } + d.Construct(n, out) + if len(d.TypeErrors) > 0 { + return &LoadErrors{Errors: d.TypeErrors} + } + return nil +} + +// Encode encodes value v and stores its representation in n. +// +// See the documentation for Marshal for details about the +// conversion of Go values into YAML. +func (n *Node) Encode(v any) (err error) { + defer handleErr(&err) + e := NewRepresenter(noWriter, DefaultOptions) + defer e.Destroy() + e.MarshalDoc("", reflect.ValueOf(v)) + e.Finish() + p := NewComposer(e.Out) + p.Textless = true + defer p.Destroy() + doc := p.Parse() + *n = *doc.Content[0] + return nil +} + +// Dump encodes value v and stores its representation in n, +// applying the given options. +// +// This method is useful when you need to apply specific encoding options +// while building Node trees programmatically. +// +// See the documentation for Marshal for details about the +// conversion of Go values into YAML. +func (n *Node) Dump(v any, opts ...Option) (err error) { + defer handleErr(&err) + o, err := ApplyOptions(opts...) 
+ if err != nil { + return err + } + e := NewRepresenter(noWriter, o) + defer e.Destroy() + e.MarshalDoc("", reflect.ValueOf(v)) + e.Finish() + p := NewComposer(e.Out) + p.Textless = true + defer p.Destroy() + doc := p.Parse() + *n = *doc.Content[0] + return nil +} diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/options.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/options.go new file mode 100644 index 000000000000..a21c53c06a1b --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/options.go @@ -0,0 +1,390 @@ +// +// Copyright (c) 2025 The go-yaml Project Contributors +// SPDX-License-Identifier: Apache-2.0 +// + +// Options configuration for loading and dumping YAML. +// Provides centralized control for indentation, line width, strictness, and +// more. + +package libyaml + +import ( + "errors" + "fmt" +) + +// Options holds configuration for both loading and dumping YAML. +type Options struct { + // Loading options + KnownFields bool // Enforce known fields in structs + SingleDocument bool // Only load first document + UniqueKeys bool // Enforce unique keys in mappings + StreamNodes bool // Enable stream node emission + AllDocuments bool // Load/Dump all documents in multi-document streams + + // Dumping options + Indent int // Indentation spaces (2-9) + CompactSeqIndent bool // Whether '- ' counts as indentation + LineWidth int // Preferred line width (-1 for unlimited) + Unicode bool // Allow non-ASCII characters + Canonical bool // Canonical YAML output + LineBreak LineBreak // Line ending style + ExplicitStart bool // Always emit --- + ExplicitEnd bool // Always emit ... + FlowSimpleCollections bool // Use flow style for simple collections + QuotePreference QuoteStyle // Preferred quote style when quoting is required +} + +// Option allows configuring YAML loading and dumping operations. +type Option func(*Options) error + +// WithIndent sets the number of spaces to use for indentation when +// dumping YAML content. 
+// +// Valid values are 2-9. Common choices: 2 (compact), 4 (readable). +func WithIndent(indent int) Option { + return func(o *Options) error { + if indent < 2 || indent > 9 { + return errors.New("yaml: indent must be between 2 and 9 spaces") + } + o.Indent = indent + return nil + } +} + +// WithCompactSeqIndent configures whether the sequence indicator '- ' is +// considered part of the indentation when dumping YAML content. +// +// If compact is true, '- ' is treated as part of the indentation. +// If compact is false, '- ' is not treated as part of the indentation. +// When called without arguments, defaults to true. +func WithCompactSeqIndent(compact ...bool) Option { + if len(compact) > 1 { + return func(o *Options) error { + return errors.New("yaml: WithCompactSeqIndent accepts at most one argument") + } + } + val := len(compact) == 0 || compact[0] + return func(o *Options) error { + o.CompactSeqIndent = val + return nil + } +} + +// WithKnownFields enables or disables strict field checking during YAML loading. +// +// When enabled, loading will return an error if the YAML input contains fields +// that do not correspond to any fields in the target struct. +// When called without arguments, defaults to true. +func WithKnownFields(knownFields ...bool) Option { + if len(knownFields) > 1 { + return func(o *Options) error { + return errors.New("yaml: WithKnownFields accepts at most one argument") + } + } + val := len(knownFields) == 0 || knownFields[0] + return func(o *Options) error { + o.KnownFields = val + return nil + } +} + +// WithSingleDocument configures the Loader to only process the first document +// in a YAML stream. After the first document is loaded, subsequent calls to +// Load will return io.EOF. +// +// When called without arguments, defaults to true. +// +// This is useful when you expect exactly one document and want behavior +// similar to [Unmarshal]. 
+func WithSingleDocument(singleDocument ...bool) Option { + if len(singleDocument) > 1 { + return func(o *Options) error { + return errors.New("yaml: WithSingleDocument accepts at most one argument") + } + } + val := len(singleDocument) == 0 || singleDocument[0] + return func(o *Options) error { + o.SingleDocument = val + return nil + } +} + +// WithStreamNodes enables returning stream boundary nodes when loading YAML. +// +// When enabled, Loader.Load returns an interleaved sequence of StreamNode and +// DocumentNode values: +// +// [StreamNode, DocNode, StreamNode, DocNode, ..., StreamNode] +// +// StreamNodes contain metadata about the stream including: +// - Encoding (UTF-8, UTF-16LE, UTF-16BE) +// - YAML version directive (%YAML) +// - Tag directives (%TAG) +// - Position information (Line, Column) +// +// An empty YAML stream returns a single StreamNode. +// When called without arguments, defaults to true. +// +// The default is false. +func WithStreamNodes(enable ...bool) Option { + if len(enable) > 1 { + return func(o *Options) error { + return errors.New("yaml: WithStreamNodes accepts at most one argument") + } + } + val := len(enable) == 0 || enable[0] + return func(o *Options) error { + o.StreamNodes = val + return nil + } +} + +// WithAllDocuments enables multi-document mode for Load and Dump operations. +// +// When used with Load, the target must be a pointer to a slice. +// All documents in the YAML stream will be decoded into the slice. +// Zero documents results in an empty slice (no error). +// +// When used with Dump, the input must be a slice. +// Each element will be encoded as a separate YAML document +// with "---" separators. +// +// When called without arguments, defaults to true. +// +// The default is false (single-document mode). 
func WithAllDocuments(all ...bool) Option {
	// The variadic bool emulates an optional argument; more than one value is
	// a caller error, reported lazily when the Option is applied.
	if len(all) > 1 {
		return func(o *Options) error {
			return errors.New("yaml: WithAllDocuments accepts at most one argument")
		}
	}
	// Calling with no argument is equivalent to passing true.
	val := len(all) == 0 || all[0]
	return func(o *Options) error {
		o.AllDocuments = val
		return nil
	}
}

// WithLineWidth sets the preferred line width for YAML output.
//
// When encoding long strings, the encoder will attempt to wrap them at this
// width using literal block style (|). Set to -1 or 0 for unlimited width.
//
// The default is 80 characters.
func WithLineWidth(width int) Option {
	return func(o *Options) error {
		// Any negative value is normalized to -1, the canonical "unlimited"
		// marker. Note that 0 is stored as-is.
		if width < 0 {
			width = -1
		}
		o.LineWidth = width
		return nil
	}
}

// WithUnicode controls whether non-ASCII characters are allowed in YAML output.
//
// When true, non-ASCII characters appear as-is (e.g., "café").
// When false, non-ASCII characters are escaped (e.g., "caf\u00e9").
// When called without arguments, defaults to true.
//
// The default is true.
func WithUnicode(unicode ...bool) Option {
	if len(unicode) > 1 {
		return func(o *Options) error {
			return errors.New("yaml: WithUnicode accepts at most one argument")
		}
	}
	val := len(unicode) == 0 || unicode[0]
	return func(o *Options) error {
		o.Unicode = val
		return nil
	}
}

// WithUniqueKeys enables or disables duplicate key detection during YAML loading.
//
// When enabled, loading will return an error if the YAML input contains
// duplicate keys in any mapping. This is a security feature that prevents
// key override attacks.
// When called without arguments, defaults to true.
//
// The default is true.
func WithUniqueKeys(uniqueKeys ...bool) Option {
	if len(uniqueKeys) > 1 {
		return func(o *Options) error {
			return errors.New("yaml: WithUniqueKeys accepts at most one argument")
		}
	}
	val := len(uniqueKeys) == 0 || uniqueKeys[0]
	return func(o *Options) error {
		o.UniqueKeys = val
		return nil
	}
}

// WithCanonical forces canonical YAML output format.
//
// When enabled, the encoder outputs strictly canonical YAML with explicit
// tags for all values. This produces verbose output primarily useful for
// debugging and YAML spec compliance testing.
// When called without arguments, defaults to true.
//
// The default is false.
func WithCanonical(canonical ...bool) Option {
	if len(canonical) > 1 {
		return func(o *Options) error {
			return errors.New("yaml: WithCanonical accepts at most one argument")
		}
	}
	val := len(canonical) == 0 || canonical[0]
	return func(o *Options) error {
		o.Canonical = val
		return nil
	}
}

// WithLineBreak sets the line ending style for YAML output.
//
// Available options:
//   - LineBreakLN: Unix-style \n (default)
//   - LineBreakCR: Old Mac-style \r
//   - LineBreakCRLN: Windows-style \r\n
//
// The default is LineBreakLN.
func WithLineBreak(lineBreak LineBreak) Option {
	return func(o *Options) error {
		// NOTE(review): unlike WithQuotePreference, the value is stored
		// unvalidated — presumably unknown values fall back to the emitter
		// default; confirm upstream.
		o.LineBreak = lineBreak
		return nil
	}
}

// WithExplicitStart controls whether document start markers (---) are always emitted.
//
// When true, every document begins with an explicit "---" marker.
// When false (default), the marker is omitted for the first document.
// When called without arguments, defaults to true.
func WithExplicitStart(explicit ...bool) Option {
	if len(explicit) > 1 {
		return func(o *Options) error {
			return errors.New("yaml: WithExplicitStart accepts at most one argument")
		}
	}
	val := len(explicit) == 0 || explicit[0]
	return func(o *Options) error {
		o.ExplicitStart = val
		return nil
	}
}

// WithExplicitEnd controls whether document end markers (...) are always emitted.
//
// When true, every document ends with an explicit "..." marker.
// When false (default), the marker is omitted.
// When called without arguments, defaults to true.
func WithExplicitEnd(explicit ...bool) Option {
	if len(explicit) > 1 {
		return func(o *Options) error {
			return errors.New("yaml: WithExplicitEnd accepts at most one argument")
		}
	}
	val := len(explicit) == 0 || explicit[0]
	return func(o *Options) error {
		o.ExplicitEnd = val
		return nil
	}
}

// WithFlowSimpleCollections controls whether simple collections use flow style.
//
// When true, sequences and mappings containing only scalar values (no nested
// collections) are rendered in flow style if they fit within the line width.
// Example: {name: test, count: 42} or [a, b, c]
// When called without arguments, defaults to true.
//
// When false (default), all collections use block style.
func WithFlowSimpleCollections(flow ...bool) Option {
	if len(flow) > 1 {
		return func(o *Options) error {
			return errors.New("yaml: WithFlowSimpleCollections accepts at most one argument")
		}
	}
	val := len(flow) == 0 || flow[0]
	return func(o *Options) error {
		o.FlowSimpleCollections = val
		return nil
	}
}

// WithQuotePreference sets the preferred quote style for strings that require
// quoting.
//
// This option only affects strings that require quoting per the YAML spec.
// Plain strings that don't need quoting remain unquoted regardless of this
// setting. Quoting is required for:
//   - Strings that look like other YAML types (true, false, null, 123, etc.)
//   - Strings with leading/trailing whitespace
//   - Strings containing special YAML syntax characters
//   - Empty strings in certain contexts
//
// Quote styles:
//   - QuoteSingle: Use single quotes (v4 default)
//   - QuoteDouble: Use double quotes
//   - QuoteLegacy: Legacy v2/v3 behavior (mixed quoting)
func WithQuotePreference(style QuoteStyle) Option {
	return func(o *Options) error {
		// Reject unknown enum values eagerly so a typo'd style surfaces as an
		// error instead of silently changing output.
		switch style {
		case QuoteSingle, QuoteDouble, QuoteLegacy:
			o.QuotePreference = style
			return nil
		default:
			return fmt.Errorf("invalid QuoteStyle value: %d", style)
		}
	}
}

// CombineOptions combines multiple options into a single Option.
// This is useful for creating option presets or combining version defaults
// with custom options.
func CombineOptions(opts ...Option) Option {
	return func(o *Options) error {
		// Apply in order; the first failing option aborts the whole set.
		for _, opt := range opts {
			if err := opt(o); err != nil {
				return err
			}
		}
		return nil
	}
}

// ApplyOptions applies the given options to a new options struct.
// Starts with v4 defaults.
func ApplyOptions(opts ...Option) (*Options, error) {
	o := &Options{
		Canonical: false,
		LineBreak: LN_BREAK,

		// v4 defaults
		Indent:           2,
		CompactSeqIndent: true,
		LineWidth:        80,
		Unicode:          true,
		UniqueKeys:       true,
	}
	for _, opt := range opts {
		if err := opt(o); err != nil {
			return nil, err
		}
	}
	return o, nil
}

// DefaultOptions holds the default options for APIs that don't accept options.
var DefaultOptions = &Options{
	// NOTE(review): these legacy defaults (indent 4, unlimited width, legacy
	// quoting) differ from the v4 defaults seeded by ApplyOptions — presumably
	// for v2/v3 compatibility; confirm against upstream docs.
	Indent:          4,
	LineWidth:       -1,
	Unicode:         true,
	UniqueKeys:      true,
	QuotePreference: QuoteLegacy,
}
diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/parser.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/parser.go
new file mode 100644
index 000000000000..34152d695961
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/parser.go
@@ -0,0 +1,1368 @@
// Copyright 2006-2010 Kirill Simonov
// Copyright 2011-2019 Canonical Ltd
// Copyright 2025 The go-yaml Project Contributors
// SPDX-License-Identifier: Apache-2.0 AND MIT

// Parser stage: Transforms token stream into event stream.
// Implements a recursive-descent parser (LL(1)) following the YAML grammar specification.
//
// The parser implements the following grammar:
//
// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
// implicit_document ::= block_node DOCUMENT-END*
// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
// block_node_or_indentless_sequence ::=
//	ALIAS
//	| properties (block_content | indentless_block_sequence)?
//	| block_content
//	| indentless_block_sequence
// block_node ::= ALIAS
//	| properties block_content?
//	| block_content
// flow_node ::= ALIAS
//	| properties flow_content?
//	| flow_content
// properties ::= TAG ANCHOR? | ANCHOR TAG?
// block_content ::= block_collection | flow_collection | SCALAR
// flow_content ::= flow_collection | SCALAR
// block_collection ::= block_sequence | block_mapping
// flow_collection ::= flow_sequence | flow_mapping
// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
// block_mapping ::= BLOCK-MAPPING_START
//	((KEY block_node_or_indentless_sequence?)?
//	(VALUE block_node_or_indentless_sequence?)?)*
//	BLOCK-END
// flow_sequence ::= FLOW-SEQUENCE-START
//	(flow_sequence_entry FLOW-ENTRY)*
//	flow_sequence_entry?
//	FLOW-SEQUENCE-END
// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
// flow_mapping ::= FLOW-MAPPING-START
//	(flow_mapping_entry FLOW-ENTRY)*
//	flow_mapping_entry?
//	FLOW-MAPPING-END
// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?

package libyaml

import (
	"bytes"
	"io"
	"strings"
)

// Peek the next token in the token queue.
// The returned pointer aliases the internal token queue; it is valid until
// the token is skipped or more tokens are fetched.
func (parser *Parser) peekToken(out **Token) error {
	if !parser.token_available {
		if err := parser.fetchMoreTokens(); err != nil {
			return err
		}
	}

	token := &parser.tokens[parser.tokens_head]
	parser.UnfoldComments(token)
	*out = token
	return nil
}

// UnfoldComments walks through the comments queue and joins all
// comments behind the position of the provided token into the respective
// top-level comment slices in the parser.
func (parser *Parser) UnfoldComments(token *Token) {
	for parser.comments_head < len(parser.comments) && token.StartMark.Index >= parser.comments[parser.comments_head].TokenMark.Index {
		comment := &parser.comments[parser.comments_head]
		if len(comment.Head) > 0 {
			if token.Type == BLOCK_END_TOKEN {
				// No heads on ends, so keep comment.Head for a follow up token.
				break
			}
			// Multiple queued comments of the same kind are joined with '\n'.
			if len(parser.HeadComment) > 0 {
				parser.HeadComment = append(parser.HeadComment, '\n')
			}
			parser.HeadComment = append(parser.HeadComment, comment.Head...)
		}
		if len(comment.Foot) > 0 {
			if len(parser.FootComment) > 0 {
				parser.FootComment = append(parser.FootComment, '\n')
			}
			parser.FootComment = append(parser.FootComment, comment.Foot...)
		}
		if len(comment.Line) > 0 {
			if len(parser.LineComment) > 0 {
				parser.LineComment = append(parser.LineComment, '\n')
			}
			parser.LineComment = append(parser.LineComment, comment.Line...)
		}
		// Zero the consumed entry so its byte slices can be collected.
		*comment = Comment{}
		parser.comments_head++
	}
}

// Remove the next token from the queue (must be called after peek_token).
+func (parser *Parser) skipToken() { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].Type == STREAM_END_TOKEN + parser.tokens_head++ +} + +// Parse gets the next event. +func (parser *Parser) Parse(event *Event) error { + // Erase the event object. + *event = Event{} + + if parser.lastError != nil { + return parser.lastError + } + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.state == PARSE_END_STATE { + return io.EOF + } + + // Generate the next event. + if err := parser.stateMachine(event); err != nil { + parser.lastError = err + return err + } + + return nil +} + +func formatParserError(problem string, problem_mark Mark) error { + return ParserError{ + Mark: problem_mark, + Message: problem, + } +} + +func formatParserErrorContext(context string, context_mark Mark, problem string, problem_mark Mark) error { + return ParserError{ + ContextMark: context_mark, + ContextMessage: context, + + Mark: problem_mark, + Message: problem, + } +} + +// State dispatcher. 
// stateMachine dispatches to the handler for the parser's current state and
// produces exactly one event per call.
func (parser *Parser) stateMachine(event *Event) error {
	// trace("yaml_parser_state_machine", "state:", parser.state.String())

	switch parser.state {
	case PARSE_STREAM_START_STATE:
		return parser.parseStreamStart(event)

	case PARSE_IMPLICIT_DOCUMENT_START_STATE:
		return parser.parseDocumentStart(event, true)

	case PARSE_DOCUMENT_START_STATE:
		return parser.parseDocumentStart(event, false)

	case PARSE_DOCUMENT_CONTENT_STATE:
		return parser.parseDocumentContent(event)

	case PARSE_DOCUMENT_END_STATE:
		return parser.parseDocumentEnd(event)

	case PARSE_BLOCK_NODE_STATE:
		return parser.parseNode(event, true, false)

	case PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
		return parser.parseBlockSequenceEntry(event, true)

	case PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
		return parser.parseBlockSequenceEntry(event, false)

	case PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
		return parser.parseIndentlessSequenceEntry(event)

	case PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
		return parser.parseBlockMappingKey(event, true)

	case PARSE_BLOCK_MAPPING_KEY_STATE:
		return parser.parseBlockMappingKey(event, false)

	case PARSE_BLOCK_MAPPING_VALUE_STATE:
		return parser.parseBlockMappingValue(event)

	case PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
		return parser.parseFlowSequenceEntry(event, true)

	case PARSE_FLOW_SEQUENCE_ENTRY_STATE:
		return parser.parseFlowSequenceEntry(event, false)

	case PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
		return parser.parseFlowSequenceEntryMappingKey(event)

	case PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
		return parser.parseFlowSequenceEntryMappingValue(event)

	case PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
		return parser.parseFlowSequenceEntryMappingEnd(event)

	case PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
		return parser.parseFlowMappingKey(event, true)

	case PARSE_FLOW_MAPPING_KEY_STATE:
		return parser.parseFlowMappingKey(event, false)

	case PARSE_FLOW_MAPPING_VALUE_STATE:
		return parser.parseFlowMappingValue(event, false)

	case PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
		return parser.parseFlowMappingValue(event, true)

	default:
		// Unreachable unless parser state bookkeeping is corrupted.
		panic("invalid parser state")
	}
}

// Parse the production:
// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
//
// ************
func (parser *Parser) parseStreamStart(event *Event) error {
	var token *Token
	if err := parser.peekToken(&token); err != nil {
		return err
	}
	if token.Type != STREAM_START_TOKEN {
		// NOTE(review): this message looks truncated — the expected-token name
		// (presumably "<stream-start>") appears to be missing; confirm against
		// the upstream source before changing.
		return formatParserError("did not find expected ", token.StartMark)
	}
	parser.state = PARSE_IMPLICIT_DOCUMENT_START_STATE
	*event = Event{
		Type:      STREAM_START_EVENT,
		StartMark: token.StartMark,
		EndMark:   token.EndMark,
		encoding:  token.encoding,
	}
	parser.skipToken()
	return nil
}

// Parse the productions:
// implicit_document ::= block_node DOCUMENT-END*
//
// *
//
// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
//
// *************************
func (parser *Parser) parseDocumentStart(event *Event, implicit bool) error {
	var token *Token
	if err := parser.peekToken(&token); err != nil {
		return err
	}

	// Parse extra document end indicators.
	if !implicit {
		for token.Type == DOCUMENT_END_TOKEN {
			parser.skipToken()
			if err := parser.peekToken(&token); err != nil {
				return err
			}
		}
	}

	if implicit && token.Type != VERSION_DIRECTIVE_TOKEN &&
		token.Type != TAG_DIRECTIVE_TOKEN &&
		token.Type != DOCUMENT_START_TOKEN &&
		token.Type != STREAM_END_TOKEN {
		// Parse an implicit document.
		if err := parser.processDirectives(nil, nil); err != nil {
			return err
		}
		parser.states = append(parser.states, PARSE_DOCUMENT_END_STATE)
		parser.state = PARSE_BLOCK_NODE_STATE

		var head_comment []byte
		if len(parser.HeadComment) > 0 {
			// [Go] Scan the header comment backwards, and if an empty line is found, break
			// the header so the part before the last empty line goes into the
			// document header, while the bottom of it goes into a follow up event.
			for i := len(parser.HeadComment) - 1; i > 0; i-- {
				if parser.HeadComment[i] == '\n' {
					if i == len(parser.HeadComment)-1 {
						head_comment = parser.HeadComment[:i]
						parser.HeadComment = parser.HeadComment[i+1:]
						break
					} else if parser.HeadComment[i-1] == '\n' {
						head_comment = parser.HeadComment[:i-1]
						parser.HeadComment = parser.HeadComment[i+1:]
						break
					}
				}
			}
		}

		*event = Event{
			Type:      DOCUMENT_START_EVENT,
			StartMark: token.StartMark,
			EndMark:   token.EndMark,

			HeadComment: head_comment,
			Implicit:    true,
		}

	} else if token.Type != STREAM_END_TOKEN {
		// Parse an explicit document.
		var version_directive *VersionDirective
		var tag_directives []TagDirective
		start_mark := token.StartMark
		if err := parser.processDirectives(&version_directive, &tag_directives); err != nil {
			return err
		}
		if err := parser.peekToken(&token); err != nil {
			return err
		}
		if token.Type != DOCUMENT_START_TOKEN {
			// NOTE(review): message looks truncated here as well (presumably
			// "<document start>"); confirm against upstream.
			return formatParserError(
				"did not find expected ", token.StartMark)
		}
		parser.states = append(parser.states, PARSE_DOCUMENT_END_STATE)
		parser.state = PARSE_DOCUMENT_CONTENT_STATE
		end_mark := token.EndMark

		*event = Event{
			Type:             DOCUMENT_START_EVENT,
			StartMark:        start_mark,
			EndMark:          end_mark,
			versionDirective: version_directive,
			tagDirectives:    tag_directives,
			Implicit:         false,
		}
		parser.skipToken()

	} else {
		// Parse the stream end.
		parser.state = PARSE_END_STATE
		*event = Event{
			Type:      STREAM_END_EVENT,
			StartMark: token.StartMark,
			EndMark:   token.EndMark,
		}
		parser.skipToken()
	}

	return nil
}

// Parse the productions:
// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
//
// ***********
func (parser *Parser) parseDocumentContent(event *Event) error {
	var token *Token
	if err := parser.peekToken(&token); err != nil {
		return err
	}

	// An empty document body (the next token already terminates the document)
	// is represented as an implicit empty scalar.
	if token.Type == VERSION_DIRECTIVE_TOKEN ||
		token.Type == TAG_DIRECTIVE_TOKEN ||
		token.Type == DOCUMENT_START_TOKEN ||
		token.Type == DOCUMENT_END_TOKEN ||
		token.Type == STREAM_END_TOKEN {
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]
		return parser.processEmptyScalar(event,
			token.StartMark)
	}
	return parser.parseNode(event, true, false)
}

// Parse the productions:
// implicit_document ::= block_node DOCUMENT-END*
//
// *************
//
// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node?
// DOCUMENT-END*
func (parser *Parser) parseDocumentEnd(event *Event) error {
	var token *Token
	if err := parser.peekToken(&token); err != nil {
		return err
	}

	start_mark := token.StartMark
	end_mark := token.StartMark

	// Implicit unless an explicit "..." marker is present.
	implicit := true
	if token.Type == DOCUMENT_END_TOKEN {
		end_mark = token.EndMark
		parser.skipToken()
		implicit = false
	}

	// Tag directives are scoped to a single document.
	parser.tag_directives = parser.tag_directives[:0]

	parser.state = PARSE_DOCUMENT_START_STATE
	*event = Event{
		Type:      DOCUMENT_END_EVENT,
		StartMark: start_mark,
		EndMark:   end_mark,
		Implicit:  implicit,
	}
	parser.setEventComments(event)
	// A head comment at document end has nothing following it to head; treat
	// it as a foot comment instead.
	if len(event.HeadComment) > 0 && len(event.FootComment) == 0 {
		event.FootComment = event.HeadComment
		event.HeadComment = nil
	}
	return nil
}

// setEventComments moves the parser's pending comment buffers onto the event
// and clears them so they are attached to exactly one event.
func (parser *Parser) setEventComments(event *Event) {
	event.HeadComment = parser.HeadComment
	event.LineComment = parser.LineComment
	event.FootComment = parser.FootComment
	parser.HeadComment = nil
	parser.LineComment = nil
	parser.FootComment = nil
	parser.tail_comment = nil
	parser.stem_comment = nil
}

// Parse the productions:
// block_node_or_indentless_sequence ::=
//
//	ALIAS
//	*****
//	| properties (block_content | indentless_block_sequence)?
//	  **********  *
//	| block_content | indentless_block_sequence
//	  *
//
// block_node ::= ALIAS
//
//	*****
//	| properties block_content?
//	  **********  *
//	| block_content
//	  *
//
// flow_node ::= ALIAS
//
//	*****
//	| properties flow_content?
//	  **********  *
//	| flow_content
//	  *
//
// properties ::= TAG ANCHOR? | ANCHOR TAG?
//
//	*************************
//
// block_content ::= block_collection | flow_collection | SCALAR
//
//	******
//
// flow_content ::= flow_collection | SCALAR
//
//	******
func (parser *Parser) parseNode(event *Event, block, indentless_sequence bool) error {
	// defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()

	var token *Token
	if err := parser.peekToken(&token); err != nil {
		return err
	}

	if token.Type == ALIAS_TOKEN {
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]
		*event = Event{
			Type:      ALIAS_EVENT,
			StartMark: token.StartMark,
			EndMark:   token.EndMark,
			Anchor:    token.Value,
		}
		parser.setEventComments(event)
		parser.skipToken()
		return nil
	}

	start_mark := token.StartMark
	end_mark := token.StartMark

	// Collect the optional node properties (anchor and/or tag, either order).
	var tag_token bool
	var tag_handle, tag_suffix, anchor []byte
	var tag_mark Mark
	switch token.Type {
	case ANCHOR_TOKEN:
		anchor = token.Value
		start_mark = token.StartMark
		end_mark = token.EndMark
		parser.skipToken()
		if err := parser.peekToken(&token); err != nil {
			return err
		}
		if token.Type == TAG_TOKEN {
			tag_token = true
			tag_handle = token.Value
			tag_suffix = token.suffix
			tag_mark = token.StartMark
			end_mark = token.EndMark
			parser.skipToken()
			if err := parser.peekToken(&token); err != nil {
				return err
			}
		}
	case TAG_TOKEN:
		tag_token = true
		tag_handle = token.Value
		tag_suffix = token.suffix
		start_mark = token.StartMark
		tag_mark = token.StartMark
		end_mark = token.EndMark
		parser.skipToken()
		if err := parser.peekToken(&token); err != nil {
			return err
		}
		if token.Type == ANCHOR_TOKEN {
			anchor = token.Value
			end_mark = token.EndMark
			parser.skipToken()
			if err := parser.peekToken(&token); err != nil {
				return err
			}
		}
	}

	// Resolve the tag handle against the document's tag directives.
	var tag []byte
	if tag_token {
		if len(tag_handle) == 0 {
			tag = tag_suffix
		} else {
			for i := range parser.tag_directives {
				if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
					tag = append([]byte(nil), parser.tag_directives[i].prefix...)
					tag = append(tag, tag_suffix...)
					break
				}
			}
			if len(tag) == 0 {
				return formatParserErrorContext(
					"while parsing a node", start_mark,
					"found undefined tag handle", tag_mark)
			}
		}
	}

	implicit := len(tag) == 0
	if indentless_sequence && token.Type == BLOCK_ENTRY_TOKEN {
		end_mark = token.EndMark
		parser.state = PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
		*event = Event{
			Type:      SEQUENCE_START_EVENT,
			StartMark: start_mark,
			EndMark:   end_mark,
			Anchor:    anchor,
			Tag:       tag,
			Implicit:  implicit,
			Style:     Style(BLOCK_SEQUENCE_STYLE),
		}
		return nil
	}
	if token.Type == SCALAR_TOKEN {
		var plain_implicit, quoted_implicit bool
		end_mark = token.EndMark
		if (len(tag) == 0 && token.Style == PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
			plain_implicit = true
		} else if len(tag) == 0 {
			quoted_implicit = true
		}
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]

		*event = Event{
			Type:            SCALAR_EVENT,
			StartMark:       start_mark,
			EndMark:         end_mark,
			Anchor:          anchor,
			Tag:             tag,
			Value:           token.Value,
			Implicit:        plain_implicit,
			quoted_implicit: quoted_implicit,
			Style:           Style(token.Style),
		}
		parser.setEventComments(event)
		parser.skipToken()
		return nil
	}
	if token.Type == FLOW_SEQUENCE_START_TOKEN {
		// [Go] Some of the events below can be merged as they differ only on style.
		end_mark = token.EndMark
		parser.state = PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
		*event = Event{
			Type:      SEQUENCE_START_EVENT,
			StartMark: start_mark,
			EndMark:   end_mark,
			Anchor:    anchor,
			Tag:       tag,
			Implicit:  implicit,
			Style:     Style(FLOW_SEQUENCE_STYLE),
		}
		parser.setEventComments(event)
		return nil
	}
	if token.Type == FLOW_MAPPING_START_TOKEN {
		end_mark = token.EndMark
		parser.state = PARSE_FLOW_MAPPING_FIRST_KEY_STATE
		*event = Event{
			Type:      MAPPING_START_EVENT,
			StartMark: start_mark,
			EndMark:   end_mark,
			Anchor:    anchor,
			Tag:       tag,
			Implicit:  implicit,
			Style:     Style(FLOW_MAPPING_STYLE),
		}
		parser.setEventComments(event)
		return nil
	}
	if block && token.Type == BLOCK_SEQUENCE_START_TOKEN {
		end_mark = token.EndMark
		parser.state = PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
		*event = Event{
			Type:      SEQUENCE_START_EVENT,
			StartMark: start_mark,
			EndMark:   end_mark,
			Anchor:    anchor,
			Tag:       tag,
			Implicit:  implicit,
			Style:     Style(BLOCK_SEQUENCE_STYLE),
		}
		// A pending stem comment belongs to the collection as a whole.
		if parser.stem_comment != nil {
			event.HeadComment = parser.stem_comment
			parser.stem_comment = nil
		}
		return nil
	}
	if block && token.Type == BLOCK_MAPPING_START_TOKEN {
		end_mark = token.EndMark
		parser.state = PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
		*event = Event{
			Type:      MAPPING_START_EVENT,
			StartMark: start_mark,
			EndMark:   end_mark,
			Anchor:    anchor,
			Tag:       tag,
			Implicit:  implicit,
			Style:     Style(BLOCK_MAPPING_STYLE),
		}
		if parser.stem_comment != nil {
			event.HeadComment = parser.stem_comment
			parser.stem_comment = nil
		}
		return nil
	}
	// Properties with no following content: an anchor or tag alone yields an
	// empty plain scalar carrying those properties.
	if len(anchor) > 0 || len(tag) > 0 {
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]

		*event = Event{
			Type:            SCALAR_EVENT,
			StartMark:       start_mark,
			EndMark:         end_mark,
			Anchor:          anchor,
			Tag:             tag,
			Implicit:        implicit,
			quoted_implicit: false,
			Style:           Style(PLAIN_SCALAR_STYLE),
		}
		return nil
	}

	context := "while parsing a flow node"
	if block {
		context = "while parsing a block node"
	}
	return formatParserErrorContext(context, start_mark,
		"did not find expected node content", token.StartMark)
}

// Parse the productions:
// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
//
// ******************** *********** * *********
func (parser *Parser) parseBlockSequenceEntry(event *Event, first bool) error {
	// On the first entry, record the collection's start mark for error context.
	if first {
		var token *Token
		if err := parser.peekToken(&token); err != nil {
			return err
		}
		parser.marks = append(parser.marks, token.StartMark)
		parser.skipToken()
	}

	var token *Token
	if err := parser.peekToken(&token); err != nil {
		return err
	}

	if token.Type == BLOCK_ENTRY_TOKEN {
		mark := token.EndMark
		prior_head_len := len(parser.HeadComment)
		parser.skipToken()
		if err := parser.splitStemComment(prior_head_len); err != nil {
			return err
		}
		if err := parser.peekToken(&token); err != nil {
			return err
		}
		if token.Type != BLOCK_ENTRY_TOKEN && token.Type != BLOCK_END_TOKEN {
			parser.states = append(parser.states, PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
			return parser.parseNode(event, true, false)
		} else {
			// "-" with no node after it: an empty entry.
			parser.state = PARSE_BLOCK_SEQUENCE_ENTRY_STATE
			return parser.processEmptyScalar(event, mark)
		}
	}
	if token.Type == BLOCK_END_TOKEN {
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]
		parser.marks = parser.marks[:len(parser.marks)-1]

		*event = Event{
			Type:      SEQUENCE_END_EVENT,
			StartMark: token.StartMark,
			EndMark:   token.EndMark,
		}

		parser.skipToken()
		return nil
	}

	context_mark := parser.marks[len(parser.marks)-1]
	parser.marks = parser.marks[:len(parser.marks)-1]
	return formatParserErrorContext(
		"while parsing a block collection", context_mark,
		"did not find expected '-' indicator", token.StartMark)
}

// Parse the productions:
// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
//
// *********** *
func (parser *Parser) parseIndentlessSequenceEntry(event *Event) error {
	var token *Token
	if err := parser.peekToken(&token); err != nil {
		return err
	}

	if token.Type == BLOCK_ENTRY_TOKEN {
		mark := token.EndMark
		prior_head_len := len(parser.HeadComment)
		parser.skipToken()
		if err := parser.splitStemComment(prior_head_len); err != nil {
			return err
		}
		if err := parser.peekToken(&token); err != nil {
			return err
		}
		if token.Type != BLOCK_ENTRY_TOKEN &&
			token.Type != KEY_TOKEN &&
			token.Type != VALUE_TOKEN &&
			token.Type != BLOCK_END_TOKEN {
			parser.states = append(parser.states, PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
			return parser.parseNode(event, true, false)
		}
		parser.state = PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
		return parser.processEmptyScalar(event, mark)
	}
	parser.state = parser.states[len(parser.states)-1]
	parser.states = parser.states[:len(parser.states)-1]

	*event = Event{
		Type:      SEQUENCE_END_EVENT,
		StartMark: token.StartMark,
		EndMark:   token.StartMark, // [Go] Shouldn't this be token.end_mark?
	}
	return nil
}

// Split stem comment from head comment.
//
// When a sequence or map is found under a sequence entry, the former head comment
// is assigned to the underlying sequence or map as a whole, not the individual
// sequence or map entry as would be expected otherwise. To handle this case the
// previous head comment is moved aside as the stem comment.
func (parser *Parser) splitStemComment(stem_len int) error {
	if stem_len == 0 {
		return nil
	}

	var token *Token
	if err := parser.peekToken(&token); err != nil {
		return err
	}
	if token.Type != BLOCK_SEQUENCE_START_TOKEN && token.Type != BLOCK_MAPPING_START_TOKEN {
		return nil
	}

	parser.stem_comment = parser.HeadComment[:stem_len]
	if len(parser.HeadComment) == stem_len {
		parser.HeadComment = nil
	} else {
		// Copy suffix to prevent very strange bugs if someone ever appends
		// further bytes to the prefix in the stem_comment slice above.
		parser.HeadComment = append([]byte(nil), parser.HeadComment[stem_len+1:]...)
	}
	return nil
}

// Parse the productions:
// block_mapping ::= BLOCK-MAPPING_START
//
//	*******************
//	((KEY block_node_or_indentless_sequence?)?
//	  *** *
//	(VALUE block_node_or_indentless_sequence?)?)*
//
//	BLOCK-END
//	*********
func (parser *Parser) parseBlockMappingKey(event *Event, first bool) error {
	if first {
		var token *Token
		if err := parser.peekToken(&token); err != nil {
			return err
		}
		parser.marks = append(parser.marks, token.StartMark)
		parser.skipToken()
	}

	var token *Token
	if err := parser.peekToken(&token); err != nil {
		return err
	}

	// [Go] A tail comment was left from the prior mapping value processed. Emit an event
	//      as it needs to be processed with that value and not the following key.
	if len(parser.tail_comment) > 0 {
		*event = Event{
			Type:        TAIL_COMMENT_EVENT,
			StartMark:   token.StartMark,
			EndMark:     token.EndMark,
			FootComment: parser.tail_comment,
		}
		parser.tail_comment = nil
		return nil
	}

	switch token.Type {
	case KEY_TOKEN:
		mark := token.EndMark
		parser.skipToken()
		if err := parser.peekToken(&token); err != nil {
			return err
		}
		if token.Type != KEY_TOKEN &&
			token.Type != VALUE_TOKEN &&
			token.Type != BLOCK_END_TOKEN {
			parser.states = append(parser.states, PARSE_BLOCK_MAPPING_VALUE_STATE)
			return parser.parseNode(event, true, true)
		} else {
			// "?" with no node after it: an empty key.
			parser.state = PARSE_BLOCK_MAPPING_VALUE_STATE
			return parser.processEmptyScalar(event, mark)
		}
	case BLOCK_END_TOKEN:
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]
		parser.marks = parser.marks[:len(parser.marks)-1]
		*event = Event{
			Type:      MAPPING_END_EVENT,
			StartMark: token.StartMark,
			EndMark:   token.EndMark,
		}
		parser.setEventComments(event)
		parser.skipToken()
		return nil
	}

	context_mark := parser.marks[len(parser.marks)-1]
	parser.marks = parser.marks[:len(parser.marks)-1]
	return formatParserErrorContext(
		"while parsing a block mapping", context_mark,
		"did not find expected key", token.StartMark)
}

// Parse the productions:
// block_mapping ::= BLOCK-MAPPING_START
//
//	((KEY block_node_or_indentless_sequence?)?
//
//	(VALUE block_node_or_indentless_sequence?)?)*
//	 ***** *
//	BLOCK-END
func (parser *Parser) parseBlockMappingValue(event *Event) error {
	var token *Token
	if err := parser.peekToken(&token); err != nil {
		return err
	}
	if token.Type == VALUE_TOKEN {
		mark := token.EndMark
		parser.skipToken()
		if err := parser.peekToken(&token); err != nil {
			return err
		}
		if token.Type != KEY_TOKEN &&
			token.Type != VALUE_TOKEN &&
			token.Type != BLOCK_END_TOKEN {
			parser.states = append(parser.states, PARSE_BLOCK_MAPPING_KEY_STATE)
			return parser.parseNode(event, true, true)
		}
		// ":" with no node after it: an empty value.
		parser.state = PARSE_BLOCK_MAPPING_KEY_STATE
		return parser.processEmptyScalar(event, mark)
	}
	// Key with no ":" at all: the value is an empty scalar.
	parser.state = PARSE_BLOCK_MAPPING_KEY_STATE
	return parser.processEmptyScalar(event, token.StartMark)
}

// Parse the productions:
// flow_sequence ::= FLOW-SEQUENCE-START
//
//	*******************
//	(flow_sequence_entry FLOW-ENTRY)*
//	 *                   **********
//	flow_sequence_entry?
//	*
//	FLOW-SEQUENCE-END
//	*****************
//
// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
//
//	*
func (parser *Parser) parseFlowSequenceEntry(event *Event, first bool) error {
	// On the first entry, record the collection's start mark for error context.
	if first {
		var token *Token
		if err := parser.peekToken(&token); err != nil {
			return err
		}
		parser.marks = append(parser.marks, token.StartMark)
		parser.skipToken()
	}
	var token *Token
	if err := parser.peekToken(&token); err != nil {
		return err
	}
	if token.Type != FLOW_SEQUENCE_END_TOKEN {
		// Every entry after the first must be introduced by a ','.
		if !first {
			if token.Type == FLOW_ENTRY_TOKEN {
				parser.skipToken()
				if err := parser.peekToken(&token); err != nil {
					return err
				}
			} else {
				context_mark := parser.marks[len(parser.marks)-1]
				parser.marks = parser.marks[:len(parser.marks)-1]
				return formatParserErrorContext(
					"while parsing a flow sequence", context_mark,
					"did not find expected ',' or ']'", token.StartMark)
			}
		}

		if token.Type == KEY_TOKEN {
			// A "?" inside a flow sequence starts a single-pair implicit mapping.
			parser.state = PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
			*event = Event{
				Type:      MAPPING_START_EVENT,
				StartMark: token.StartMark,
				EndMark:   token.EndMark,
				Implicit:  true,
				Style:     Style(FLOW_MAPPING_STYLE),
			}
			parser.skipToken()
			return nil
		} else if token.Type != FLOW_SEQUENCE_END_TOKEN {
			parser.states = append(parser.states, PARSE_FLOW_SEQUENCE_ENTRY_STATE)
			return parser.parseNode(event, false, false)
		}
	}

	parser.state = parser.states[len(parser.states)-1]
	parser.states = parser.states[:len(parser.states)-1]
	parser.marks = parser.marks[:len(parser.marks)-1]

	*event = Event{
		Type:      SEQUENCE_END_EVENT,
		StartMark: token.StartMark,
		EndMark:   token.EndMark,
	}
	parser.setEventComments(event)

	parser.skipToken()
	return nil
}

// Parse the productions:
// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
//
//	*** *
func (parser *Parser) parseFlowSequenceEntryMappingKey(event *Event) error {
	var token *Token
	if err := parser.peekToken(&token); err != nil {
		return err
	}
	if token.Type != VALUE_TOKEN &&
		token.Type != FLOW_ENTRY_TOKEN &&
		token.Type != FLOW_SEQUENCE_END_TOKEN {
		parser.states = append(parser.states, PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
		return parser.parseNode(event, false, false)
	}
	// "?" with no key node: an empty key.
	mark := token.EndMark
	parser.skipToken()
	parser.state = PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
	return parser.processEmptyScalar(event, mark)
}

// Parse the productions:
// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
//
//	***** *
func (parser *Parser) parseFlowSequenceEntryMappingValue(event *Event) error {
	var token *Token
	if err := parser.peekToken(&token); err != nil {
		return err
	}
	if token.Type == VALUE_TOKEN {
		parser.skipToken()
		// NOTE(review): this inner declaration shadows the outer token, so the
		// processEmptyScalar call below uses the outer (pre-skip) token's mark
		// — presumably intentional, mirroring libyaml; confirm upstream.
		var token *Token
		if err := parser.peekToken(&token); err != nil {
			return err
		}
		if token.Type != FLOW_ENTRY_TOKEN && token.Type != FLOW_SEQUENCE_END_TOKEN {
			parser.states = append(parser.states, PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
			return parser.parseNode(event, false, false)
		}
	}
	parser.state = PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
	return parser.processEmptyScalar(event, token.StartMark)
}

// Parse the productions:
// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
//
//	*
func (parser *Parser) parseFlowSequenceEntryMappingEnd(event *Event) error {
	var token *Token
	if err := parser.peekToken(&token); err != nil {
		return err
	}
	parser.state = PARSE_FLOW_SEQUENCE_ENTRY_STATE
	*event = Event{
		Type:      MAPPING_END_EVENT,
		StartMark: token.StartMark,
		EndMark:   token.StartMark, // [Go] Shouldn't this be end_mark?
	}
	return nil
}

// Parse the productions:
// flow_mapping ::= FLOW-MAPPING-START
//
//	******************
//	(flow_mapping_entry FLOW-ENTRY)*
//	 *                  **********
//	flow_mapping_entry?
//	******************
//	FLOW-MAPPING-END
//	****************
//
// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
//   - *** *
func (parser *Parser) parseFlowMappingKey(event *Event, first bool) error {
	if first {
		var token *Token
		if err := parser.peekToken(&token); err != nil {
			return err
		}
		parser.marks = append(parser.marks, token.StartMark)
		parser.skipToken()
	}

	var token *Token
	if err := parser.peekToken(&token); err != nil {
		return err
	}

	if token.Type != FLOW_MAPPING_END_TOKEN {
		// Every entry after the first must be introduced by a ','.
		if !first {
			if token.Type == FLOW_ENTRY_TOKEN {
				parser.skipToken()
				if err := parser.peekToken(&token); err != nil {
					return err
				}
			} else {
				context_mark := parser.marks[len(parser.marks)-1]
				parser.marks = parser.marks[:len(parser.marks)-1]
				return formatParserErrorContext(
					"while parsing a flow mapping", context_mark,
					"did not find expected ',' or '}'", token.StartMark)
			}
		}

		if token.Type == KEY_TOKEN {
			parser.skipToken()
			if err := parser.peekToken(&token); err != nil {
				return err
			}
			if token.Type != VALUE_TOKEN &&
				token.Type != FLOW_ENTRY_TOKEN &&
				token.Type != FLOW_MAPPING_END_TOKEN {
				parser.states = append(parser.states, PARSE_FLOW_MAPPING_VALUE_STATE)
				return parser.parseNode(event, false, false)
			} else {
				// "?" with no key node: an empty key.
				parser.state = PARSE_FLOW_MAPPING_VALUE_STATE
				return parser.processEmptyScalar(event, token.StartMark)
			}
		} else if token.Type != FLOW_MAPPING_END_TOKEN {
			// Bare node (no "?"): key with an implicitly empty value.
			parser.states = append(parser.states, PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
			return parser.parseNode(event, false, false)
		}
	}

	parser.state = parser.states[len(parser.states)-1]
	parser.states = parser.states[:len(parser.states)-1]
	parser.marks = parser.marks[:len(parser.marks)-1]
	*event = Event{
		Type:      MAPPING_END_EVENT,
		StartMark: token.StartMark,
		EndMark:   token.EndMark,
	}
	parser.setEventComments(event)
	parser.skipToken()
	return nil
}

// Parse the productions:
// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
//   - ***** *
func (parser *Parser) parseFlowMappingValue(event *Event, empty bool) error {
	var token *Token
	if err := parser.peekToken(&token); err != nil {
		return err
	}
	// empty is set when the key had no "?"/":" pair at all.
	if empty {
		parser.state = PARSE_FLOW_MAPPING_KEY_STATE
		return parser.processEmptyScalar(event, token.StartMark)
	}
	if token.Type == VALUE_TOKEN {
		parser.skipToken()
		if err := parser.peekToken(&token); err != nil {
			return err
		}
		if token.Type != FLOW_ENTRY_TOKEN && token.Type != FLOW_MAPPING_END_TOKEN {
			parser.states = append(parser.states, PARSE_FLOW_MAPPING_KEY_STATE)
			return parser.parseNode(event, false, false)
		}
	}
	parser.state = PARSE_FLOW_MAPPING_KEY_STATE
	return parser.processEmptyScalar(event, token.StartMark)
}

// Generate an empty scalar event.
func (parser *Parser) processEmptyScalar(event *Event, mark Mark) error {
	*event = Event{
		Type:      SCALAR_EVENT,
		StartMark: mark,
		EndMark:   mark,
		Value:     nil, // Empty
		Implicit:  true,
		Style:     Style(PLAIN_SCALAR_STYLE),
	}
	return nil
}

// default_tag_directives are the YAML-defined handles available in every
// document that does not override them with %TAG directives.
var default_tag_directives = []TagDirective{
	{[]byte("!"), []byte("!")},
	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
}

// Parse directives.
+func (parser *Parser) processDirectives(version_directive_ref **VersionDirective, tag_directives_ref *[]TagDirective) error { + var version_directive *VersionDirective + var tag_directives []TagDirective + + var token *Token + if err := parser.peekToken(&token); err != nil { + return err + } + + for token.Type == VERSION_DIRECTIVE_TOKEN || token.Type == TAG_DIRECTIVE_TOKEN { + switch token.Type { + case VERSION_DIRECTIVE_TOKEN: + if version_directive != nil { + return formatParserError( + "found duplicate %YAML directive", token.StartMark) + } + if token.major != 1 || token.minor != 1 { + return formatParserError( + "found incompatible YAML document", token.StartMark) + } + version_directive = &VersionDirective{ + major: token.major, + minor: token.minor, + } + case TAG_DIRECTIVE_TOKEN: + value := TagDirective{ + handle: token.Value, + prefix: token.prefix, + } + if err := parser.appendTagDirective(value, false, token.StartMark); err != nil { + return err + } + tag_directives = append(tag_directives, value) + } + + parser.skipToken() + if err := parser.peekToken(&token); err != nil { + return err + } + } + + for i := range default_tag_directives { + if err := parser.appendTagDirective(default_tag_directives[i], true, token.StartMark); err != nil { + return err + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return nil +} + +// Append a tag directive to the directives stack. +func (parser *Parser) appendTagDirective(value TagDirective, allow_duplicates bool, mark Mark) error { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return nil + } + return formatParserError("found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. 
+ value_copy := TagDirective{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return nil +} + +// ParserGetEvents parses the YAML input and returns the generated event stream. +func ParserGetEvents(in []byte) (string, error) { + p := NewComposer(in) + defer p.Destroy() + var events strings.Builder + var event Event + for { + if err := p.Parser.Parse(&event); err != nil { + return "", err + } + formatted := formatEvent(&event) + events.WriteString(formatted) + if event.Type == STREAM_END_EVENT { + event.Delete() + break + } + event.Delete() + events.WriteByte('\n') + } + return events.String(), nil +} + +func formatEvent(e *Event) string { + var b strings.Builder + switch e.Type { + case STREAM_START_EVENT: + b.WriteString("+STR") + case STREAM_END_EVENT: + b.WriteString("-STR") + case DOCUMENT_START_EVENT: + b.WriteString("+DOC") + if !e.Implicit { + b.WriteString(" ---") + } + case DOCUMENT_END_EVENT: + b.WriteString("-DOC") + if !e.Implicit { + b.WriteString(" ...") + } + case ALIAS_EVENT: + b.WriteString("=ALI *") + b.Write(e.Anchor) + case SCALAR_EVENT: + b.WriteString("=VAL") + if len(e.Anchor) > 0 { + b.WriteString(" &") + b.Write(e.Anchor) + } + if len(e.Tag) > 0 { + b.WriteString(" <") + b.Write(e.Tag) + b.WriteString(">") + } + switch e.ScalarStyle() { + case PLAIN_SCALAR_STYLE: + b.WriteString(" :") + case LITERAL_SCALAR_STYLE: + b.WriteString(" |") + case FOLDED_SCALAR_STYLE: + b.WriteString(" >") + case SINGLE_QUOTED_SCALAR_STYLE: + b.WriteString(" '") + case DOUBLE_QUOTED_SCALAR_STYLE: + b.WriteString(` "`) + } + // Escape special characters for consistent event output. 
+ val := strings.NewReplacer( + `\`, `\\`, + "\n", `\n`, + "\t", `\t`, + ).Replace(string(e.Value)) + b.WriteString(val) + + case SEQUENCE_START_EVENT: + b.WriteString("+SEQ") + if len(e.Anchor) > 0 { + b.WriteString(" &") + b.Write(e.Anchor) + } + if len(e.Tag) > 0 { + b.WriteString(" <") + b.Write(e.Tag) + b.WriteString(">") + } + if e.SequenceStyle() == FLOW_SEQUENCE_STYLE { + b.WriteString(" []") + } + case SEQUENCE_END_EVENT: + b.WriteString("-SEQ") + case MAPPING_START_EVENT: + b.WriteString("+MAP") + if len(e.Anchor) > 0 { + b.WriteString(" &") + b.Write(e.Anchor) + } + if len(e.Tag) > 0 { + b.WriteString(" <") + b.Write(e.Tag) + b.WriteString(">") + } + if e.MappingStyle() == FLOW_MAPPING_STYLE { + b.WriteString(" {}") + } + case MAPPING_END_EVENT: + b.WriteString("-MAP") + } + return b.String() +} diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/reader.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/reader.go new file mode 100644 index 000000000000..ecc00fe283e2 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/reader.go @@ -0,0 +1,441 @@ +// Copyright 2006-2010 Kirill Simonov +// Copyright 2011-2019 Canonical Ltd +// Copyright 2025 The go-yaml Project Contributors +// SPDX-License-Identifier: Apache-2.0 AND MIT + +// Input reader with encoding detection and buffering. +// Handles BOM detection, UTF-8/UTF-16 conversion, and provides buffered input +// for the scanner. + +package libyaml + +import ( + "errors" + "fmt" + "io" +) + +func formatReaderError(problem string, offset int, value int) error { + return ReaderError{ + Offset: offset, + Value: value, + Err: errors.New(problem), + } +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. 
+func (parser *Parser) determineEncoding() error { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if err := parser.updateRawBuffer(); err != nil { + return err + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = UTF8_ENCODING + } + return nil +} + +// Update the raw buffer. +func (parser *Parser) updateRawBuffer() error { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return nil + } + + // Return on EOF. + if parser.eof { + return nil + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. 
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return ReaderError{ + Offset: parser.offset, + Value: -1, + Err: fmt.Errorf("input error: %w", err), + } + } + return nil +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func (parser *Parser) updateBuffer(length int) error { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + // + //nolint:staticcheck // there is no problem with this empty branch as it's documentation. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + // return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return nil + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == ANY_ENCODING { + if err := parser.determineEncoding(); err != nil { + return err + } + } + + // Move the unread characters to the beginning of the buffer. 
+ buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if err := parser.updateRawBuffer(); err != nil { + parser.buffer = parser.buffer[:buffer_len] + return err + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. 
+ octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return formatReaderError( + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return formatReaderError( + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return formatReaderError( + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return formatReaderError( + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return formatReaderError( + "invalid Unicode character", + parser.offset, int(value)) + } + + case UTF16LE_ENCODING, UTF16BE_ENCODING: + var low, high int + if parser.encoding == UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. 
Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return formatReaderError( + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return formatReaderError( + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return formatReaderError( + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return formatReaderError( + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. 
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // YAML 1.2 compatible character sets + // Check if the character is in the allowed range: + // For JSON compatibility in quoted scalars, we must allow all + // non-C0 characters. This includes ASCII DEL (0x7F) and the + // C1 control block [#x80-#x9F]. + // ref: https://yaml.org/spec/1.2.2/#51-character-set + switch { + // 8 bit set + // Tab (\t) + case value == 0x09: + // Line feed (LF \n) + case value == 0x0A: + // Carriage Return (CR \r) + case value == 0x0D: + // 16 bit set + // Printable ASCII + case value >= 0x20 && value <= 0x7E: + // DEL, C1 control + // incompatible with YAML versions <= 1.1 + case value >= 0x7F && value <= 0x9F: + // and Basic Multilingual Plane (BMP), + case value >= 0xA0 && value <= 0xD7FF: + // Additional Unicode Areas + case value >= 0xE000 && value <= 0xFFFD: + // 32 bit set + case value >= 0x10000 && value <= 0x10FFFF: + default: + return formatReaderError( + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 
11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return nil +} diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/representer.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/representer.go new file mode 100644 index 000000000000..2f451949f1bf --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/representer.go @@ -0,0 +1,571 @@ +// Copyright 2011-2019 Canonical Ltd +// Copyright 2025 The go-yaml Project Contributors +// SPDX-License-Identifier: Apache-2.0 + +// Representer stage: Converts Go values to YAML nodes. +// Handles marshaling from Go types to the intermediate node representation. 
+ +package libyaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode" + "unicode/utf8" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Pointer) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Pointer) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + digits := false + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + if digits { + return al + } else { + return bl + } + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. 
+func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} + +// Sentinel values for newRepresenter parameters. +// These provide clarity at call sites, similar to http.NoBody. +var ( + noWriter io.Writer = nil + noVersionDirective *VersionDirective = nil + noTagDirective []TagDirective = nil +) + +type Representer struct { + Emitter Emitter + Out []byte + flow bool + Indent int + lineWidth int + doneInit bool + explicitStart bool + explicitEnd bool + flowSimpleCollections bool + quotePreference QuoteStyle +} + +// NewRepresenter creates a new YAML representr with the given options. +// +// The writer parameter specifies the output destination for the representr. +// If writer is nil, the representr will write to an internal buffer. 
+func NewRepresenter(writer io.Writer, opts *Options) *Representer { + emitter := NewEmitter() + emitter.CompactSequenceIndent = opts.CompactSeqIndent + emitter.quotePreference = opts.QuotePreference + emitter.SetWidth(opts.LineWidth) + emitter.SetUnicode(opts.Unicode) + emitter.SetCanonical(opts.Canonical) + emitter.SetLineBreak(opts.LineBreak) + + r := &Representer{ + Emitter: emitter, + Indent: opts.Indent, + lineWidth: opts.LineWidth, + explicitStart: opts.ExplicitStart, + explicitEnd: opts.ExplicitEnd, + flowSimpleCollections: opts.FlowSimpleCollections, + quotePreference: opts.QuotePreference, + } + + if writer != nil { + r.Emitter.SetOutputWriter(writer) + } else { + r.Emitter.SetOutputString(&r.Out) + } + + return r +} + +func (r *Representer) init() { + if r.doneInit { + return + } + if r.Indent == 0 { + r.Indent = 4 + } + r.Emitter.BestIndent = r.Indent + r.emit(NewStreamStartEvent(UTF8_ENCODING)) + r.doneInit = true +} + +func (r *Representer) Finish() { + r.Emitter.OpenEnded = false + r.emit(NewStreamEndEvent()) +} + +func (r *Representer) Destroy() { + r.Emitter.Delete() +} + +func (r *Representer) emit(event Event) { + // This will internally delete the event value. 
+ r.must(r.Emitter.Emit(&event)) +} + +func (r *Representer) must(err error) { + if err != nil { + msg := err.Error() + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (r *Representer) MarshalDoc(tag string, in reflect.Value) { + r.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + r.nodev(in) + } else { + // Use !explicitStart for implicit flag (true = implicit/no marker) + r.emit(NewDocumentStartEvent(noVersionDirective, noTagDirective, !r.explicitStart)) + r.marshal(tag, in) + // Use !explicitEnd for implicit flag + r.emit(NewDocumentEndEvent(!r.explicitEnd)) + } +} + +func (r *Representer) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Pointer && in.IsNil() { + r.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + r.nodev(in) + return + case Node: + if !in.CanAddr() { + n := reflect.New(in.Type()).Elem() + n.Set(in) + in = n + } + r.nodev(in.Addr()) + return + case time.Time: + r.timev(tag, in) + return + case *time.Time: + r.timev(tag, in.Elem()) + return + case time.Duration: + r.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + Fail(err) + } + if v == nil { + r.nilv() + return + } + r.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + Fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + r.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + r.marshal(tag, in.Elem()) + case reflect.Map: + r.mapv(tag, in) + case reflect.Pointer: + r.marshal(tag, in.Elem()) + case reflect.Struct: + r.structv(tag, in) + case reflect.Slice, reflect.Array: + r.slicev(tag, in) + case reflect.String: + r.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, 
reflect.Int32, reflect.Int64: + r.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + r.uintv(tag, in) + case reflect.Float32, reflect.Float64: + r.floatv(tag, in) + case reflect.Bool: + r.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (r *Representer) mapv(tag string, in reflect.Value) { + r.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + r.marshal("", k) + r.marshal("", in.MapIndex(k)) + } + }) +} + +func (r *Representer) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) { + for _, num := range index { + for { + if v.Kind() == reflect.Pointer { + if v.IsNil() { + return reflect.Value{} + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +func (r *Representer) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + r.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = r.fieldByIndex(in, info.Inline) + if !value.IsValid() { + continue + } + } + if info.OmitEmpty && isZero(value) { + continue + } + r.marshal("", reflect.ValueOf(info.Key)) + r.flow = info.Flow + r.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + r.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String())) + } + r.marshal("", k) + r.flow = false + r.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (r *Representer) mappingv(tag string, f func()) { + implicit := tag == "" + style := BLOCK_MAPPING_STYLE + if r.flow { + r.flow = false + style = FLOW_MAPPING_STYLE + } + 
r.emit(NewMappingStartEvent(nil, []byte(tag), implicit, style)) + f() + r.emit(NewMappingEndEvent()) +} + +func (r *Representer) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := BLOCK_SEQUENCE_STYLE + if r.flow { + r.flow = false + style = FLOW_SEQUENCE_STYLE + } + r.emit(NewSequenceStartEvent(nil, []byte(tag), implicit, style)) + n := in.Len() + for i := 0; i < n; i++ { + r.marshal("", in.Index(i)) + } + r.emit(NewSequenceEndEvent()) +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshaled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +// isOldBool returns whether s is bool notation as defined in YAML 1.1. +// +// We continue to force strings that YAML 1.1 would interpret as booleans to be +// rendered as quotes strings so that the marshaled output valid for YAML 1.1 +// parsing. +func isOldBool(s string) (result bool) { + switch s { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", + "n", "N", "no", "No", "NO", "off", "Off", "OFF": + return true + default: + return false + } +} + +// looksLikeMerge returns true if the given string is the merge indicator "<<". +// +// When encoding a scalar with this exact value, it must be quoted to prevent it +// from being interpreted as a merge indicator during decoding. 
+func looksLikeMerge(s string) (result bool) { + return s == "<<" +} + +func (r *Representer) stringv(tag string, in reflect.Value) { + var style ScalarStyle + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be represented directly as YAML so use a binary tag + // and represent it as base64. + tag = binaryTag + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when represented unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == strTag && + !(isBase60Float(s) || + isOldBool(s) || + looksLikeMerge(s)) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. 
+ switch { + case strings.Contains(s, "\n"): + if r.flow || !shouldUseLiteralStyle(s) { + style = DOUBLE_QUOTED_SCALAR_STYLE + } else { + style = LITERAL_SCALAR_STYLE + } + case canUsePlain: + style = PLAIN_SCALAR_STYLE + default: + style = r.quotePreference.ScalarStyle() + } + r.emitScalar(s, "", tag, style, nil, nil, nil, nil) +} + +func (r *Representer) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + r.emitScalar(s, "", tag, PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (r *Representer) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + r.emitScalar(s, "", tag, PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (r *Representer) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + r.emitScalar(s, "", tag, PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (r *Representer) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + r.emitScalar(s, "", tag, PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (r *Representer) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + r.emitScalar(s, "", tag, PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (r *Representer) nilv() { + r.emitScalar("null", "", "", PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (r *Representer) emitScalar( + value, anchor, tag string, style ScalarStyle, head, line, foot, tail []byte, +) { + // TODO Kill this function. Replace all initialize calls by their underlining Go literals. 
+ implicit := tag == "" + if !implicit { + tag = longTag(tag) + } + event := NewScalarEvent([]byte(anchor), []byte(tag), []byte(value), implicit, implicit, style) + event.HeadComment = head + event.LineComment = line + event.FootComment = foot + event.TailComment = tail + r.emit(event) +} + +func (r *Representer) nodev(in reflect.Value) { + r.node(in.Interface().(*Node), "") +} diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/resolver.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/resolver.go new file mode 100644 index 000000000000..d78a20e8ed8f --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/resolver.go @@ -0,0 +1,231 @@ +// Copyright 2011-2019 Canonical Ltd +// Copyright 2025 The go-yaml Project Contributors +// SPDX-License-Identifier: Apache-2.0 + +// Tag resolution for YAML scalars. +// Determines implicit types (int, float, bool, null, timestamp) from untagged +// scalar values. + +package libyaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value any + tag string +} + +var ( + resolveTable = make([]byte, 256) + resolveMap = make(map[string]resolveMapItem) +) + +// negativeZero represents -0.0 for YAML encoding/decoding +// this is needed because Go constants cannot express -0.0 +// https://staticcheck.dev/docs/checks/#SA4026 +var negativeZero = math.Copysign(0.0, -1.0) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~<" { // < for merge key << + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' 
// Float (potentially in map) + + resolveMapList := []struct { + v any + tag string + l []string + }{ + {true, boolTag, []string{"true", "True", "TRUE"}}, + {false, boolTag, []string{"false", "False", "FALSE"}}, + {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, + {negativeZero, floatTag, []string{"-0", "-0.0"}}, + {"<<", mergeTag, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +func resolvableTag(tag string) bool { + switch tag { + case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out any) { + tag = shortTag(tag) + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, strTag, binaryTag: + return + case floatTag: + if rtag == intTag { + switch v := out.(type) { + case int64: + rtag = floatTag + out = float64(v) + return + case int: + rtag = floatTag + out = float64(v) + return + } + } + } + failf("cannot construct %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != strTag && tag != binaryTag { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. 
They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return floatTag, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == timestampTag { + t, ok := parseTimestamp(in) + if ok { + return timestampTag, t + } + } + + plain := strings.ReplaceAll(in, "_", "") + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return intTag, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return floatTag, floatv + } + } + default: + panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") + } + } + return strTag, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. 
+var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. + i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/scanner.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/scanner.go new file mode 100644 index 000000000000..36f62b0ea0e5 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/scanner.go @@ -0,0 +1,3128 @@ +// Copyright 2006-2010 Kirill Simonov +// Copyright 2011-2019 Canonical Ltd +// Copyright 2025 The go-yaml Project Contributors +// SPDX-License-Identifier: Apache-2.0 AND MIT + +// Scanner stage: Transforms input stream into token sequence. +// The Scanner is the most complex stage, handling indentation, simple keys, +// and block collection detection. 
+ +package libyaml + +import ( + "bytes" + "fmt" + "io" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). 
+// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The corresponding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. 
Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. 
We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). 
However YAML has some syntax peculiarities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. 
If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Advance the buffer pointer. +func (parser *Parser) skip() { + if !isBlank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + parser.mark.Index++ + parser.mark.Column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func (parser *Parser) skipLine() { + if isCRLF(parser.buffer, parser.buffer_pos) { + parser.mark.Index += 2 + parser.mark.Column = 0 + parser.mark.Line++ + parser.unread -= 2 + parser.buffer_pos += 2 + parser.newlines++ + } else if isLineBreak(parser.buffer, parser.buffer_pos) { + parser.mark.Index++ + parser.mark.Column = 0 + parser.mark.Line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ + } +} + +// Copy a character to a string buffer and advance pointers. +func (parser *Parser) read(s []byte) []byte { + if !isBlank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.Index++ + parser.mark.Column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func (parser *Parser) readLine(s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . 
LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.Index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.Index++ + parser.mark.Column = 0 + parser.mark.Line++ + parser.unread-- + parser.newlines++ + return s +} + +// Scan gets the next token. +func (parser *Parser) Scan(token *Token) error { + // Erase the token object. + *token = Token{} // [Go] Is this necessary? + + if parser.lastError != nil { + return parser.lastError + } + + // No tokens after STREAM-END or error. + if parser.stream_end_produced { + return io.EOF + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if err := parser.fetchMoreTokens(); err != nil { + parser.lastError = err + return err + } + } + + // Fetch the next token from the queue. 
+ *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.Type == STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return nil +} + +func formatScannerError(problem string, problem_mark Mark) error { + problem_mark.Line += 1 + + return ScannerError{ + Mark: problem_mark, + Message: problem, + } +} + +func formatScannerErrorContext(context string, context_mark Mark, problem string, problem_mark Mark) error { + context_mark.Line += 1 + problem_mark.Line += 1 + + return ScannerError{ + ContextMark: context_mark, + ContextMessage: context, + + Mark: problem_mark, + Message: problem, + } +} + +func (parser *Parser) setScannerTagError(directive bool, context_mark Mark, problem string) error { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return formatScannerErrorContext(context, context_mark, problem, parser.mark) +} + +func trace(args ...any) func() { + pargs := append([]any{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]any{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func (parser *Parser) fetchMoreTokens() error { + // While we need more tokens to fetch, do it. + for { + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. 
+ + var first_key int + found_potential_key := false + + if len(parser.simple_key_stack) > 0 { + // Found a simple key on the stack + first_key = parser.simple_key_stack[0].token_number + found_potential_key = true + } else if parser.simple_key_possible { + // Found a 'current' simple key (which was not pushed to the stack yet) + first_key = parser.simple_key.token_number + found_potential_key = true + } + + if !found_potential_key { + // We don't have any potential simple keys + break + } else if parser.tokens_parsed != first_key { + // We have not reached the potential simple key yet. + break + } + } + // Fetch the next token. + if err := parser.fetchNextToken(); err != nil { + return err + } + } + + parser.token_available = true + return nil +} + +// The dispatcher for token fetchers. +func (parser *Parser) fetchNextToken() (err error) { + // Ensure that the buffer is initialized. + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return parser.fetchStreamStart() + } + + scan_mark := parser.mark + + // Eat whitespaces and comments until we reach the next token. + if err := parser.scanToNextToken(); err != nil { + return err + } + + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + + // Check the indentation level against the current column. + if err := parser.unrollIndent(parser.mark.Column, scan_mark); err != nil { + return err + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 { + if err := parser.updateBuffer(4); err != nil { + return err + } + } + + // Is it the end of the stream? 
+ if isZeroChar(parser.buffer, parser.buffer_pos) { + return parser.fetchStreamEnd() + } + + // Is it a directive? + if parser.mark.Column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return parser.fetchDirective() + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.Column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && isBlankOrZero(buf, pos+3) { + return parser.fetchDocumentIndicator(DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.Column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && isBlankOrZero(buf, pos+3) { + return parser.fetchDocumentIndicator(DOCUMENT_END_TOKEN) + } + + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].StartMark + } + defer func() { + if err != nil { + return + } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].Type == BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } + err = parser.scanLineComment(comment_mark) + }() + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return parser.fetchFlowCollectionStart(FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return parser.fetchFlowCollectionStart(FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return parser.fetchFlowCollectionEnd( + FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return parser.fetchFlowCollectionEnd( + FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? 
+ if parser.buffer[parser.buffer_pos] == ',' { + return parser.fetchFlowEntry() + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && isBlankOrZero(parser.buffer, parser.buffer_pos+1) { + return parser.fetchBlockEntry() + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && isBlankOrZero(parser.buffer, parser.buffer_pos+1) { + return parser.fetchKey() + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 && !parser.isFlowSequence() || isBlankOrZero(parser.buffer, parser.buffer_pos+1)) { + return parser.fetchValue() + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return parser.fetchAnchor(ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return parser.fetchAnchor(ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return parser.fetchTag() + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return parser.fetchBlockScalar(true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return parser.fetchBlockScalar(false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return parser.fetchFlowScalar(true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return parser.fetchFlowScalar(false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. 
+ // + // The last rule is more restrictive than the specification requires. + // [Go] TODO Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(isBlankOrZero(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !isBlank(parser.buffer, parser.buffer_pos+1)) || + ((parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !isBlankOrZero(parser.buffer, parser.buffer_pos+1)) { + return parser.fetchPlainScalar() + } + + // If we don't determine the token type so far, it is an error. 
+ return formatScannerErrorContext( + "while scanning for the next token", parser.mark, + "found character that cannot start any token", parser.mark) +} + +func (parser *Parser) isFlowSequence() bool { + if len(parser.tokens) == 0 { + return false + } + previousToken := parser.tokens[len(parser.tokens)-1] + return previousToken.Type == FLOW_ENTRY_TOKEN || previousToken.Type == FLOW_SEQUENCE_START_TOKEN +} + +// Check if a simple key may start at the current position and add it if +// needed. +func (parser *Parser) saveSimpleKey() error { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.Column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + if err := parser.removeSimpleKey(); err != nil { + return err + } + + parser.simple_key_possible = true + parser.simple_key = SimpleKey{ + required: required, + flow_level: parser.flow_level, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + } + } + return nil +} + +// Remove a potential simple key at the current flow level. +func (parser *Parser) removeSimpleKey() error { + // If the key is required, it is an error. + if parser.simple_key.required { + return formatScannerErrorContext( + "while scanning a simple key", parser.simple_key.mark, + "could not find expected ':'", parser.mark) + } + + parser.simple_key_possible = false // disable the key + return nil +} + +// max_flow_level limits the flow_level +const max_flow_level = 10000 + +// Increase the flow level and resize the simple key list if needed. +func (parser *Parser) increaseFlowLevel() error { + // Increase the flow level. 
+ parser.flow_level++ + if parser.flow_level > max_flow_level { + return formatScannerErrorContext( + "while increasing flow level", parser.simple_key.mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level), parser.mark) + } + + // If a simple key was possible, push it to the stack before resetting the key. + if parser.simple_key_possible { + parser.simple_key_stack = append(parser.simple_key_stack, parser.simple_key) + } + + // Reset the simple key for the new flow level. + parser.simple_key = SimpleKey{} + + return nil +} + +// Decrease the flow level. +func (parser *Parser) decreaseFlowLevel() error { + if parser.flow_level > 0 { + parser.flow_level-- + + if len(parser.simple_key_stack) == 0 { + return nil + } + + last := len(parser.simple_key_stack) - 1 + if parser.simple_key_stack[last].flow_level == parser.flow_level { + parser.simple_key = parser.simple_key_stack[last] // use last item + parser.simple_key_stack = parser.simple_key_stack[:last] // remove last item + parser.simple_key_possible = true // enable the key + } + } + return nil +} + +// max_indents limits the indents stack size +const max_indents = 10000 + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func (parser *Parser) rollIndent(column, number int, typ TokenType, mark Mark) error { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return nil + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. 
+ parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return formatScannerErrorContext( + "while increasing indent level", parser.simple_key.mark, + fmt.Sprintf("exceeded max depth of %d", max_indents), parser.mark) + } + + // Create a token and insert it into the queue. + token := Token{ + Type: typ, + StartMark: mark, + EndMark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + parser.insertToken(number, &token) + } + return nil +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func (parser *Parser) unrollIndent(column int, scan_mark Mark) error { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return nil + } + + block_mark := scan_mark + block_mark.Index-- + + // Loop through the indentation levels in the stack. + for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.Index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.EndMark.Index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.StartMark.Column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. + block_mark = comment.StartMark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. 
+ stop_index = comment.ScanMark.Index + } + + // Create a token and append it to the queue. + token := Token{ + Type: BLOCK_END_TOKEN, + StartMark: block_mark, + EndMark: block_mark, + } + parser.insertToken(-1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return nil +} + +// Initialize the scanner and produce the STREAM-START token. +func (parser *Parser) fetchStreamStart() error { + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_key = SimpleKey{} + parser.simple_key_stack = []SimpleKey{} + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := Token{ + Type: STREAM_START_TOKEN, + StartMark: parser.mark, + EndMark: parser.mark, + encoding: parser.encoding, + } + parser.insertToken(-1, &token) + return nil +} + +// Produce the STREAM-END token and shut down the scanner. +func (parser *Parser) fetchStreamEnd() error { + // Force new line. + if parser.mark.Column != 0 { + parser.mark.Column = 0 + parser.mark.Line++ + } + + // Reset the indentation level. + if err := parser.unrollIndent(-1, parser.mark); err != nil { + return err + } + + // Reset simple keys. + if err := parser.removeSimpleKey(); err != nil { + return err + } + parser.simple_key = SimpleKey{} + parser.simple_key_stack = []SimpleKey{} + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := Token{ + Type: STREAM_END_TOKEN, + StartMark: parser.mark, + EndMark: parser.mark, + } + parser.insertToken(-1, &token) + return nil +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func (parser *Parser) fetchDirective() error { + // Reset the indentation level. 
+ if err := parser.unrollIndent(-1, parser.mark); err != nil { + return err + } + + // Reset simple keys. + if err := parser.removeSimpleKey(); err != nil { + return err + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := Token{} + if err := parser.scanDirective(&token); err != nil { + return err + } + // Append the token to the queue. + parser.insertToken(-1, &token) + return nil +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func (parser *Parser) fetchDocumentIndicator(typ TokenType) error { + // Reset the indentation level. + if err := parser.unrollIndent(-1, parser.mark); err != nil { + return err + } + + // Reset simple keys. + if err := parser.removeSimpleKey(); err != nil { + return err + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + parser.skip() + parser.skip() + parser.skip() + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := Token{ + Type: typ, + StartMark: start_mark, + EndMark: end_mark, + } + // Append the token to the queue. + parser.insertToken(-1, &token) + return nil +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func (parser *Parser) fetchFlowCollectionStart(typ TokenType) error { + // The indicators '[' and '{' may start a simple key. + if err := parser.saveSimpleKey(); err != nil { + return err + } + + // Increase the flow level. + if err := parser.increaseFlowLevel(); err != nil { + return err + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + parser.skip() + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := Token{ + Type: typ, + StartMark: start_mark, + EndMark: end_mark, + } + // Append the token to the queue. 
+ parser.insertToken(-1, &token) + return nil +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func (parser *Parser) fetchFlowCollectionEnd(typ TokenType) error { + // Reset any potential simple key on the current flow level. + if err := parser.removeSimpleKey(); err != nil { + return err + } + + // Decrease the flow level. + if err := parser.decreaseFlowLevel(); err != nil { + return err + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + parser.skip() + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := Token{ + Type: typ, + StartMark: start_mark, + EndMark: end_mark, + } + // Append the token to the queue. + parser.insertToken(-1, &token) + return nil +} + +// Produce the FLOW-ENTRY token. +func (parser *Parser) fetchFlowEntry() error { + // Reset any potential simple keys on the current flow level. + if err := parser.removeSimpleKey(); err != nil { + return err + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + parser.skip() + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := Token{ + Type: FLOW_ENTRY_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + } + parser.insertToken(-1, &token) + return nil +} + +// Produce the BLOCK-ENTRY token. +func (parser *Parser) fetchBlockEntry() error { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return formatScannerError("block sequence entries are not allowed in this context", parser.mark) + } + // Add the BLOCK-SEQUENCE-START token if needed. 
+ if err := parser.rollIndent(parser.mark.Column, -1, BLOCK_SEQUENCE_START_TOKEN, parser.mark); err != nil { + return err + } + } else { //nolint:staticcheck // there is no problem with this empty branch as it's documentation. + + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if err := parser.removeSimpleKey(); err != nil { + return err + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + parser.skip() + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := Token{ + Type: BLOCK_ENTRY_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + } + parser.insertToken(-1, &token) + return nil +} + +// Produce the KEY token. +func (parser *Parser) fetchKey() error { + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not necessary simple). + if !parser.simple_key_allowed { + return formatScannerError("mapping keys are not allowed in this context", parser.mark) + } + // Add the BLOCK-MAPPING-START token if needed. + if err := parser.rollIndent(parser.mark.Column, -1, BLOCK_MAPPING_START_TOKEN, parser.mark); err != nil { + return err + } + } + + // Reset any potential simple keys on the current flow level. + if err := parser.removeSimpleKey(); err != nil { + return err + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + parser.skip() + end_mark := parser.mark + + // Create the KEY token and append it to the queue. 
+ token := Token{ + Type: KEY_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + } + parser.insertToken(-1, &token) + return nil +} + +// Produce the VALUE token. +func (parser *Parser) fetchValue() error { + simple_key := &parser.simple_key + + // Have we found a simple key? + if parser.simple_key_possible && simple_key.mark.Line == parser.mark.Line { + // Create the KEY token and insert it into the queue. + token := Token{ + Type: KEY_TOKEN, + StartMark: simple_key.mark, + EndMark: simple_key.mark, + } + parser.insertToken(simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if err := parser.rollIndent(simple_key.mark.Column, + simple_key.token_number, + BLOCK_MAPPING_START_TOKEN, simple_key.mark); err != nil { + return err + } + + // Remove the simple key. + parser.simple_key_possible = false + simple_key.required = false + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return formatScannerError("mapping values are not allowed in this context", parser.mark) + } + + // Add the BLOCK-MAPPING-START token if needed. + if err := parser.rollIndent(parser.mark.Column, -1, BLOCK_MAPPING_START_TOKEN, parser.mark); err != nil { + return err + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + parser.skip() + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := Token{ + Type: VALUE_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + } + parser.insertToken(-1, &token) + return nil +} + +// Produce the ALIAS or ANCHOR token. 
+func (parser *Parser) fetchAnchor(typ TokenType) error { + // An anchor or an alias could be a simple key. + if err := parser.saveSimpleKey(); err != nil { + return err + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token Token + if err := parser.scanAnchor(&token, typ); err != nil { + return err + } + parser.insertToken(-1, &token) + return nil +} + +// Produce the TAG token. +func (parser *Parser) fetchTag() error { + // A tag could be a simple key. + if err := parser.saveSimpleKey(); err != nil { + return err + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token Token + if err := parser.scanTag(&token); err != nil { + return err + } + parser.insertToken(-1, &token) + return nil +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func (parser *Parser) fetchBlockScalar(literal bool) error { + // Remove any potential simple keys. + if err := parser.removeSimpleKey(); err != nil { + return err + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token Token + if err := parser.scanBlockScalar(&token, literal); err != nil { + return err + } + parser.insertToken(-1, &token) + return nil +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func (parser *Parser) fetchFlowScalar(single bool) error { + // A plain scalar could be a simple key. + if err := parser.saveSimpleKey(); err != nil { + return err + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. 
+ var token Token + if err := parser.scanFlowScalar(&token, single); err != nil { + return err + } + parser.insertToken(-1, &token) + return nil +} + +// Produce the SCALAR(...,plain) token. +func (parser *Parser) fetchPlainScalar() error { + // A plain scalar could be a simple key. + if err := parser.saveSimpleKey(); err != nil { + return err + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token Token + if err := parser.scanPlainScalar(&token); err != nil { + return err + } + parser.insertToken(-1, &token) + return nil +} + +// Eat whitespaces and comments until the next token is found. +func (parser *Parser) scanToNextToken() error { + scan_mark := parser.mark + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + if parser.mark.Column == 0 && isBOM(parser.buffer, parser.buffer_pos) { + parser.skip() + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + parser.skip() + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + } + + // Check if we just had a line comment under a sequence entry that + // looks more like a header to the following content. Similar to this: + // + // - # The comment + // - Some data + // + // If so, transform the line comment to a head comment and reposition. 
+ if len(parser.comments) > 0 && len(parser.tokens) > 1 { + tokenA := parser.tokens[len(parser.tokens)-2] + tokenB := parser.tokens[len(parser.tokens)-1] + comment := &parser.comments[len(parser.comments)-1] + if tokenA.Type == BLOCK_SEQUENCE_START_TOKEN && tokenB.Type == BLOCK_ENTRY_TOKEN && len(comment.Line) > 0 && !isLineBreak(parser.buffer, parser.buffer_pos) { + // If it was in the prior line, reposition so it becomes a + // header of the follow up token. Otherwise, keep it in place + // so it becomes a header of the former. + comment.Head = comment.Line + comment.Line = nil + if comment.StartMark.Line == parser.mark.Line-1 { + comment.TokenMark = parser.mark + } + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + if err := parser.scanComments(scan_mark); err != nil { + return err + } + } + + // If it is a line break, eat it. + if isLineBreak(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 { + if err := parser.updateBuffer(2); err != nil { + return err + } + } + parser.skipLine() + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return nil +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +func (parser *Parser) scanDirective(token *Token) error { + // Eat '%'. + start_mark := parser.mark + parser.skip() + + // Scan the directive name. + var name []byte + if err := parser.scanDirectiveName(start_mark, &name); err != nil { + return err + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. 
+ var major, minor int8 + if err := parser.scanVersionDirectiveValue(start_mark, &major, &minor); err != nil { + return err + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = Token{ + Type: VERSION_DIRECTIVE_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if err := parser.scanTagDirectiveValue(start_mark, &handle, &prefix); err != nil { + return err + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = Token{ + Type: TAG_DIRECTIVE_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + Value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + return formatScannerErrorContext("while scanning a directive", start_mark, + "found unknown directive name", parser.mark) + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + + for isBlank(parser.buffer, parser.buffer_pos) { + parser.skip() + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + // [Go] Discard this inline comment for the time being. + //if !parser.ScanLineComment(start_mark) { + // return false + //} + for !isBreakOrZero(parser.buffer, parser.buffer_pos) { + parser.skip() + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + } + } + + // Check if we are at the end of the line. + if !isBreakOrZero(parser.buffer, parser.buffer_pos) { + return formatScannerErrorContext("while scanning a directive", start_mark, + "did not find expected comment or line break", parser.mark) + } + + // Eat a line break. 
+ if isLineBreak(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 { + if err := parser.updateBuffer(2); err != nil { + return err + } + } + parser.skipLine() + } + + return nil +} + +// Scan the directive name. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +func (parser *Parser) scanDirectiveName(start_mark Mark, name *[]byte) error { + // Consume the directive name. + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + + var s []byte + for isAlpha(parser.buffer, parser.buffer_pos) { + s = parser.read(s) + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + } + + // Check if the name is empty. + if len(s) == 0 { + return formatScannerErrorContext("while scanning a directive", start_mark, + "could not find expected directive name", parser.mark) + } + + // Check for an blank character after the name. + if !isBlankOrZero(parser.buffer, parser.buffer_pos) { + return formatScannerErrorContext("while scanning a directive", start_mark, + "found unexpected non-alphabetical character", parser.mark) + } + *name = s + return nil +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^^^ +func (parser *Parser) scanVersionDirectiveValue(start_mark Mark, major, minor *int8) error { + // Eat whitespaces. + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + for isBlank(parser.buffer, parser.buffer_pos) { + parser.skip() + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + } + + // Consume the major version number. + if err := parser.scanVersionDirectiveNumber(start_mark, major); err != nil { + return err + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return formatScannerErrorContext("while scanning a %YAML directive", start_mark, + "did not find expected digit or '.' 
character", parser.mark) + } + + parser.skip() + + // Consume the minor version number. + if err := parser.scanVersionDirectiveNumber(start_mark, minor); err != nil { + return err + } + return nil +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func (parser *Parser) scanVersionDirectiveNumber(start_mark Mark, number *int8) error { + // Repeat while the next character is digit. + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + var value, length int8 + for isDigit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return formatScannerErrorContext("while scanning a %YAML directive", start_mark, + "found extremely long version number", parser.mark) + } + value = value*10 + int8(asDigit(parser.buffer, parser.buffer_pos)) + parser.skip() + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + } + + // Check if the number was present. + if length == 0 { + return formatScannerErrorContext("while scanning a %YAML directive", start_mark, + "did not find expected version number", parser.mark) + } + *number = value + return nil +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +func (parser *Parser) scanTagDirectiveValue(start_mark Mark, handle, prefix *[]byte) error { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + + for isBlank(parser.buffer, parser.buffer_pos) { + parser.skip() + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + } + + // Scan a handle. 
+ if err := parser.scanTagHandle(true, start_mark, &handle_value); err != nil { + return err + } + + // Expect a whitespace. + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + if !isBlank(parser.buffer, parser.buffer_pos) { + return formatScannerErrorContext("while scanning a %TAG directive", start_mark, + "did not find expected whitespace", parser.mark) + } + + // Eat whitespaces. + for isBlank(parser.buffer, parser.buffer_pos) { + parser.skip() + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + } + + // Scan a prefix (TAG directive URI - flow indicators allowed). + if err := parser.scanTagURI(true, true, nil, start_mark, &prefix_value); err != nil { + return err + } + + // Expect a whitespace or line break. + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + if !isBlankOrZero(parser.buffer, parser.buffer_pos) { + return formatScannerErrorContext("while scanning a %TAG directive", start_mark, + "did not find expected whitespace or line break", parser.mark) + } + + *handle = handle_value + *prefix = prefix_value + return nil +} + +func (parser *Parser) scanAnchor(token *Token, typ TokenType) error { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + parser.skip() + + // Consume the value. + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + + for isAnchorChar(parser.buffer, parser.buffer_pos) { + s = parser.read(s) + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(isBlankOrZero(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' 
|| + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == ANCHOR_TOKEN { + context = "while scanning an anchor" + } + return formatScannerErrorContext(context, start_mark, + "did not find expected alphabetic or numeric character", parser.mark) + } + + // Create a token. + *token = Token{ + Type: typ, + StartMark: start_mark, + EndMark: end_mark, + Value: s, + } + + return nil +} + +/* + * Scan a TAG token. + */ + +func (parser *Parser) scanTag(token *Token) error { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 { + if err := parser.updateBuffer(2); err != nil { + return err + } + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + parser.skip() + parser.skip() + + // Consume the tag value (verbatim tag - flow indicators allowed). + if err := parser.scanTagURI(false, true, nil, start_mark, &suffix); err != nil { + return err + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + return formatScannerErrorContext("while scanning a tag", start_mark, + "did not find the expected '>'", parser.mark) + } + + parser.skip() + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if err := parser.scanTagHandle(false, start_mark, &handle); err != nil { + return err + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now (short form - flow indicators not allowed). 
+ if err := parser.scanTagURI(false, false, nil, start_mark, &suffix); err != nil { + return err + } + } else { + // It wasn't a handle after all. Scan the rest of the tag (short form). + if err := parser.scanTagURI(false, false, handle, start_mark, &suffix); err != nil { + return err + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + if !isBlankOrZero(parser.buffer, parser.buffer_pos) { + return formatScannerErrorContext("while scanning a tag", start_mark, + "did not find expected whitespace or line break", parser.mark) + } + + end_mark := parser.mark + + // Create a token. + *token = Token{ + Type: TAG_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + Value: handle, + suffix: suffix, + } + return nil +} + +// Scan a tag handle. +func (parser *Parser) scanTagHandle(directive bool, start_mark Mark, handle *[]byte) error { + // Check the initial '!' character. + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + if parser.buffer[parser.buffer_pos] != '!' { + return parser.setScannerTagError(directive, + start_mark, "did not find expected '!'") + } + + var s []byte + + // Copy the '!' character. + s = parser.read(s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + for isAlpha(parser.buffer, parser.buffer_pos) { + s = parser.read(s) + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = parser.read(s) + } else { + // It's either the '!' 
tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + return parser.setScannerTagError(directive, + start_mark, "did not find expected '!'") + } + } + + *handle = s + return nil +} + +// Scan a tag URI. +// directive: true if scanning a %TAG directive URI +// verbatim: true if scanning a verbatim tag !<...> or TAG directive (flow indicators allowed) +func (parser *Parser) scanTagURI(directive bool, verbatim bool, head []byte, start_mark Mark, uri *[]byte) error { + // size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', '.', '!', '~', '*', '\'', '(', ')', '%'. + // + // Note: Flow indicators (',', '[', ']', '{', '}') are only allowed in verbatim tags. + for isTagURIChar(parser.buffer, parser.buffer_pos, verbatim) { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if err := parser.scanURIEscapes(directive, start_mark, &s); err != nil { + return err + } + } else { + s = parser.read(s) + } + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + hasTag = true + } + + // Check for characters which are not allowed in tags. + // For non-verbatim tags, if we stopped at a printable character that isn't whitespace, + // it's an invalid tag character - give a specific error. + // For verbatim tags, the caller will check for the expected '>' delimiter. 
+ if !verbatim { + c := parser.buffer[parser.buffer_pos] + if !isBlankOrZero(parser.buffer, parser.buffer_pos) && + c >= 0x20 && c <= 0x7E { + return parser.setScannerTagError(directive, start_mark, + fmt.Sprintf("found character '%c' that is not allowed in a YAML tag", c)) + } + } + + if !hasTag { + return parser.setScannerTagError(directive, + start_mark, "did not find expected tag URI") + } + *uri = s + return nil +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func (parser *Parser) scanURIEscapes(directive bool, start_mark Mark, s *[]byte) error { + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 { + if err := parser.updateBuffer(3); err != nil { + return err + } + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + isHex(parser.buffer, parser.buffer_pos+1) && + isHex(parser.buffer, parser.buffer_pos+2)) { + return parser.setScannerTagError(directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((asHex(parser.buffer, parser.buffer_pos+1) << 4) + asHex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return parser.setScannerTagError(directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return parser.setScannerTagError(directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + parser.skip() + parser.skip() + parser.skip() + w-- + } + return nil +} + +// Scan a block scalar. +func (parser *Parser) scanBlockScalar(token *Token, literal bool) error { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + parser.skip() + + // Scan the additional block scalar indicators. 
+ if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + parser.skip() + + // Check for an indentation indicator. + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + if isDigit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + return formatScannerErrorContext("while scanning a block scalar", start_mark, + "found an indentation indicator equal to 0", parser.mark) + } + + // Get the indentation level and eat the indicator. + increment = asDigit(parser.buffer, parser.buffer_pos) + parser.skip() + } + + } else if isDigit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + return formatScannerErrorContext("while scanning a block scalar", start_mark, + "found an indentation indicator equal to 0", parser.mark) + } + increment = asDigit(parser.buffer, parser.buffer_pos) + parser.skip() + + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + parser.skip() + } + } + + // Eat whitespaces and comments to the end of the line. 
+ if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + for isBlank(parser.buffer, parser.buffer_pos) { + parser.skip() + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + } + if parser.buffer[parser.buffer_pos] == '#' { + if err := parser.scanLineComment(start_mark); err != nil { + return err + } + for !isBreakOrZero(parser.buffer, parser.buffer_pos) { + parser.skip() + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + } + } + + // Check if we are at the end of the line. + if !isBreakOrZero(parser.buffer, parser.buffer_pos) { + return formatScannerErrorContext("while scanning a block scalar", start_mark, + "did not find expected comment or line break", parser.mark) + } + + // Eat a line break. + if isLineBreak(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 { + if err := parser.updateBuffer(2); err != nil { + return err + } + } + parser.skipLine() + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if err := parser.scanBlockScalarBreaks(&indent, &trailing_breaks, start_mark, &end_mark); err != nil { + return err + } + + // Scan the block scalar content. + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + var leading_blank, trailing_blank bool + for parser.mark.Column == indent && !isZeroChar(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = isBlank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. 
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = isBlank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !isBreakOrZero(parser.buffer, parser.buffer_pos) { + s = parser.read(s) + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + } + + // Consume the line break. + if parser.unread < 2 { + if err := parser.updateBuffer(2); err != nil { + return err + } + } + + leading_break = parser.readLine(leading_break) + + // Eat the following indentation spaces and line breaks. + if err := parser.scanBlockScalarBreaks(&indent, &trailing_breaks, start_mark, &end_mark); err != nil { + return err + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = Token{ + Type: SCALAR_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + Value: s, + Style: LITERAL_SCALAR_STYLE, + } + if !literal { + token.Style = FOLDED_SCALAR_STYLE + } + return nil +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func (parser *Parser) scanBlockScalarBreaks(indent *int, breaks *[]byte, start_mark Mark, end_mark *Mark) error { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. 
+ if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + for (*indent == 0 || parser.mark.Column < *indent) && isSpace(parser.buffer, parser.buffer_pos) { + parser.skip() + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + } + if parser.mark.Column > max_indent { + max_indent = parser.mark.Column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.Column < *indent) && isTab(parser.buffer, parser.buffer_pos) { + return formatScannerErrorContext("while scanning a block scalar", start_mark, + "found a tab character where an indentation space is expected", parser.mark) + } + + // Have we found a non-empty line? + if !isLineBreak(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 { + if err := parser.updateBuffer(2); err != nil { + return err + } + } + // [Go] Should really be returning breaks instead. + *breaks = parser.readLine(*breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return nil +} + +// Scan a quoted scalar. +func (parser *Parser) scanFlowScalar(token *Token, single bool) error { + // Eat the left quote. + start_mark := parser.mark + parser.skip() + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 { + if err := parser.updateBuffer(4); err != nil { + return err + } + } + + if parser.mark.Column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + isBlankOrZero(parser.buffer, parser.buffer_pos+3) { + return formatScannerErrorContext("while scanning a quoted scalar", start_mark, + "found unexpected document indicator", parser.mark) + } + + // Check for EOF. + if isZeroChar(parser.buffer, parser.buffer_pos) { + return formatScannerErrorContext("while scanning a quoted scalar", start_mark, + "found unexpected end of stream", parser.mark) + } + + // Consume non-blank characters. + leading_blanks := false + for !isBlankOrZero(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + parser.skip() + parser.skip() + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && isLineBreak(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 { + if err := parser.updateBuffer(3); err != nil { + return err + } + } + parser.skip() + parser.skipLine() + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. 
+ switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + return formatScannerErrorContext("while scanning a quoted scalar", start_mark, + "found unknown escape character", parser.mark) + } + + parser.skip() + parser.skip() + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length { + if err := parser.updateBuffer(code_length); err != nil { + return err + } + } + for k := 0; k < code_length; k++ { + if !isHex(parser.buffer, parser.buffer_pos+k) { + return formatScannerErrorContext("while scanning a quoted scalar", start_mark, + "did not find expected hexadecimal number", parser.mark) + } + value = (value << 4) + asHex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. 
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + return formatScannerErrorContext("while scanning a quoted scalar", start_mark, + "found invalid Unicode character escape code", parser.mark) + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + parser.skip() + } + } + } else { + // It is a non-escaped non-blank character. + s = parser.read(s) + } + if parser.unread < 2 { + if err := parser.updateBuffer(2); err != nil { + return err + } + } + } + + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for isBlank(parser.buffer, parser.buffer_pos) || isLineBreak(parser.buffer, parser.buffer_pos) { + if isBlank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = parser.read(whitespaces) + } else { + parser.skip() + } + } else { + if parser.unread < 2 { + if err := parser.updateBuffer(2); err != nil { + return err + } + } + + // Check if it is a first line break. 
+ if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = parser.readLine(leading_break) + leading_blanks = true + } else { + trailing_breaks = parser.readLine(trailing_breaks) + } + } + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + parser.skip() + end_mark := parser.mark + + // Create a token. + *token = Token{ + Type: SCALAR_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + Value: s, + Style: SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.Style = DOUBLE_QUOTED_SCALAR_STYLE + } + return nil +} + +// Scan a plain scalar. +func (parser *Parser) scanPlainScalar(token *Token) error { + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + indent := parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 { + if err := parser.updateBuffer(4); err != nil { + return err + } + } + if parser.mark.Column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + isBlankOrZero(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. 
+ if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !isBlankOrZero(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && isBlankOrZero(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + (parser.buffer[parser.buffer_pos] == '?' && isBlankOrZero(parser.buffer, parser.buffer_pos+1)) || + parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = parser.read(s) + + end_mark = parser.mark + if parser.unread < 2 { + if err := parser.updateBuffer(2); err != nil { + return err + } + } + } + + // Is it the end? + if !(isBlank(parser.buffer, parser.buffer_pos) || isLineBreak(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + + for isBlank(parser.buffer, parser.buffer_pos) || isLineBreak(parser.buffer, parser.buffer_pos) { + if isBlank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. 
+ if leading_blanks && parser.mark.Column < indent && isTab(parser.buffer, parser.buffer_pos) { + return formatScannerErrorContext("while scanning a plain scalar", start_mark, + "found a tab character that violates indentation", parser.mark) + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = parser.read(whitespaces) + } else { + parser.skip() + } + } else { + if parser.unread < 2 { + if err := parser.updateBuffer(2); err != nil { + return err + } + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = parser.readLine(leading_break) + leading_blanks = true + } else { + trailing_breaks = parser.readLine(trailing_breaks) + } + } + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.Column < indent { + break + } + } + + // Create a token. + *token = Token{ + Type: SCALAR_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + Value: s, + Style: PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
+ if leading_blanks { + parser.simple_key_allowed = true + } + return nil +} + +func (parser *Parser) scanLineComment(token_mark Mark) error { + if parser.newlines > 0 { + return nil + } + + var start_mark Mark + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 { + if parser.updateBuffer(peek+1) != nil { + break + } + } + if isBlank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.Index + peek + for { + if parser.unread < 1 { + if err := parser.updateBuffer(1); err != nil { + return err + } + } + if isBreakOrZero(parser.buffer, parser.buffer_pos) { + if parser.mark.Index >= seen { + break + } + if parser.unread < 2 { + if err := parser.updateBuffer(2); err != nil { + return err + } + } + parser.skipLine() + } else if parser.mark.Index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = parser.read(text) + } else { + parser.skip() + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, Comment{ + ScanMark: token_mark, + TokenMark: token_mark, + StartMark: start_mark, + EndMark: parser.mark, + Line: text, + }) + } + return nil +} + +func (parser *Parser) scanComments(scan_mark Mark) error { + token := parser.tokens[len(parser.tokens)-1] + + if token.Type == FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + token_mark := token.StartMark + var start_mark Mark + next_indent := parser.indent + if next_indent < 0 { + next_indent = 0 + } + + recent_empty := false + first_empty := parser.newlines <= 1 + + line := parser.mark.Line + column := parser.mark.Column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. 
+ foot_line := -1 + if scan_mark.Line > 0 { + foot_line = parser.mark.Line - parser.newlines + 1 + if parser.newlines == 0 && parser.mark.Column > 1 { + foot_line++ + } + } + + peek := 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 { + if parser.updateBuffer(peek+1) != nil { + break + } + } + column++ + if isBlank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + close_flow := parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || isBreakOrZero(parser.buffer, parser.buffer_pos+peek) { + // Got line break or terminator. + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.Line == foot_line && token.Type != VALUE_TOKEN || start_mark.Column-1 < next_indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. + if len(text) > 0 { + if start_mark.Column-1 < next_indent { + // If dedented it's unrelated to the prior token. 
+ token_mark = start_mark + } + parser.comments = append(parser.comments, Comment{ + ScanMark: scan_mark, + TokenMark: token_mark, + StartMark: start_mark, + EndMark: Mark{parser.mark.Index + peek, line, column}, + Foot: text, + }) + scan_mark = Mark{parser.mark.Index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !isLineBreak(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.Column) { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, Comment{ + ScanMark: scan_mark, + TokenMark: token_mark, + StartMark: start_mark, + EndMark: Mark{parser.mark.Index + peek, line, column}, + Foot: text, + }) + scan_mark = Mark{parser.mark.Index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = Mark{parser.mark.Index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. 
		// Consume the comment line that was just peeked at: skip bytes
		// already accounted for by the peek, collect the comment text, and
		// stop once the line break after the comment has been passed.
		seen := parser.mark.Index + peek
		for {
			if parser.unread < 1 {
				if err := parser.updateBuffer(1); err != nil {
					return err
				}
			}
			if isBreakOrZero(parser.buffer, parser.buffer_pos) {
				if parser.mark.Index >= seen {
					break
				}
				if parser.unread < 2 {
					if err := parser.updateBuffer(2); err != nil {
						return err
					}
				}
				parser.skipLine()
			} else if parser.mark.Index >= seen {
				// Past the peeked position: this byte is comment content.
				text = parser.read(text)
			} else {
				parser.skip()
			}
		}

		// Restart peeking from the new position on the following line.
		peek = 0
		column = 0
		line = parser.mark.Line
		next_indent = parser.indent
		if next_indent < 0 {
			next_indent = 0
		}
	}

	// Whatever comment text remains is a head comment for the upcoming token.
	if len(text) > 0 {
		parser.comments = append(parser.comments, Comment{
			ScanMark:  scan_mark,
			TokenMark: start_mark,
			StartMark: start_mark,
			// NOTE(review): EndMark uses peek-1, one byte before the peeked
			// position — presumably to exclude the terminating break; confirm.
			EndMark: Mark{parser.mark.Index + peek - 1, line, column},
			Head:    text,
		})
	}
	return nil
}
diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/serializer.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/serializer.go
new file mode 100644
index 000000000000..7ec160dc8d1f
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/serializer.go
@@ -0,0 +1,219 @@
// Copyright 2011-2019 Canonical Ltd
// Copyright 2025 The go-yaml Project Contributors
// SPDX-License-Identifier: Apache-2.0

// Serializer stage: Converts representation tree (Nodes) to event stream.
// Walks the node tree and produces events for the emitter.

package libyaml

import (
	"strings"
	"unicode/utf8"
)

// node serializes a Node tree into YAML events.
// This is the core of the serializer stage - it walks the tree recursively
// and produces events. tail carries a foot comment inherited from the
// preceding mapping key, to be attached to this node's start event.
func (r *Representer) node(node *Node, tail string) {
	// Zero nodes behave as nil.
	if node.Kind == 0 && node.IsZero() {
		r.nilv()
		return
	}

	// If the tag was not explicitly requested, and dropping it won't change the
	// implicit tag of the value, don't include it in the presentation.
	// Decide whether the node's tag must appear in the output. A tag that
	// merely restates what implicit resolution would produce is dropped.
	tag := node.Tag
	stag := shortTag(tag)
	var forceQuoting bool
	if tag != "" && node.Style&TaggedStyle == 0 {
		if node.Kind == ScalarNode {
			// A str-tagged scalar that already has an explicit quoting or
			// block style needs no tag: the style itself marks it a string.
			if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
				tag = ""
			} else {
				rtag, _ := resolve("", node.Value)
				if rtag == stag && stag != mergeTag {
					// The plain value already resolves to this tag; drop it.
					tag = ""
				} else if stag == strTag {
					// The plain value would resolve to a different tag, so
					// drop the !!str tag but quote the scalar to keep it a
					// string.
					tag = ""
					forceQuoting = true
				}
			}
		} else {
			// Collections: drop the tag when it matches the implicit
			// map/seq tag for the node kind.
			var rtag string
			switch node.Kind {
			case MappingNode:
				rtag = mapTag
			case SequenceNode:
				rtag = seqTag
			}
			if rtag == stag {
				tag = ""
			}
		}
	}

	switch node.Kind {
	case DocumentNode:
		// Emit document start/end events bracketing the document content.
		event := NewDocumentStartEvent(noVersionDirective, noTagDirective, !r.explicitStart)
		event.HeadComment = []byte(node.HeadComment)
		r.emit(event)
		for _, node := range node.Content {
			r.node(node, "")
		}
		event = NewDocumentEndEvent(!r.explicitEnd)
		event.FootComment = []byte(node.FootComment)
		r.emit(event)

	case SequenceNode:
		style := BLOCK_SEQUENCE_STYLE
		// Use flow style if explicitly requested or if it's a simple
		// collection (scalar-only contents that fit within line width,
		// enabled via WithFlowSimpleCollections).
		if node.Style&FlowStyle != 0 || r.isSimpleCollection(node) {
			style = FLOW_SEQUENCE_STYLE
		}
		event := NewSequenceStartEvent([]byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
		event.HeadComment = []byte(node.HeadComment)
		r.emit(event)
		for _, node := range node.Content {
			r.node(node, "")
		}
		event = NewSequenceEndEvent()
		event.LineComment = []byte(node.LineComment)
		event.FootComment = []byte(node.FootComment)
		r.emit(event)

	case MappingNode:
		style := BLOCK_MAPPING_STYLE
		// Use flow style if explicitly requested or if it's a simple
		// collection (scalar-only contents that fit within line width,
		// enabled via WithFlowSimpleCollections).
		if node.Style&FlowStyle != 0 || r.isSimpleCollection(node) {
			style = FLOW_MAPPING_STYLE
		}
		event := NewMappingStartEvent([]byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
		event.TailComment = []byte(tail)
		event.HeadComment = []byte(node.HeadComment)
		r.emit(event)

		// The tail logic below moves the foot comment of prior keys to the
		// following key, since the value for each key may be a nested
		// structure and the foot needs to be processed only after the
		// entirety of the value is streamed. The last tail is processed
		// with the mapping end event.
		// Note: this declaration shadows the function's tail parameter.
		var tail string
		for i := 0; i+1 < len(node.Content); i += 2 {
			k := node.Content[i]
			foot := k.FootComment
			if foot != "" {
				// Copy the key node so the caller's tree is not mutated
				// when the foot comment is detached.
				kopy := *k
				kopy.FootComment = ""
				k = &kopy
			}
			r.node(k, tail)
			tail = foot

			v := node.Content[i+1]
			r.node(v, "")
		}

		event = NewMappingEndEvent()
		event.TailComment = []byte(tail)
		event.LineComment = []byte(node.LineComment)
		event.FootComment = []byte(node.FootComment)
		r.emit(event)

	case AliasNode:
		// An alias is a single event referencing an anchor by name.
		event := NewAliasEvent([]byte(node.Value))
		event.HeadComment = []byte(node.HeadComment)
		event.LineComment = []byte(node.LineComment)
		event.FootComment = []byte(node.FootComment)
		r.emit(event)

	case ScalarNode:
		value := node.Value
		if !utf8.ValidString(value) {
			if stag == binaryTag {
				failf("explicitly tagged !!binary data must be base64-encoded")
			}
			if stag != "" {
				failf("cannot marshal invalid UTF-8 data as %s", stag)
			}
			// It can't be represented directly as YAML so use a binary tag
			// and represent it as base64.
			tag = binaryTag
			value = encodeBase64(value)
		}

		// Choose the scalar presentation style: explicit node styles win,
		// multi-line content falls back to literal style, and a scalar whose
		// plain form would change its type (forceQuoting) uses the configured
		// quote preference.
		style := PLAIN_SCALAR_STYLE
		switch {
		case node.Style&DoubleQuotedStyle != 0:
			style = DOUBLE_QUOTED_SCALAR_STYLE
		case node.Style&SingleQuotedStyle != 0:
			style = SINGLE_QUOTED_SCALAR_STYLE
		case node.Style&LiteralStyle != 0:
			style = LITERAL_SCALAR_STYLE
		case node.Style&FoldedStyle != 0:
			style = FOLDED_SCALAR_STYLE
		case strings.Contains(value, "\n"):
			style = LITERAL_SCALAR_STYLE
		case forceQuoting:
			style = r.quotePreference.ScalarStyle()
		}

		r.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail))
	default:
		failf("cannot represent node with unknown kind %d", node.Kind)
	}
}

// isSimpleCollection checks if a node contains only scalar values and would
// fit within the line width when rendered in flow style. It is only active
// when r.flowSimpleCollections is set (WithFlowSimpleCollections).
func (r *Representer) isSimpleCollection(node *Node) bool {
	if !r.flowSimpleCollections {
		return false
	}
	if node.Kind != SequenceNode && node.Kind != MappingNode {
		return false
	}
	// Check all children are scalars.
	for _, child := range node.Content {
		if child.Kind != ScalarNode {
			return false
		}
	}
	// Estimate flow style length against the configured (or default) width.
	estimatedLen := r.estimateFlowLength(node)
	width := r.lineWidth
	if width <= 0 {
		width = 80 // Default width if not set
	}
	return estimatedLen > 0 && estimatedLen <= width
}

// estimateFlowLength estimates the character length of a node in flow style.
// The estimate counts raw value lengths plus braces and ", " separators; it
// does not account for quoting or escaping the emitter may add.
// NOTE(review): the mapping branch reads Content[i+1], so a mapping node with
// an odd number of Content entries would panic — assumes callers only build
// complete key/value pairs; TODO confirm.
func (r *Representer) estimateFlowLength(node *Node) int {
	if node.Kind == SequenceNode {
		// [item1, item2, ...] = 2 + sum(len(items)) + 2*(len-1)
		length := 2 // []
		for i, child := range node.Content {
			if i > 0 {
				length += 2 // ", "
			}
			length += len(child.Value)
		}
		return length
	}
	if node.Kind == MappingNode {
		// {key1: val1, key2: val2} = 2 + sum(key: val) + 2*(pairs-1)
		length := 2 // {}
		for i := 0; i < len(node.Content); i += 2 {
			if i > 0 {
				length += 2 // ", "
			}
			length += len(node.Content[i].Value) + 2 + len(node.Content[i+1].Value) // "key: val"
		}
		return length
	}
	return 0
}
diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/writer.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/writer.go
new file mode 100644
index 000000000000..dcd51d6ce3cc
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/writer.go
@@ -0,0 +1,31 @@
// Copyright 2006-2010 Kirill Simonov
// Copyright 2011-2019 Canonical Ltd
// Copyright 2025 The go-yaml Project Contributors
// SPDX-License-Identifier: Apache-2.0 AND MIT

// Output writer with buffering.
// Provides write buffering for the emitter stage.

package libyaml

import "fmt"

// flush writes the emitter's buffered output through the write handler and
// resets the buffer. A nil write handler is a programming error and panics;
// a handler failure is wrapped in a WriterError.
func (emitter *Emitter) flush() error {
	if emitter.write_handler == nil {
		panic("write handler not set")
	}

	// Check if the buffer is empty.
	if emitter.buffer_pos == 0 {
		return nil
	}

	if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
		return WriterError{
			Err: fmt.Errorf("write error: %w", err),
		}
	}
	emitter.buffer_pos = 0
	return nil
}
diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/yaml.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/yaml.go
new file mode 100644
index 000000000000..f9f4f1f4938c
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/yaml.go
@@ -0,0 +1,834 @@
// Copyright 2006-2010 Kirill Simonov
// Copyright 2011-2019 Canonical Ltd
// Copyright 2025 The go-yaml Project Contributors
// SPDX-License-Identifier: Apache-2.0 AND MIT

// Core libyaml types and structures.
+// Defines Parser, Emitter, Event, Token, and related constants for YAML +// processing. + +package libyaml + +import ( + "fmt" + "io" + "strings" +) + +// VersionDirective holds the YAML version directive data. +type VersionDirective struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// Major returns the major version number. +func (v *VersionDirective) Major() int { return int(v.major) } + +// Minor returns the minor version number. +func (v *VersionDirective) Minor() int { return int(v.minor) } + +// TagDirective holds the YAML tag directive data. +type TagDirective struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +// GetHandle returns the tag handle. +func (t *TagDirective) GetHandle() string { return string(t.handle) } + +// GetPrefix returns the tag prefix. +func (t *TagDirective) GetPrefix() string { return string(t.prefix) } + +type Encoding int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + ANY_ENCODING Encoding = iota + + UTF8_ENCODING // The default UTF-8 encoding. + UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type LineBreak int + +// Line break types. +const ( + // Let the parser choose the break type. + ANY_BREAK LineBreak = iota + + CR_BREAK // Use CR for line breaks (Mac style). + LN_BREAK // Use LN for line breaks (Unix style). + CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type QuoteStyle int + +// Quote style types for required quoting. +const ( + QuoteSingle QuoteStyle = iota // Prefer single quotes when quoting is required. + QuoteDouble // Prefer double quotes when quoting is required. + QuoteLegacy // Legacy behavior: double in representer, single in emitter. +) + +// ScalarStyle returns the scalar style for this quote preference in the +// representer/serializer context. +// In this context, both QuoteDouble and QuoteLegacy use double quotes. 
+func (q QuoteStyle) ScalarStyle() ScalarStyle { + if q == QuoteDouble || q == QuoteLegacy { + return DOUBLE_QUOTED_SCALAR_STYLE + } + return SINGLE_QUOTED_SCALAR_STYLE +} + +type ErrorType int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + NO_ERROR ErrorType = iota + + MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + READER_ERROR // Cannot read or decode the input stream. + SCANNER_ERROR // Cannot scan the input stream. + PARSER_ERROR // Cannot parse the input stream. + COMPOSER_ERROR // Cannot compose a YAML document. + WRITER_ERROR // Cannot write to the output stream. + EMITTER_ERROR // Cannot emit a YAML stream. +) + +// Mark holds the pointer position. +type Mark struct { + Index int // The position index. + Line int // The position line (1-indexed). + Column int // The position column (0-indexed internally, displayed as 1-indexed). +} + +func (m Mark) String() string { + var builder strings.Builder + if m.Line == 0 { + return "" + } + + fmt.Fprintf(&builder, "line %d", m.Line) + if m.Column != 0 { + fmt.Fprintf(&builder, ", column %d", m.Column+1) + } + + return builder.String() +} + +// Node Styles + +type styleInt int8 + +type ScalarStyle styleInt + +// Scalar styles. +const ( + // Let the emitter choose the style. + ANY_SCALAR_STYLE ScalarStyle = 0 + + PLAIN_SCALAR_STYLE ScalarStyle = 1 << iota // The plain scalar style. + SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + LITERAL_SCALAR_STYLE // The literal scalar style. + FOLDED_SCALAR_STYLE // The folded scalar style. +) + +// String returns a string representation of a [ScalarStyle]. 
+func (style ScalarStyle) String() string { + switch style { + case PLAIN_SCALAR_STYLE: + return "Plain" + case SINGLE_QUOTED_SCALAR_STYLE: + return "Single" + case DOUBLE_QUOTED_SCALAR_STYLE: + return "Double" + case LITERAL_SCALAR_STYLE: + return "Literal" + case FOLDED_SCALAR_STYLE: + return "Folded" + default: + return "" + } +} + +type SequenceStyle styleInt + +// Sequence styles. +const ( + // Let the emitter choose the style. + ANY_SEQUENCE_STYLE SequenceStyle = iota + + BLOCK_SEQUENCE_STYLE // The block sequence style. + FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type MappingStyle styleInt + +// Mapping styles. +const ( + // Let the emitter choose the style. + ANY_MAPPING_STYLE MappingStyle = iota + + BLOCK_MAPPING_STYLE // The block mapping style. + FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type TokenType int + +// Token types. +const ( + // An empty token. + NO_TOKEN TokenType = iota + + STREAM_START_TOKEN // A STREAM-START token. + STREAM_END_TOKEN // A STREAM-END token. + + VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + DOCUMENT_START_TOKEN // A DOCUMENT-START token. + DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + BLOCK_END_TOKEN // A BLOCK-END token. + + FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + KEY_TOKEN // A KEY token. + VALUE_TOKEN // A VALUE token. + + ALIAS_TOKEN // An ALIAS token. + ANCHOR_TOKEN // An ANCHOR token. + TAG_TOKEN // A TAG token. + SCALAR_TOKEN // A SCALAR token. + COMMENT_TOKEN // A COMMENT token. 
+) + +func (tt TokenType) String() string { + switch tt { + case NO_TOKEN: + return "NO_TOKEN" + case STREAM_START_TOKEN: + return "STREAM_START_TOKEN" + case STREAM_END_TOKEN: + return "STREAM_END_TOKEN" + case VERSION_DIRECTIVE_TOKEN: + return "VERSION_DIRECTIVE_TOKEN" + case TAG_DIRECTIVE_TOKEN: + return "TAG_DIRECTIVE_TOKEN" + case DOCUMENT_START_TOKEN: + return "DOCUMENT_START_TOKEN" + case DOCUMENT_END_TOKEN: + return "DOCUMENT_END_TOKEN" + case BLOCK_SEQUENCE_START_TOKEN: + return "BLOCK_SEQUENCE_START_TOKEN" + case BLOCK_MAPPING_START_TOKEN: + return "BLOCK_MAPPING_START_TOKEN" + case BLOCK_END_TOKEN: + return "BLOCK_END_TOKEN" + case FLOW_SEQUENCE_START_TOKEN: + return "FLOW_SEQUENCE_START_TOKEN" + case FLOW_SEQUENCE_END_TOKEN: + return "FLOW_SEQUENCE_END_TOKEN" + case FLOW_MAPPING_START_TOKEN: + return "FLOW_MAPPING_START_TOKEN" + case FLOW_MAPPING_END_TOKEN: + return "FLOW_MAPPING_END_TOKEN" + case BLOCK_ENTRY_TOKEN: + return "BLOCK_ENTRY_TOKEN" + case FLOW_ENTRY_TOKEN: + return "FLOW_ENTRY_TOKEN" + case KEY_TOKEN: + return "KEY_TOKEN" + case VALUE_TOKEN: + return "VALUE_TOKEN" + case ALIAS_TOKEN: + return "ALIAS_TOKEN" + case ANCHOR_TOKEN: + return "ANCHOR_TOKEN" + case TAG_TOKEN: + return "TAG_TOKEN" + case SCALAR_TOKEN: + return "SCALAR_TOKEN" + case COMMENT_TOKEN: + return "COMMENT_TOKEN" + } + return "" +} + +// Token holds information about a scanning token. +type Token struct { + // The token type. + Type TokenType + + // The start/end of the token. + StartMark, EndMark Mark + + // The stream encoding (for STREAM_START_TOKEN). + encoding Encoding + + // The alias/anchor/scalar Value or tag/tag directive handle + // (for ALIAS_TOKEN, ANCHOR_TOKEN, SCALAR_TOKEN, TAG_TOKEN, TAG_DIRECTIVE_TOKEN). + Value []byte + + // The tag suffix (for TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar Style (for SCALAR_TOKEN). 
+ Style ScalarStyle + + // The version directive major/minor (for VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type EventType int8 + +// Event types. +const ( + // An empty event. + NO_EVENT EventType = iota + + STREAM_START_EVENT // A STREAM-START event. + STREAM_END_EVENT // A STREAM-END event. + DOCUMENT_START_EVENT // A DOCUMENT-START event. + DOCUMENT_END_EVENT // A DOCUMENT-END event. + ALIAS_EVENT // An ALIAS event. + SCALAR_EVENT // A SCALAR event. + SEQUENCE_START_EVENT // A SEQUENCE-START event. + SEQUENCE_END_EVENT // A SEQUENCE-END event. + MAPPING_START_EVENT // A MAPPING-START event. + MAPPING_END_EVENT // A MAPPING-END event. + TAIL_COMMENT_EVENT +) + +var eventStrings = []string{ + NO_EVENT: "none", + STREAM_START_EVENT: "stream start", + STREAM_END_EVENT: "stream end", + DOCUMENT_START_EVENT: "document start", + DOCUMENT_END_EVENT: "document end", + ALIAS_EVENT: "alias", + SCALAR_EVENT: "scalar", + SEQUENCE_START_EVENT: "sequence start", + SEQUENCE_END_EVENT: "sequence end", + MAPPING_START_EVENT: "mapping start", + MAPPING_END_EVENT: "mapping end", + TAIL_COMMENT_EVENT: "tail comment", +} + +func (e EventType) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// Event holds information about a parsing or emitting event. +type Event struct { + // The event type. + Type EventType + + // The start and end of the event. + StartMark, EndMark Mark + + // The document encoding (for STREAM_START_EVENT). + encoding Encoding + + // The version directive (for DOCUMENT_START_EVENT). + versionDirective *VersionDirective + + // The list of tag directives (for DOCUMENT_START_EVENT). + tagDirectives []TagDirective + + // The comments + HeadComment []byte + LineComment []byte + FootComment []byte + TailComment []byte + + // The Anchor (for SCALAR_EVENT, SEQUENCE_START_EVENT, MAPPING_START_EVENT, ALIAS_EVENT). 
+ Anchor []byte + + // The Tag (for SCALAR_EVENT, SEQUENCE_START_EVENT, MAPPING_START_EVENT). + Tag []byte + + // The scalar Value (for SCALAR_EVENT). + Value []byte + + // Is the document start/end indicator Implicit, or the tag optional? + // (for DOCUMENT_START_EVENT, DOCUMENT_END_EVENT, SEQUENCE_START_EVENT, MAPPING_START_EVENT, SCALAR_EVENT). + Implicit bool + + // Is the tag optional for any non-plain style? (for SCALAR_EVENT). + quoted_implicit bool + + // The Style (for SCALAR_EVENT, SEQUENCE_START_EVENT, MAPPING_START_EVENT). + Style Style +} + +func (e *Event) ScalarStyle() ScalarStyle { return ScalarStyle(e.Style) } +func (e *Event) SequenceStyle() SequenceStyle { return SequenceStyle(e.Style) } +func (e *Event) MappingStyle() MappingStyle { return MappingStyle(e.Style) } + +// GetEncoding returns the stream encoding (for STREAM_START_EVENT). +func (e *Event) GetEncoding() Encoding { return e.encoding } + +// GetVersionDirective returns the version directive (for DOCUMENT_START_EVENT). +func (e *Event) GetVersionDirective() *VersionDirective { return e.versionDirective } + +// GetTagDirectives returns the tag directives (for DOCUMENT_START_EVENT). +func (e *Event) GetTagDirectives() []TagDirective { return e.tagDirectives } + +// Nodes + +const ( + NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. 
+ BINARY_TAG = "tag:yaml.org,2002:binary" + MERGE_TAG = "tag:yaml.org,2002:merge" + + DEFAULT_SCALAR_TAG = STR_TAG // The default scalar tag is !!str. + DEFAULT_SEQUENCE_TAG = SEQ_TAG // The default sequence tag is !!seq. + DEFAULT_MAPPING_TAG = MAP_TAG // The default mapping tag is !!map. +) + +type NodeType int + +// Node types. +const ( + // An empty node. + NO_NODE NodeType = iota + + SCALAR_NODE // A scalar node. + SEQUENCE_NODE // A sequence node. + MAPPING_NODE // A mapping node. +) + +// NodeItem represents an element of a sequence node. +type NodeItem int + +// NodePair represents an element of a mapping node. +type NodePair struct { + key int // The key of the element. + value int // The value of the element. +} + +// parserNode represents a single node in the YAML document tree. +type parserNode struct { + typ NodeType // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style ScalarStyle // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []NodeItem // The stack of sequence items. + style SequenceStyle // The sequence style. + } + + // The mapping parameters (for MAPPING_NODE). + mapping struct { + pairs_data []NodePair // The stack of mapping pairs (key, value). + pairs_start *NodePair // The beginning of the stack. + pairs_end *NodePair // The end of the stack. + pairs_top *NodePair // The top of the stack. + style MappingStyle // The mapping style. + } + + start_mark Mark // The beginning of the node. + end_mark Mark // The end of the node. +} + +// Document structure. +type Document struct { + // The document nodes. + nodes []parserNode + + // The version directive. + version_directive *VersionDirective + + // The list of tag directives. 
+ tag_directives_data []TagDirective + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark Mark +} + +// ReadHandler is called when the [Parser] needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// +// yamlParser.setInput(). +// +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type ReadHandler func(parser *Parser, buffer []byte) (n int, err error) + +// SimpleKey holds information about a potential simple key. +type SimpleKey struct { + flow_level int // What flow level is the key at? + required bool // Is a simple key required? + token_number int // The number of the token. + mark Mark // The position mark. +} + +// ParserState represents the state of the parser. +type ParserState int + +const ( + PARSE_STREAM_START_STATE ParserState = iota + + PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + PARSE_BLOCK_NODE_STATE // Expect a block node. + PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. 
+ PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + PARSE_END_STATE // Expect nothing. 
+) + +func (ps ParserState) String() string { + switch ps { + case PARSE_STREAM_START_STATE: + return "PARSE_STREAM_START_STATE" + case PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "PARSE_IMPLICIT_DOCUMENT_START_STATE" + case PARSE_DOCUMENT_START_STATE: + return "PARSE_DOCUMENT_START_STATE" + case PARSE_DOCUMENT_CONTENT_STATE: + return "PARSE_DOCUMENT_CONTENT_STATE" + case PARSE_DOCUMENT_END_STATE: + return "PARSE_DOCUMENT_END_STATE" + case PARSE_BLOCK_NODE_STATE: + return "PARSE_BLOCK_NODE_STATE" + case PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case PARSE_BLOCK_MAPPING_KEY_STATE: + return "PARSE_BLOCK_MAPPING_KEY_STATE" + case PARSE_BLOCK_MAPPING_VALUE_STATE: + return "PARSE_BLOCK_MAPPING_VALUE_STATE" + case PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case PARSE_FLOW_MAPPING_KEY_STATE: + return "PARSE_FLOW_MAPPING_KEY_STATE" + case PARSE_FLOW_MAPPING_VALUE_STATE: + return "PARSE_FLOW_MAPPING_VALUE_STATE" + case PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case PARSE_END_STATE: + return "PARSE_END_STATE" + } + return "" +} + +// AliasData holds information about aliases. 
+type AliasData struct { + anchor []byte // The anchor. + index int // The node id. + mark Mark // The anchor mark. +} + +// Parser structure holds all information about the current +// state of the parser. +type Parser struct { + lastError error + + // Reader stuff + read_handler ReadHandler // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + newlines int // The number of line breaks since last non-break/non-blank character + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding Encoding // The input encoding. + + offset int // The offset of the current position (in bytes). + mark Mark // The mark of the current position. + + // Comments + + HeadComment []byte // The current head comments + LineComment []byte // The current line comments + FootComment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) + + comments []Comment // The folded comments for all parsed tokens + comments_head int + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []Token // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. 
+ + simple_key_allowed bool // May a simple key occur at the current position? + simple_key_possible bool // Is the current simple key possible? + simple_key SimpleKey // The current simple key. + simple_key_stack []SimpleKey // The stack of simple keys. + + // Parser stuff + + state ParserState // The current parser state. + states []ParserState // The parser states stack. + marks []Mark // The stack of marks. + tag_directives []TagDirective // The list of TAG directives. + + // Representer stuff + + aliases []AliasData // The alias data. + + document *Document // The currently parsed document. +} + +type Comment struct { + ScanMark Mark // Position where scanning for comments started + TokenMark Mark // Position after which tokens will be associated with this comment + StartMark Mark // Position of '#' comment mark + EndMark Mark // Position where comment terminated + + Head []byte + Line []byte + Foot []byte +} + +// Emitter Definitions + +// WriteHandler is called when the [Emitter] needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// +// yamlEmitter.setOutput(). +// +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +type WriteHandler func(emitter *Emitter, buffer []byte) error + +type EmitterState int + +// The emitter states. +const ( + // Expect STREAM-START. + EMIT_STREAM_START_STATE EmitterState = iota + + EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. 
+ EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out + EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out + EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + EMIT_END_STATE // Expect nothing. +) + +// Emitter holds all information about the current state of the emitter. +type Emitter struct { + // Writer stuff + + write_handler WriteHandler // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + encoding Encoding // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + BestIndent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break LineBreak // The preferred line break. + quotePreference QuoteStyle // Preferred quote style when quoting is required. 
+ + state EmitterState // The current emitter state. + states []EmitterState // The stack of states. + + events []Event // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []TagDirective // The list of tag directives. + + indent int // The current indentation level. + + CompactSequenceIndent bool // Is '- ' is considered part of the indentation for sequence elements? + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + OpenEnded bool // If an explicit document end is required? + + space_above bool // Is there's an empty line above? + foot_indent int // The indent used to write the foot comment above, or -1 if none. + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expressed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style ScalarStyle // The output style. 
+ } + + // Comments + HeadComment []byte + LineComment []byte + FootComment []byte + TailComment []byte + + key_line_comment []byte + + // Representer stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *Document // The currently emitted document. +} diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/yamldatatest_loader.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/yamldatatest_loader.go new file mode 100644 index 000000000000..5ca2ff0eab5c --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/yamldatatest_loader.go @@ -0,0 +1,192 @@ +// Copyright 2025 The go-yaml Project Contributors +// SPDX-License-Identifier: Apache-2.0 + +// YAML test data loading utilities. +// Provides helper functions for loading and processing YAML test data, +// including scalar coercion. 
// coerceScalar converts a YAML scalar string to an appropriate Go type.
//
// Resolution order (first match wins):
//   - "true"/"false" -> bool; "null" -> nil.
//   - "0x"/"0X"-prefixed hex integers -> int (needed for test data byte arrays).
//   - Values containing "." -> float64.
//   - Decimal integers -> int if the value fits, otherwise int64
//     (keeps large values correct on 32-bit systems).
//   - Anything else is returned unchanged as string.
//
// Parsing uses strconv, which requires the WHOLE value to match: inputs such
// as "12abc" or "1.5x" stay strings. (fmt.Sscanf, used previously, stopped at
// the first non-matching byte and silently coerced "12abc" to 12.)
func coerceScalar(value string) any {
	// Bool and null literals.
	switch value {
	case "true":
		return true
	case "false":
		return false
	case "null":
		return nil
	}

	// Hex int (0x or 0X prefix) - needed for test data byte arrays.
	lower := strings.ToLower(value)
	if strings.HasPrefix(lower, "0x") {
		if hexVal, err := strconv.ParseInt(lower[2:], 16, 64); err == nil {
			if hexVal == int64(int(hexVal)) {
				return int(hexVal)
			}
			return hexVal
		}
	}

	// Float - must be checked before int so "1.5" is not truncated to 1.
	// The "." guard keeps integer-looking values out of the float path.
	if strings.Contains(value, ".") {
		if floatVal, err := strconv.ParseFloat(value, 64); err == nil {
			return floatVal
		}
	}

	// Decimal int - parse as int64 to handle large values on 32-bit systems.
	if int64Val, err := strconv.ParseInt(value, 10, 64); err == nil {
		// Return as int if it fits, otherwise int64.
		if int64Val == int64(int(int64Val)) {
			return int(int64Val)
		}
		return int64Val
	}

	// Default to string.
	return value
}

// LoadYAML parses YAML data using the native libyaml Parser.
// This function is exported so it can be used by other packages for data-driven testing.
// It returns a generic interface{} which is typically:
//   - map[string]interface{} for YAML mappings
//   - []interface{} for YAML sequences
//   - scalar values, resolved according to the following rules:
//   - Booleans: "true" and "false" are returned as bool (true/false).
//   - Nulls: "null" is returned as nil.
//   - Hex integers: "0x"/"0X"-prefixed values are parsed as int.
//   - Floats: values containing "." are parsed as float64.
//   - Decimal integers: values matching integer format are parsed as int.
//   - All other values are returned as string.
//
// This scalar resolution behavior matches the implementation in coerceScalar.
func LoadYAML(data []byte) (any, error) {
	parser := NewParser()
	// NOTE: despite the name, SetInputString is fed the raw []byte here.
	parser.SetInputString(data)
	defer parser.Delete()

	// stackEntry tracks one open container while the event stream is walked.
	// For maps, key holds the most recent scalar seen while a value is still
	// pending; key == "" means "next scalar is a key".
	// NOTE(review): an empty-string mapping key ("" : v) would collide with
	// that sentinel and be treated as a pending key — confirm test data never
	// uses empty keys.
	type stackEntry struct {
		container any    // map[string]any or []any
		key       string // for maps: current key waiting for value
	}

	var stack []stackEntry
	var root any

	// Pull events until the stream ends; containers are assembled bottom-up
	// on the stack. NOTE(review): for multi-document streams, root is simply
	// overwritten, so the LAST document wins — confirm callers only pass
	// single-document input.
	for {
		var event Event
		if err := parser.Parse(&event); err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return nil, err
		}

		switch event.Type {
		case STREAM_END_EVENT:
			// End of stream, we're done
			return root, nil

		case STREAM_START_EVENT, DOCUMENT_START_EVENT:
			// Structural markers, no action needed

		case MAPPING_START_EVENT:
			newMap := make(map[string]any)
			stack = append(stack, stackEntry{container: newMap})

		case MAPPING_END_EVENT:
			if len(stack) > 0 {
				popped := stack[len(stack)-1]
				stack = stack[:len(stack)-1]

				// Add completed map to parent or set as root
				if len(stack) == 0 {
					root = popped.container
				} else {
					parent := &stack[len(stack)-1]
					if m, ok := parent.container.(map[string]any); ok {
						m[parent.key] = popped.container
						parent.key = "" // Reset key after use
					} else if s, ok := parent.container.([]any); ok {
						// append may reallocate, so the slice is written back
						// through the pointer into the stack entry.
						parent.container = append(s, popped.container)
					}
				}
			}

		case SEQUENCE_START_EVENT:
			newSlice := make([]any, 0)
			stack = append(stack, stackEntry{container: newSlice})

		case SEQUENCE_END_EVENT:
			if len(stack) > 0 {
				popped := stack[len(stack)-1]
				stack = stack[:len(stack)-1]

				// Add completed slice to parent or set as root
				if len(stack) == 0 {
					root = popped.container
				} else {
					parent := &stack[len(stack)-1]
					if m, ok := parent.container.(map[string]any); ok {
						m[parent.key] = popped.container
						parent.key = "" // Reset key after use
					} else if s, ok := parent.container.([]any); ok {
						parent.container = append(s, popped.container)
					}
				}
			}

		case SCALAR_EVENT:
			value := string(event.Value)
			// Only coerce plain (unquoted) scalars; quoted scalars always
			// stay strings.
			isQuoted := ScalarStyle(event.Style) != PLAIN_SCALAR_STYLE

			if len(stack) == 0 {
				// Scalar at root level
				if isQuoted {
					root = value
				} else {
					root = coerceScalar(value)
				}
			} else {
				parent := &stack[len(stack)-1]
				if m, ok := parent.container.(map[string]any); ok {
					if parent.key == "" {
						// This scalar is a key - keep as string, don't coerce
						parent.key = value
					} else {
						// This scalar is a value
						if isQuoted {
							m[parent.key] = value
						} else {
							m[parent.key] = coerceScalar(value)
						}
						parent.key = ""
					}
				} else if s, ok := parent.container.([]any); ok {
					// Add to sequence
					if isQuoted {
						parent.container = append(s, value)
					} else {
						parent.container = append(s, coerceScalar(value))
					}
				}
			}

		case DOCUMENT_END_EVENT:
			// Document end marker, continue processing

		case ALIAS_EVENT, TAIL_COMMENT_EVENT:
			// For now, skip aliases and comments (not used in test data).
			// NOTE(review): anchors/aliases are silently dropped, not
			// resolved — an aliased node simply disappears from the output.
		}
	}

	return root, nil
}
diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/yamlprivate.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/yamlprivate.go
new file mode 100644
index 000000000000..d23af7d9fac0
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/yamlprivate.go
@@ -0,0 +1,249 @@
// Copyright 2006-2010 Kirill Simonov
// Copyright 2011-2019 Canonical Ltd
// Copyright 2025 The go-yaml Project Contributors
// SPDX-License-Identifier: Apache-2.0 AND MIT

// Internal constants and buffer sizes.
// Defines buffer sizes, stack sizes, and other internal configuration
// constants for libyaml.

package libyaml

const (
	// The size of the input raw buffer.
	input_raw_buffer_size = 512

	// The size of the input buffer.
	// It should be possible to decode the whole raw buffer.
	input_buffer_size = input_raw_buffer_size * 3

	// The size of the output buffer.
	output_buffer_size = 128

	// The size of other stacks and queues.
	// Initial capacities for internal stacks, queues and scratch strings;
	// all grow on demand.
	initial_stack_size  = 16
	initial_queue_size  = 16
	initial_string_size = 16
)

// Check if the character at the specified position is an alphabetical
// character, a digit, '_', or '-'.
func isAlpha(b []byte, i int) bool {
	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' ||
		b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
}

// Check if the character at the specified position is a flow indicator as
// defined by spec production [23] c-flow-indicator ::=
// c-collect-entry | c-sequence-start | c-sequence-end |
// c-mapping-start | c-mapping-end
func isFlowIndicator(b []byte, i int) bool {
	return b[i] == '[' || b[i] == ']' ||
		b[i] == '{' || b[i] == '}' || b[i] == ','
}

// Check if the character at the specified position is valid for anchor names
// as defined by spec production [102] ns-anchor-char ::= ns-char -
// c-flow-indicator.
// This includes all printable characters except: CR, LF, BOM, space, tab, '[',
// ']', '{', '}', ','.
// We further limit it to ascii chars only, which is a subset of the spec
// production but is usually what most people expect.
func isAnchorChar(b []byte, i int) bool {
	if isColon(b, i) {
		// [Go] we exclude colons from anchor/alias names.
		//
		// A colon is a valid anchor character according to the YAML 1.2 specification,
		// but it can lead to ambiguity.
		// https://github.com/yaml/go-yaml/issues/109
		//
		// Also, it would have been a breaking change to support it, as go.yaml.in/yaml/v3 ignores it.
		// Supporting it could lead to unexpected behavior.
		return false
	}

	// NOTE(review): isPrintable and isLineBreak may read b[i+1]/b[i+2] for
	// multi-byte sequences — presumably the reader guarantees lookahead
	// bytes are buffered; confirm at call sites.
	return isPrintable(b, i) &&
		!isLineBreak(b, i) &&
		!isBlank(b, i) &&
		!isBOM(b, i) &&
		!isFlowIndicator(b, i) &&
		isASCII(b, i)
}

// isColon checks whether the character at the specified position is a colon.
func isColon(b []byte, i int) bool {
	return b[i] == ':'
}

// Check if the character at the specified position is valid in a tag URI.
//
// The set of valid characters is:
//
//	'0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
//	'=', '+', '$', '.', '!', '~', '*', '\'', '(', ')', '%'.
//
// If verbatim is true, flow indicators (',', '[', ']', '{', '}') are also
// allowed.
func isTagURIChar(b []byte, i int, verbatim bool) bool {
	c := b[i]
	// isAlpha covers: 0-9, A-Z, a-z, _, -
	if isAlpha(b, i) {
		return true
	}
	// Check special URI characters
	switch c {
	case ';', '/', '?', ':', '@', '&', '=', '+', '$', '.', '!', '~', '*', '\'', '(', ')', '%':
		return true
	case ',', '[', ']', '{', '}':
		return verbatim
	}
	return false
}

// Check if the character at the specified position is a digit.
func isDigit(b []byte, i int) bool {
	return b[i] >= '0' && b[i] <= '9'
}

// Get the value of a digit.
func asDigit(b []byte, i int) int {
	return int(b[i]) - '0'
}

// Check if the character at the specified position is a hex-digit.
func isHex(b []byte, i int) bool {
	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' ||
		b[i] >= 'a' && b[i] <= 'f'
}

// Get the value of a hex-digit.
// Callers are expected to have validated the byte with isHex first; other
// input yields a meaningless result.
func asHex(b []byte, i int) int {
	bi := b[i]
	if bi >= 'A' && bi <= 'F' {
		return int(bi) - 'A' + 10
	}
	if bi >= 'a' && bi <= 'f' {
		return int(bi) - 'a' + 10
	}
	return int(bi) - '0'
}

// Check if the character is ASCII.
func isASCII(b []byte, i int) bool {
	return b[i] <= 0x7F
}

// Check if the character at the start of the buffer can be printed unescaped.
// Operates on raw UTF-8: multi-byte ranges are tested via the lead byte and
// up to two continuation bytes (b[i+1], b[i+2]), so the buffer must hold the
// complete encoded character at i.
func isPrintable(b []byte, i int) bool {
	return ((b[i] == 0x0A) || // . == #x0A
		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
		(b[i] > 0xC2 && b[i] < 0xED) ||
		(b[i] == 0xED && b[i+1] < 0xA0) ||
		(b[i] == 0xEE) ||
		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
}

// Check if the character at the specified position is NUL.
func isZeroChar(b []byte, i int) bool {
	return b[i] == 0x00
}

// Check if the beginning of the buffer is a BOM.
// NOTE(review): this inspects b[0..2] and ignores i, mirroring the
// historical go-yaml is_bom helper — verify callers only rely on it when
// the position of interest is the start of the buffer.
func isBOM(b []byte, i int) bool {
	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
}

// Check if the character at the specified position is space.
func isSpace(b []byte, i int) bool {
	return b[i] == ' '
}

// Check if the character at the specified position is tab.
func isTab(b []byte, i int) bool {
	return b[i] == '\t'
}

// Check if the character at the specified position is blank (space or tab).
func isBlank(b []byte, i int) bool {
	// return isSpace(b, i) || isTab(b, i)
	return b[i] == ' ' || b[i] == '\t'
}

// Check if the character at the specified position is a line break.
func isLineBreak(b []byte, i int) bool {
	return (b[i] == '\r' || // CR (#xD)
		b[i] == '\n' || // LF (#xA)
		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
}

// Check if the two characters at the specified position are a CR LF pair.
func isCRLF(b []byte, i int) bool {
	return b[i] == '\r' && b[i+1] == '\n'
}

// Check if the character is a line break or NUL.
// The helper calls are manually inlined (see the commented-out form) —
// presumably to stay within the compiler's inlining budget on this hot path.
func isBreakOrZero(b []byte, i int) bool {
	// return isLineBreak(b, i) || isZeroChar(b, i)
	return (
	// isBreak:
	b[i] == '\r' || // CR (#xD)
		b[i] == '\n' || // LF (#xA)
		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
		// isZeroChar:
		b[i] == 0)
}

// Check if the character is a line break, space, or NUL.
+func isSpaceOrZero(b []byte, i int) bool { + // return isSpace(b, i) || isBreakOrZero(b, i) + return ( + // isSpace: + b[i] == ' ' || + // isBreakOrZero: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func isBlankOrZero(b []byte, i int) bool { + // return isBlank(b, i) || isBreakOrZero(b, i) + return ( + // isBlank: + b[i] == ' ' || b[i] == '\t' || + // isBreakOrZero: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 +} diff --git a/vendor/go.yaml.in/yaml/v4/loader.go b/vendor/go.yaml.in/yaml/v4/loader.go new file mode 100644 index 000000000000..aa9757fff1db --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/loader.go @@ -0,0 +1,231 @@ +// Copyright 2025 The go-yaml Project Contributors +// SPDX-License-Identifier: Apache-2.0 + +// This file contains the Loader API for reading YAML documents. +// +// Primary functions: +// - Load: Decode YAML document(s) into a value (use WithAll for multi-doc) +// - NewLoader: Create a streaming loader from io.Reader + +package yaml + +import ( + "bytes" + "errors" + "io" + "reflect" + + "go.yaml.in/yaml/v4/internal/libyaml" +) + +// Load decodes YAML document(s) with the given options. 
//
// By default, Load requires exactly one document in the input.
// If zero documents are found, it returns an error.
// If multiple documents are found, it returns an error.
//
// Use WithAllDocuments() to load all documents into a slice:
//
//	var configs []Config
//	yaml.Load(multiDocYAML, &configs, yaml.WithAllDocuments())
//
// When WithAllDocuments is used, out must be a pointer to a slice.
// Each document is decoded into the slice element type.
// Zero documents results in an empty slice (no error).
//
// Maps and pointers (to a struct, string, int, etc) are accepted as out
// values. If an internal pointer within a struct is not initialized,
// the yaml package will initialize it if necessary. The out parameter
// must not be nil.
//
// The type of the decoded values should be compatible with the respective
// values in out. If one or more values cannot be decoded due to type
// mismatches, decoding continues partially until the end of the YAML
// content, and a *yaml.LoadErrors is returned with details for all
// missed values.
//
// Struct fields are only loaded if they are exported (have an upper case
// first letter), and are loaded using the field name lowercased as the
// default key. Custom keys may be defined via the "yaml" name in the field
// tag: the content preceding the first comma is used as the key, and the
// following comma-separated options control the loading and dumping behavior.
//
// For example:
//
//	type T struct {
//		F int `yaml:"a,omitempty"`
//		B int
//	}
//	var t T
//	yaml.Load([]byte("a: 1\nb: 2"), &t)
//
// See the documentation of Dump for the format of tags and a list of
// supported tag options.
func Load(in []byte, out any, opts ...Option) error {
	o, err := libyaml.ApplyOptions(opts...)
	if err != nil {
		return err
	}

	if o.AllDocuments {
		// Multi-document mode: out must be pointer to slice
		return loadAll(in, out, o)
	}

	// Single-document mode: exactly one document required
	return loadSingle(in, out, o)
}

// loadAll loads all documents from in into the slice pointed to by out,
// replacing any existing contents.
//
// Note that argument-validation failures are also reported as *LoadErrors
// so callers see a single error type from Load.
func loadAll(in []byte, out any, opts *libyaml.Options) error {
	outVal := reflect.ValueOf(out)
	if outVal.Kind() != reflect.Pointer || outVal.IsNil() {
		return &LoadErrors{Errors: []*libyaml.ConstructError{{
			Err: errors.New("yaml: WithAllDocuments requires a non-nil pointer to a slice"),
		}}}
	}

	sliceVal := outVal.Elem()
	if sliceVal.Kind() != reflect.Slice {
		return &LoadErrors{Errors: []*libyaml.ConstructError{{
			Err: errors.New("yaml: WithAllDocuments requires a pointer to a slice"),
		}}}
	}

	// Create a new slice (clear existing content)
	sliceVal.Set(reflect.MakeSlice(sliceVal.Type(), 0, 0))

	l, err := NewLoader(bytes.NewReader(in), func(o *libyaml.Options) error {
		*o = *opts // Copy options
		return nil
	})
	if err != nil {
		return err
	}

	elemType := sliceVal.Type().Elem()
	for {
		// Create new element of slice's element type
		elemPtr := reflect.New(elemType)
		err := l.Load(elemPtr.Interface())
		// Plain equality is sufficient: Loader.Load returns io.EOF unwrapped.
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		// Append decoded element to slice
		sliceVal.Set(reflect.Append(sliceVal, elemPtr.Elem()))
	}

	return nil
}

// loadSingle loads exactly one document (strict): zero documents and more
// than one document are both reported as errors.
func loadSingle(in []byte, out any, opts *libyaml.Options) error {
	l, err := NewLoader(bytes.NewReader(in), func(o *libyaml.Options) error {
		*o = *opts // Copy options
		return nil
	})
	if err != nil {
		return err
	}

	// Load first document
	err = l.Load(out)
	if err == io.EOF {
		return &LoadErrors{Errors: []*libyaml.ConstructError{{
			Err: errors.New("yaml: no documents in stream"),
		}}}
	}
	if err != nil {
		return err
	}

	// Check for additional documents; a successful second Load means the
	// stream violates the single-document contract.
	var dummy any
	err = l.Load(&dummy)
	if err != io.EOF {
		if err != nil {
			// Some other error occurred
			return err
		}
		// Successfully loaded a second document - this is an error in strict mode
		return &LoadErrors{Errors: []*libyaml.ConstructError{{
			Err: errors.New("yaml: expected single document, found multiple"),
		}}}
	}

	return nil
}

// A Loader reads and decodes YAML values from an input stream with configurable
// options.
type Loader struct {
	composer *libyaml.Composer
	decoder  *libyaml.Constructor
	opts     *libyaml.Options
	docCount int // number of documents decoded so far; used by WithSingleDocument
}

// NewLoader returns a new Loader that reads from r with the given options.
//
// The Loader introduces its own buffering and may read data from r beyond the
// YAML values requested.
func NewLoader(r io.Reader, opts ...Option) (*Loader, error) {
	o, err := libyaml.ApplyOptions(opts...)
	if err != nil {
		return nil, err
	}
	c := libyaml.NewComposerFromReader(r)
	c.SetStreamNodes(o.StreamNodes)
	return &Loader{
		composer: c,
		decoder:  libyaml.NewConstructor(o),
		opts:     o,
	}, nil
}

// Load reads the next YAML-encoded document from its input and stores it
// in the value pointed to by v.
//
// Returns io.EOF when there are no more documents to read.
// If WithSingleDocument option was set and a document was already read,
// subsequent calls return io.EOF.
//
// Maps and pointers (to a struct, string, int, etc) are accepted as v
// values. If an internal pointer within a struct is not initialized,
// the yaml package will initialize it if necessary. The v parameter
// must not be nil.
//
// Struct fields are only loaded if they are exported (have an upper case
// first letter), and are loaded using the field name lowercased as the
// default key. Custom keys may be defined via the "yaml" name in the field
// tag: the content preceding the first comma is used as the key, and the
// following comma-separated options control the loading and dumping behavior.
+// +// See the documentation of the package-level Load function for more details +// about YAML to Go conversion and tag options. +func (l *Loader) Load(v any) (err error) { + defer handleErr(&err) + if l.opts.SingleDocument && l.docCount > 0 { + return io.EOF + } + node := l.composer.Parse() // *libyaml.Node + if node == nil { + return io.EOF + } + l.docCount++ + + out := reflect.ValueOf(v) + if out.Kind() == reflect.Pointer && !out.IsNil() { + out = out.Elem() + } + l.decoder.Construct(node, out) // Pass libyaml.Node directly + if len(l.decoder.TypeErrors) > 0 { + typeErrors := l.decoder.TypeErrors + l.decoder.TypeErrors = nil + return &LoadErrors{Errors: typeErrors} + } + return nil +} diff --git a/vendor/go.yaml.in/yaml/v4/yaml.go b/vendor/go.yaml.in/yaml/v4/yaml.go new file mode 100644 index 000000000000..684d660d294c --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/yaml.go @@ -0,0 +1,732 @@ +// Copyright 2011-2019 Canonical Ltd +// Copyright 2025 The go-yaml Project Contributors +// SPDX-License-Identifier: Apache-2.0 + +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/yaml/go-yaml +// +// This file contains: +// - Version presets (V2, V3, V4) +// - Options API (WithIndent, WithKnownFields, etc.) 
+// - Type and constant re-exports from internal/libyaml +// - Helper functions for struct field handling +// - Classic APIs (Decoder, Encoder, Unmarshal, Marshal) +// +// For the main API, see: +// - loader.go: Load, Loader +// - dumper.go: Dump, Dumper + +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" + + "go.yaml.in/yaml/v4/internal/libyaml" +) + +//----------------------------------------------------------------------------- +// Version presets +//----------------------------------------------------------------------------- + +// Usage: +// yaml.Dump(&data, yaml.V3) +// yaml.Dump(&data, yaml.V3, yaml.WithIndent(2), yaml.WithCompactSeqIndent()) + +// V2 defaults: +var V2 = Options( + WithIndent(2), + WithCompactSeqIndent(false), + WithLineWidth(80), + WithUnicode(true), + WithUniqueKeys(true), + WithQuotePreference(QuoteLegacy), +) + +// V3 defaults: +var V3 = Options( + WithIndent(4), + WithCompactSeqIndent(false), + WithLineWidth(80), + WithUnicode(true), + WithUniqueKeys(true), + WithQuotePreference(QuoteLegacy), +) + +// V4 defaults: +var V4 = Options( + WithIndent(2), + WithCompactSeqIndent(true), + WithLineWidth(80), + WithUnicode(true), + WithUniqueKeys(true), + WithQuotePreference(QuoteSingle), +) + +//----------------------------------------------------------------------------- +// Options +//----------------------------------------------------------------------------- + +// Option allows configuring YAML loading and dumping operations. +// Re-exported from internal/libyaml. +type Option = libyaml.Option + +var ( + // WithIndent sets indentation spaces (2-9). + // See internal/libyaml.WithIndent. + WithIndent = libyaml.WithIndent + // WithCompactSeqIndent configures '- ' as part of indentation. + // See internal/libyaml.WithCompactSeqIndent. + WithCompactSeqIndent = libyaml.WithCompactSeqIndent + // WithKnownFields enables strict field checking during loading. + // See internal/libyaml.WithKnownFields. 
+ WithKnownFields = libyaml.WithKnownFields + // WithSingleDocument only processes first document in stream. + // See internal/libyaml.WithSingleDocument. + WithSingleDocument = libyaml.WithSingleDocument + // WithStreamNodes enables stream boundary nodes when loading. + // See internal/libyaml.WithStreamNodes. + WithStreamNodes = libyaml.WithStreamNodes + // WithAllDocuments enables multi-document mode for Load and Dump. + // See internal/libyaml.WithAllDocuments. + WithAllDocuments = libyaml.WithAllDocuments + // WithLineWidth sets preferred line width for output. + // See internal/libyaml.WithLineWidth. + WithLineWidth = libyaml.WithLineWidth + // WithUnicode controls non-ASCII characters in output. + // See internal/libyaml.WithUnicode. + WithUnicode = libyaml.WithUnicode + // WithUniqueKeys enables duplicate key detection. + // See internal/libyaml.WithUniqueKeys. + WithUniqueKeys = libyaml.WithUniqueKeys + // WithCanonical forces canonical YAML output format. + // See internal/libyaml.WithCanonical. + WithCanonical = libyaml.WithCanonical + // WithLineBreak sets line ending style for output. + // See internal/libyaml.WithLineBreak. + WithLineBreak = libyaml.WithLineBreak + // WithExplicitStart controls document start markers (---). + // See internal/libyaml.WithExplicitStart. + WithExplicitStart = libyaml.WithExplicitStart + // WithExplicitEnd controls document end markers (...). + // See internal/libyaml.WithExplicitEnd. + WithExplicitEnd = libyaml.WithExplicitEnd + // WithFlowSimpleCollections controls flow style for simple collections. + // See internal/libyaml.WithFlowSimpleCollections. + WithFlowSimpleCollections = libyaml.WithFlowSimpleCollections + // WithQuotePreference sets preferred quote style when quoting is required. + // See internal/libyaml.WithQuotePreference. + WithQuotePreference = libyaml.WithQuotePreference +) + +// Options combines multiple options into a single Option. 
+// This is useful for creating option presets or combining version defaults +// with custom options. +// +// Example: +// +// opts := yaml.Options(yaml.V4, yaml.WithIndent(3)) +// yaml.Dump(&data, opts) +func Options(opts ...Option) Option { + return libyaml.CombineOptions(opts...) +} + +// OptsYAML parses a YAML string containing option settings and returns +// an Option that can be combined with other options using Options(). +// +// The YAML string can specify any of these fields: +// - indent (int) +// - compact-seq-indent (bool) +// - line-width (int) +// - unicode (bool) +// - canonical (bool) +// - line-break (string: ln, cr, crln) +// - explicit-start (bool) +// - explicit-end (bool) +// - flow-simple-coll (bool) +// - known-fields (bool) +// - single-document (bool) +// - unique-keys (bool) +// +// Only fields specified in the YAML will override other options when +// combined. Unspecified fields won't affect other options. +// +// Example: +// +// opts, err := yaml.OptsYAML(` +// indent: 3 +// known-fields: true +// `) +// yaml.Dump(&data, yaml.Options(V4, opts)) +func OptsYAML(yamlStr string) (Option, error) { + var cfg struct { + Indent *int `yaml:"indent"` + CompactSeqIndent *bool `yaml:"compact-seq-indent"` + LineWidth *int `yaml:"line-width"` + Unicode *bool `yaml:"unicode"` + Canonical *bool `yaml:"canonical"` + LineBreak *string `yaml:"line-break"` + ExplicitStart *bool `yaml:"explicit-start"` + ExplicitEnd *bool `yaml:"explicit-end"` + FlowSimpleCollections *bool `yaml:"flow-simple-coll"` + KnownFields *bool `yaml:"known-fields"` + SingleDocument *bool `yaml:"single-document"` + UniqueKeys *bool `yaml:"unique-keys"` + } + if err := Load([]byte(yamlStr), &cfg, WithKnownFields()); err != nil { + return nil, err + } + + // Build options only for fields that were set + var optList []Option + if cfg.Indent != nil { + optList = append(optList, WithIndent(*cfg.Indent)) + } + if cfg.CompactSeqIndent != nil { + optList = append(optList, 
WithCompactSeqIndent(*cfg.CompactSeqIndent)) + } + if cfg.LineWidth != nil { + optList = append(optList, WithLineWidth(*cfg.LineWidth)) + } + if cfg.Unicode != nil { + optList = append(optList, WithUnicode(*cfg.Unicode)) + } + if cfg.ExplicitStart != nil { + optList = append(optList, WithExplicitStart(*cfg.ExplicitStart)) + } + if cfg.ExplicitEnd != nil { + optList = append(optList, WithExplicitEnd(*cfg.ExplicitEnd)) + } + if cfg.FlowSimpleCollections != nil { + optList = append(optList, WithFlowSimpleCollections(*cfg.FlowSimpleCollections)) + } + if cfg.KnownFields != nil { + optList = append(optList, WithKnownFields(*cfg.KnownFields)) + } + if cfg.SingleDocument != nil && *cfg.SingleDocument { + optList = append(optList, WithSingleDocument()) + } + if cfg.UniqueKeys != nil { + optList = append(optList, WithUniqueKeys(*cfg.UniqueKeys)) + } + if cfg.Canonical != nil { + optList = append(optList, WithCanonical(*cfg.Canonical)) + } + if cfg.LineBreak != nil { + switch *cfg.LineBreak { + case "ln": + optList = append(optList, WithLineBreak(LineBreakLN)) + case "cr": + optList = append(optList, WithLineBreak(LineBreakCR)) + case "crln": + optList = append(optList, WithLineBreak(LineBreakCRLN)) + default: + return nil, errors.New("yaml: invalid line-break value (use ln, cr, or crln)") + } + } + + return Options(optList...), nil +} + +//----------------------------------------------------------------------------- +// Type and constant re-exports +//----------------------------------------------------------------------------- + +type ( + // Node represents a YAML node in the document tree. + // See internal/libyaml.Node. + Node = libyaml.Node + // Kind identifies the type of a YAML node. + // See internal/libyaml.Kind. + Kind = libyaml.Kind + // Style controls the presentation of a YAML node. + // See internal/libyaml.Style. + Style = libyaml.Style + // Marshaler is implemented by types with custom YAML marshaling. + // See internal/libyaml.Marshaler. 
+ Marshaler = libyaml.Marshaler + // IsZeroer is implemented by types that can report if they're zero. + // See internal/libyaml.IsZeroer. + IsZeroer = libyaml.IsZeroer +) + +// Unmarshaler is the interface implemented by types +// that can unmarshal a YAML description of themselves. +type Unmarshaler interface { + UnmarshalYAML(node *Node) error +} + +// Re-export stream-related types +type ( + VersionDirective = libyaml.StreamVersionDirective + TagDirective = libyaml.StreamTagDirective + Encoding = libyaml.Encoding +) + +// Re-export encoding constants +const ( + EncodingAny = libyaml.ANY_ENCODING + EncodingUTF8 = libyaml.UTF8_ENCODING + EncodingUTF16LE = libyaml.UTF16LE_ENCODING + EncodingUTF16BE = libyaml.UTF16BE_ENCODING +) + +// Re-export error types +type ( + + // LoadError represents an error encountered while decoding a YAML document. + // + // It contains details about the location in the document where the error + // occurred, as well as a descriptive message. + LoadError = libyaml.ConstructError + + // LoadErrors is returned when one or more fields cannot be properly decoded. + // + // It contains multiple *[LoadError] instances with details about each error. + LoadErrors = libyaml.LoadErrors + + // TypeError is an obsolete error type retained for compatibility. + // + // Deprecated: Use [LoadErrors] instead. 
+ // + //nolint:staticcheck // we are using deprecated TypeError for compatibility + TypeError = libyaml.TypeError +) + +// Re-export Kind constants +const ( + DocumentNode = libyaml.DocumentNode + SequenceNode = libyaml.SequenceNode + MappingNode = libyaml.MappingNode + ScalarNode = libyaml.ScalarNode + AliasNode = libyaml.AliasNode + StreamNode = libyaml.StreamNode +) + +// Re-export Style constants +const ( + TaggedStyle = libyaml.TaggedStyle + DoubleQuotedStyle = libyaml.DoubleQuotedStyle + SingleQuotedStyle = libyaml.SingleQuotedStyle + LiteralStyle = libyaml.LiteralStyle + FoldedStyle = libyaml.FoldedStyle + FlowStyle = libyaml.FlowStyle +) + +// LineBreak represents the line ending style for YAML output. +type LineBreak = libyaml.LineBreak + +// Line break constants for different platforms. +const ( + LineBreakLN = libyaml.LN_BREAK // Unix-style \n (default) + LineBreakCR = libyaml.CR_BREAK // Old Mac-style \r + LineBreakCRLN = libyaml.CRLN_BREAK // Windows-style \r\n +) + +// QuoteStyle represents the quote style to use when quoting is required. +type QuoteStyle = libyaml.QuoteStyle + +// Quote style constants for required quoting. +const ( + QuoteSingle = libyaml.QuoteSingle // Prefer single quotes (v4 default) + QuoteDouble = libyaml.QuoteDouble // Prefer double quotes + QuoteLegacy = libyaml.QuoteLegacy // Legacy v2/v3 behavior +) + +//----------------------------------------------------------------------------- +// Helper functions +//----------------------------------------------------------------------------- + +// The code in this section was copied from mgo/bson. + +var ( + structMap = make(map[reflect.Type]*structInfo) + fieldMapMutex sync.RWMutex + unmarshalerType reflect.Type +) + +// structInfo holds details for the serialization of fields of +// a given struct. 
+type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. + InlineUnmarshalers [][]int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && !strings.Contains(string(field.Tag), ":") { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, fmt.Errorf("unsupported flag %q in tag %q of type %s", flag, tag, st) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + 
inlineMap = info.Num + case reflect.Struct, reflect.Pointer: + ftype := field.Type + for ftype.Kind() == reflect.Pointer { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PointerTo(ftype).Implements(unmarshalerType) { + inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +var noWriter io.Writer + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(*libyaml.YAMLError); ok { + *err = e.Err + } else { + panic(v) + } + } +} + 
+//----------------------------------------------------------------------------- +// Classic APIs +//----------------------------------------------------------------------------- + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + composer *libyaml.Composer + knownFields bool +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + composer: libyaml.NewComposerFromReader(r), + } +} + +// KnownFields ensures that the keys in decoded mappings to +// exist as fields in the struct being decoded into. +func (dec *Decoder) KnownFields(enable bool) { + dec.knownFields = enable +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (dec *Decoder) Decode(v any) (err error) { + d := libyaml.NewConstructor(libyaml.DefaultOptions) + d.KnownFields = dec.knownFields + defer handleErr(&err) + node := dec.composer.Parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Pointer && !out.IsNil() { + out = out.Elem() + } + d.Construct(node, out) + if len(d.TypeErrors) > 0 { + return &LoadErrors{Errors: d.TypeErrors} + } + return nil +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *libyaml.Representer +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: libyaml.NewRepresenter(w, libyaml.DefaultOptions), + } +} + +// Encode writes the YAML encoding of v to the stream. 
+// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v any) (err error) { + defer handleErr(&err) + e.encoder.MarshalDoc("", reflect.ValueOf(v)) + return nil +} + +// SetIndent changes the used indentation used when encoding. +func (e *Encoder) SetIndent(spaces int) { + if spaces < 0 { + panic("yaml: cannot indent to a negative number of spaces") + } + e.encoder.Indent = spaces +} + +// CompactSeqIndent makes it so that '- ' is considered part of the indentation. +func (e *Encoder) CompactSeqIndent() { + e.encoder.Emitter.CompactSequenceIndent = true +} + +// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation. +func (e *Encoder) DefaultSeqIndent() { + e.encoder.Emitter.CompactSequenceIndent = false +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.Finish() + return nil +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.LoadErrors is returned with details for all +// missed values. 
+// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshaling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Construct([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +func Unmarshal(in []byte, out any) (err error) { + return unmarshal(in, out, V3) +} + +func unmarshal(in []byte, out any, opts ...Option) (err error) { + defer handleErr(&err) + o, err := libyaml.ApplyOptions(opts...) + if err != nil { + return err + } + + // Check if out implements yaml.Unmarshaler + if u, ok := out.(Unmarshaler); ok { + p := libyaml.NewComposer(in) + defer p.Destroy() + node := p.Parse() + if node != nil { + return u.UnmarshalYAML(node) + } + return nil + } + + return libyaml.Construct(in, out, o) +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshaled if they are exported (have an upper case +// first letter), and are marshaled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshaling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) 
yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// See doc/inline-tags.md for detailed examples and use cases. +// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +func Marshal(in any) (out []byte, err error) { + defer handleErr(&err) + e := libyaml.NewRepresenter(noWriter, libyaml.DefaultOptions) + defer e.Destroy() + e.MarshalDoc("", reflect.ValueOf(in)) + e.Finish() + out = e.Out + return out, err +} diff --git a/vendor/modules.txt b/vendor/modules.txt index d40db5886624..d928774265f9 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -180,8 +180,8 @@ github.com/cloudflare/circl/math/mlsbset github.com/cloudflare/circl/sign github.com/cloudflare/circl/sign/ed25519 github.com/cloudflare/circl/sign/ed448 -# github.com/compose-spec/compose-go/v2 v2.9.1 -## explicit; go 1.23 +# github.com/compose-spec/compose-go/v2 v2.10.2 +## explicit; go 1.24 github.com/compose-spec/compose-go/v2/cli github.com/compose-spec/compose-go/v2/consts github.com/compose-spec/compose-go/v2/dotenv @@ -1276,6 +1276,10 @@ go.yaml.in/yaml/v2 # go.yaml.in/yaml/v3 v3.0.4 ## explicit; go 1.16 
go.yaml.in/yaml/v3 +# go.yaml.in/yaml/v4 v4.0.0-rc.4 +## explicit; go 1.18 +go.yaml.in/yaml/v4 +go.yaml.in/yaml/v4/internal/libyaml # golang.org/x/crypto v0.48.0 ## explicit; go 1.24.0 golang.org/x/crypto/argon2