You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

624 lines
25 KiB

  1. # Copyright (c) 2017-2019 Intel Corporation
  2. #
  3. # SPDX-License-Identifier: Apache-2.0
  4. #
  5. # XXX: WARNING: this file is auto-generated.
  6. # XXX:
  7. # XXX: Source file: "cli/config/configuration-qemu.toml.in"
  8. # XXX: Project:
  9. # XXX: Name: Kata Containers
  10. # XXX: Type: kata
  11. [hypervisor.qemu]
  12. path = "/opt/kata/bin/qemu-system-x86_64"
  13. {% if kata_containers_version is version('2.2.0', '>=') %}
  14. kernel = "/opt/kata/share/kata-containers/vmlinux.container"
  15. {% else %}
  16. kernel = "/opt/kata/share/kata-containers/vmlinuz.container"
  17. {% endif %}
  18. image = "/opt/kata/share/kata-containers/kata-containers.img"
  19. machine_type = "q35"
  20. # Enable confidential guest support.
  21. # Toggling that setting may trigger different hardware features, ranging
  22. # from memory encryption to both memory and CPU-state encryption and integrity.
  23. # The Kata Containers runtime dynamically detects the available feature set and
  24. # aims at enabling the largest possible one.
  25. # Default false
  26. # confidential_guest = true
  27. # List of valid annotation names for the hypervisor
  28. # Each member of the list is a regular expression, which is the base name
  29. # of the annotation, e.g. "path" for io.katacontainers.config.hypervisor.path"
  30. enable_annotations = []
  31. # List of valid annotations values for the hypervisor
  32. # Each member of the list is a path pattern as described by glob(3).
  33. # The default if not set is empty (all annotations rejected.)
  34. # Your distribution recommends: ["/opt/kata/bin/qemu-system-x86_64"]
  35. valid_hypervisor_paths = ["/opt/kata/bin/qemu-system-x86_64"]
  36. # Optional space-separated list of options to pass to the guest kernel.
  37. # For example, use `kernel_params = "vsyscall=emulate"` if you are having
  38. # trouble running pre-2.15 glibc.
  39. #
  40. # WARNING: - any parameter specified here will take priority over the default
  41. # parameter value of the same name used to start the virtual machine.
  42. # Do not set values here unless you understand the impact of doing so as you
  43. # may stop the virtual machine from booting.
  44. # To see the list of default parameters, enable hypervisor debug, create a
  45. # container and look for 'default-kernel-parameters' log entries.
  46. kernel_params = ""
  47. # Path to the firmware.
  48. # If you want qemu to use the default firmware, leave this option empty
  49. firmware = ""
  50. # Machine accelerators
  51. # comma-separated list of machine accelerators to pass to the hypervisor.
  52. # For example, `machine_accelerators = "nosmm,nosmbus,nosata,nopit,static-prt,nofw"`
  53. machine_accelerators=""
  54. # CPU features
  55. # comma-separated list of cpu features to pass to the cpu
  56. # For example, `cpu_features = "pmu=off,vmx=off"`
  57. cpu_features="pmu=off"
  58. # Default number of vCPUs per SB/VM:
  59. # unspecified or 0 --> will be set to 1
  60. # < 0 --> will be set to the actual number of physical cores
  61. # > 0 <= number of physical cores --> will be set to the specified number
  62. # > number of physical cores --> will be set to the actual number of physical cores
  63. default_vcpus = 1
  64. # Default maximum number of vCPUs per SB/VM:
  65. # unspecified or == 0 --> will be set to the actual number of physical cores or to the maximum number
  66. # of vCPUs supported by KVM if that number is exceeded
  67. # > 0 <= number of physical cores --> will be set to the specified number
  68. # > number of physical cores --> will be set to the actual number of physical cores or to the maximum number
  69. # of vCPUs supported by KVM if that number is exceeded
  70. # WARNING: Depending on the architecture, the maximum number of vCPUs supported by KVM is used when
  71. # the actual number of physical cores is greater than it.
  72. # WARNING: Be aware that this value impacts the virtual machine's memory footprint and
  73. # CPU hotplug functionality. For example, `default_maxvcpus = 240` specifies that up to 240 vCPUs
  74. # can be added to a SB/VM, but the memory footprint will be big. Another example, with
  75. # `default_maxvcpus = 8` the memory footprint will be small, but 8 will be the maximum number of
  76. # vCPUs supported by the SB/VM. In general, we recommend that you do not edit this variable,
  77. # unless you know what you are doing.
  78. # NOTICE: on arm platform with gicv2 interrupt controller, set it to 8.
  79. default_maxvcpus = 0
  80. # Bridges can be used to hot plug devices.
  81. # Limitations:
  82. # * Currently only pci bridges are supported
  83. # * Up to 30 devices per bridge can be hot plugged.
  84. # * Up to 5 PCI bridges can be cold plugged per VM.
  85. # This limitation could be a bug in qemu or in the kernel
  86. # Default number of bridges per SB/VM:
  87. # unspecified or 0 --> will be set to 1
  88. # > 1 <= 5 --> will be set to the specified number
  89. # > 5 --> will be set to 5
  90. default_bridges = 1
  91. # Default memory size in MiB for SB/VM.
  92. # If unspecified then it will be set to 2048 MiB.
  93. default_memory = {{ kata_containers_qemu_default_memory }}
  94. #
  95. # Default memory slots per SB/VM.
  96. # If unspecified then it will be set to 10.
  97. # This will determine how many times memory can be hot-added to the sandbox/VM.
  98. #memory_slots = 10
  99. # The size in MiB will be added to the maximum memory of the hypervisor.
  100. # It is the memory address space for the NVDIMM device.
  101. # If set block storage driver (block_device_driver) to "nvdimm",
  102. # should set memory_offset to the size of block device.
  103. # Default 0
  104. #memory_offset = 0
  105. # Specifies virtio-mem will be enabled or not.
  106. # Please note that this option should be used with the command
  107. # "echo 1 > /proc/sys/vm/overcommit_memory".
  108. # Default false
  109. #enable_virtio_mem = true
  110. # Disable block device from being used for a container's rootfs.
  111. # In case of a storage driver like devicemapper where a container's
  112. # root file system is backed by a block device, the block device is passed
  113. # directly to the hypervisor for performance reasons.
  114. # This flag prevents the block device from being passed to the hypervisor,
  115. # 9pfs is used instead to pass the rootfs.
  116. disable_block_device_use = false
  117. # Shared file system type:
  118. # - virtio-fs (default)
  119. # - virtio-9p
  120. {% if kata_containers_version is version('2.2.0', '>=') %}
  121. shared_fs = "virtio-fs"
  122. {% else %}
  123. shared_fs = "virtio-9p"
  124. {% endif %}
  125. # Path to vhost-user-fs daemon.
  126. virtio_fs_daemon = "/opt/kata/libexec/kata-qemu/virtiofsd"
  127. # List of valid annotations values for the virtiofs daemon
  128. # The default if not set is empty (all annotations rejected.)
  129. # Your distribution recommends: ["/opt/kata/libexec/kata-qemu/virtiofsd"]
  130. valid_virtio_fs_daemon_paths = ["/opt/kata/libexec/kata-qemu/virtiofsd"]
  131. # Default size of DAX cache in MiB
  132. virtio_fs_cache_size = 0
  133. # Extra args for virtiofsd daemon
  134. #
  135. # Format example:
  136. # ["-o", "arg1=xxx,arg2", "-o", "hello world", "--arg3=yyy"]
  137. #
  138. # see `virtiofsd -h` for possible options.
  139. virtio_fs_extra_args = ["--thread-pool-size=1"]
  140. # Cache mode:
  141. #
  142. # - none
  143. # Metadata, data, and pathname lookup are not cached in guest. They are
  144. # always fetched from host and any changes are immediately pushed to host.
  145. #
  146. # - auto
  147. # Metadata and pathname lookup cache expires after a configured amount of
  148. # time (default is 1 second). Data is cached while the file is open (close
  149. # to open consistency).
  150. #
  151. # - always
  152. # Metadata, data, and pathname lookup are cached in guest and never expire.
  153. virtio_fs_cache = "always"
  154. # Block storage driver to be used for the hypervisor in case the container
  155. # rootfs is backed by a block device. This is virtio-scsi, virtio-blk
  156. # or nvdimm.
  157. block_device_driver = "virtio-scsi"
  158. # Specifies cache-related options will be set to block devices or not.
  159. # Default false
  160. #block_device_cache_set = true
  161. # Specifies cache-related options for block devices.
  162. # Denotes whether use of O_DIRECT (bypass the host page cache) is enabled.
  163. # Default false
  164. #block_device_cache_direct = true
  165. # Specifies cache-related options for block devices.
  166. # Denotes whether flush requests for the device are ignored.
  167. # Default false
  168. #block_device_cache_noflush = true
  169. # Enable iothreads (data-plane) to be used. This causes IO to be
  170. # handled in a separate IO thread. This is currently only implemented
  171. # for SCSI.
  172. #
  173. enable_iothreads = false
  174. # Enable pre allocation of VM RAM, default false
  175. # Enabling this will result in lower container density
  176. # as all of the memory will be allocated and locked
  177. # This is useful when you want to reserve all the memory
  178. # upfront or in the cases where you want memory latencies
  179. # to be very predictable
  180. # Default false
  181. enable_mem_prealloc = {{ kata_containers_qemu_enable_mem_prealloc }}
  182. # Enable huge pages for VM RAM, default false
  183. # Enabling this will result in the VM memory
  184. # being allocated using huge pages.
  185. # This is useful when you want to use vhost-user network
  186. # stacks within the container. This will automatically
  187. # result in memory pre allocation
  188. #enable_hugepages = true
  189. # Enable vhost-user storage device, default false
  190. # Enabling this will result in some Linux reserved block type
  191. # major range 240-254 being chosen to represent vhost-user devices.
  192. enable_vhost_user_store = false
  193. # The base directory specifically used for vhost-user devices.
  194. # Its sub-path "block" is used for block devices; "block/sockets" is
  195. # where we expect vhost-user sockets to live; "block/devices" is where
  196. # simulated block device nodes for vhost-user devices to live.
  197. vhost_user_store_path = "/var/run/kata-containers/vhost-user"
  198. # Enable vIOMMU, default false
  199. # Enabling this will result in the VM having a vIOMMU device
  200. # This will also add the following options to the kernel's
  201. # command line: intel_iommu=on,iommu=pt
  202. #enable_iommu = true
  203. # Enable IOMMU_PLATFORM, default false
  204. # Enabling this will result in the VM device having iommu_platform=on set
  205. #enable_iommu_platform = true
  206. # List of valid annotations values for the vhost user store path
  207. # The default if not set is empty (all annotations rejected.)
  208. # Your distribution recommends: ["/var/run/kata-containers/vhost-user"]
  209. valid_vhost_user_store_paths = ["/var/run/kata-containers/vhost-user"]
  210. # Enable file based guest memory support. The default is an empty string which
  211. # will disable this feature. In the case of virtio-fs, this is enabled
  212. # automatically and '/dev/shm' is used as the backing folder.
  213. # This option will be ignored if VM templating is enabled.
  214. #file_mem_backend = ""
  215. # List of valid annotations values for the file_mem_backend annotation
  216. # The default if not set is empty (all annotations rejected.)
  217. # Your distribution recommends: [""]
  218. valid_file_mem_backends = [""]
  219. # Enable swap of vm memory. Default false.
  220. # The behaviour is undefined if mem_prealloc is also set to true
  221. #enable_swap = true
  222. # -pflash can add image file to VM. The arguments of it should be in format
  223. # of ["/path/to/flash0.img", "/path/to/flash1.img"]
  224. pflashes = []
  225. # This option changes the default hypervisor and kernel parameters
  226. # to enable debug output where available. This extra output is added
  227. # to the proxy logs, but only when proxy debug is also enabled.
  228. #
  229. # Default false
  230. enable_debug = {{ kata_containers_qemu_debug }}
  231. # Disable the customizations done in the runtime when it detects
  232. # that it is running on top a VMM. This will result in the runtime
  233. # behaving as it would when running on bare metal.
  234. #
  235. #disable_nesting_checks = true
  236. # This is the msize used for 9p shares. It is the number of bytes
  237. # used for 9p packet payload.
  238. #msize_9p = 8192
  239. # If true and vsocks are supported, use vsocks to communicate directly
  240. # with the agent and no proxy is started, otherwise use unix
  241. # sockets and start a proxy to communicate with the agent.
  242. # Default false
  243. #use_vsock = true
  244. # If false and nvdimm is supported, use nvdimm device to plug guest image.
  245. # Otherwise virtio-block device is used.
  246. # Default is false
  247. #disable_image_nvdimm = true
  248. # VFIO devices are hotplugged on a bridge by default.
  249. # Enable hotplugging on root bus. This may be required for devices with
  250. # a large PCI bar, as this is a current limitation with hotplugging on
  251. # a bridge. This value is valid for "pc" machine type.
  252. # Default false
  253. #hotplug_vfio_on_root_bus = true
  254. # Before hot plugging a PCIe device, you need to add a pcie_root_port device.
  255. # Use this parameter when using some large PCI bar devices, such as Nvidia GPU
  256. # The value means the number of pcie_root_port
  257. # This value is valid when hotplug_vfio_on_root_bus is true and machine_type is "q35"
  258. # Default 0
  259. #pcie_root_port = 2
  260. # If vhost-net backend for virtio-net is not desired, set to true. Default is false, which trades off
  261. # security (vhost-net runs ring0) for network I/O performance.
  262. #disable_vhost_net = true
  263. #
  264. # Default entropy source.
  265. # The path to a host source of entropy (including a real hardware RNG)
  266. # /dev/urandom and /dev/random are two main options.
  267. # Be aware that /dev/random is a blocking source of entropy. If the host
  268. # runs out of entropy, the VMs boot time will increase leading to get startup
  269. # timeouts.
  270. # The source of entropy /dev/urandom is non-blocking and provides a
  271. # generally acceptable source of entropy. It should work well for pretty much
  272. # all practical purposes.
  273. #entropy_source= "/dev/urandom"
  274. # List of valid annotations values for entropy_source
  275. # The default if not set is empty (all annotations rejected.)
  276. # Your distribution recommends: ["/dev/urandom","/dev/random",""]
  277. valid_entropy_sources = ["/dev/urandom","/dev/random",""]
  278. # Path to OCI hook binaries in the *guest rootfs*.
  279. # This does not affect host-side hooks which must instead be added to
  280. # the OCI spec passed to the runtime.
  281. #
  282. # You can create a rootfs with hooks by customizing the osbuilder scripts:
  283. # https://github.com/kata-containers/osbuilder
  284. #
  285. # Hooks must be stored in a subdirectory of guest_hook_path according to their
  286. # hook type, i.e. "guest_hook_path/{prestart,postart,poststop}".
  287. # The agent will scan these directories for executable files and add them, in
  288. # lexicographical order, to the lifecycle of the guest container.
  289. # Hooks are executed in the runtime namespace of the guest. See the official documentation:
  290. # https://github.com/opencontainers/runtime-spec/blob/v1.0.1/config.md#posix-platform-hooks
  291. # Warnings will be logged if any error is encountered while scanning for hooks,
  292. # but it will not abort container execution.
  293. #guest_hook_path = "/usr/share/oci/hooks"
  294. #
  295. # Use rx Rate Limiter to control network I/O inbound bandwidth(size in bits/sec for SB/VM).
  296. # In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) to discipline traffic.
  297. # Default 0-sized value means unlimited rate.
  298. #rx_rate_limiter_max_rate = 0
  299. # Use tx Rate Limiter to control network I/O outbound bandwidth(size in bits/sec for SB/VM).
  300. # In Qemu, we use classful qdiscs HTB(Hierarchy Token Bucket) and ifb(Intermediate Functional Block)
  301. # to discipline traffic.
  302. # Default 0-sized value means unlimited rate.
  303. #tx_rate_limiter_max_rate = 0
  304. # Set where to save the guest memory dump file.
  305. # If set, when GUEST_PANICKED event occurred,
  306. # guest memory will be dumped to host filesystem under guest_memory_dump_path,
  307. # This directory will be created automatically if it does not exist.
  308. #
  309. # The dumped file(also called vmcore) can be processed with crash or gdb.
  310. #
  311. # WARNING:
  312. # Dumping the guest's memory can take a very long time depending on the amount of guest memory
  313. # and can use a lot of disk space.
  314. #guest_memory_dump_path="/var/crash/kata"
  315. # If enable paging.
  316. # Basically, if you want to use "gdb" rather than "crash",
  317. # or need the guest-virtual addresses in the ELF vmcore,
  318. # then you should enable paging.
  319. #
  320. # See: https://www.qemu.org/docs/master/qemu-qmp-ref.html#Dump-guest-memory for details
  321. #guest_memory_dump_paging=false
  322. # Enable swap in the guest. Default false.
  323. # When enable_guest_swap is enabled, insert a raw file to the guest as the swap device
  324. # if the swappiness of a container (set by annotation "io.katacontainers.container.resource.swappiness")
  325. # is bigger than 0.
  326. # The size of the swap device should be
  327. # swap_in_bytes (set by annotation "io.katacontainers.container.resource.swap_in_bytes") - memory_limit_in_bytes.
  328. # If swap_in_bytes is not set, the size should be memory_limit_in_bytes.
  329. # If swap_in_bytes and memory_limit_in_bytes is not set, the size should
  330. # be default_memory.
  331. #enable_guest_swap = true
  332. [factory]
  333. # VM templating support. Once enabled, new VMs are created from template
  334. # using vm cloning. They will share the same initial kernel, initramfs and
  335. # agent memory by mapping it readonly. It helps speeding up new container
  336. # creation and saves a lot of memory if there are many kata containers running
  337. # on the same host.
  338. #
  339. # When disabled, new VMs are created from scratch.
  340. #
  341. # Note: Requires "initrd=" to be set ("image=" is not supported).
  342. #
  343. # Default false
  344. #enable_template = true
  345. # Specifies the path of template.
  346. #
  347. # Default "/run/vc/vm/template"
  348. #template_path = "/run/vc/vm/template"
  349. # The number of caches of VMCache:
  350. # unspecified or == 0 --> VMCache is disabled
  351. # > 0 --> will be set to the specified number
  352. #
  353. # VMCache is a function that creates VMs as caches before using it.
  354. # It helps speed up new container creation.
  355. # The function consists of a server and some clients communicating
  356. # through Unix socket. The protocol is gRPC in protocols/cache/cache.proto.
  357. # The VMCache server will create some VMs and cache them by factory cache.
  358. # It will convert the VM to gRPC format and transport it when it gets
  359. # a request from clients.
  360. # Factory grpccache is the VMCache client. It will request gRPC format
  361. # VM and convert it back to a VM. If VMCache function is enabled,
  362. # kata-runtime will request VM from factory grpccache when it creates
  363. # a new sandbox.
  364. #
  365. # Default 0
  366. #vm_cache_number = 0
  367. # Specify the address of the Unix socket that is used by VMCache.
  368. #
  369. # Default /var/run/kata-containers/cache.sock
  370. #vm_cache_endpoint = "/var/run/kata-containers/cache.sock"
  371. [proxy.kata]
  372. path = "/opt/kata/libexec/kata-containers/kata-proxy"
  373. # If enabled, proxy messages will be sent to the system log
  374. # (default: disabled)
  375. enable_debug = {{ kata_containers_qemu_debug }}
  376. [shim.kata]
  377. path = "/opt/kata/libexec/kata-containers/kata-shim"
  378. # If enabled, shim messages will be sent to the system log
  379. # (default: disabled)
  380. enable_debug = {{ kata_containers_qemu_debug }}
  381. # If enabled, the shim will create opentracing.io traces and spans.
  382. # (See https://www.jaegertracing.io/docs/getting-started).
  383. #
  384. # Note: By default, the shim runs in a separate network namespace. Therefore,
  385. # to allow it to send trace details to the Jaeger agent running on the host,
  386. # it is necessary to set 'disable_new_netns=true' so that it runs in the host
  387. # network namespace.
  388. #
  389. # (default: disabled)
  390. #enable_tracing = true
  391. [agent.kata]
  392. # If enabled, make the agent display debug-level messages.
  393. # (default: disabled)
  394. enable_debug = {{ kata_containers_qemu_debug }}
  395. # Enable agent tracing.
  396. #
  397. # If enabled, the default trace mode is "dynamic" and the
  398. # default trace type is "isolated". The trace mode and type are set
  399. # explicitly with the `trace_type=` and `trace_mode=` options.
  400. #
  401. # Notes:
  402. #
  403. # - Tracing is ONLY enabled when `enable_tracing` is set: explicitly
  404. # setting `trace_mode=` and/or `trace_type=` without setting `enable_tracing`
  405. # will NOT activate agent tracing.
  406. #
  407. # - See https://github.com/kata-containers/agent/blob/master/TRACING.md for
  408. # full details.
  409. #
  410. # (default: disabled)
  411. #enable_tracing = true
  412. #
  413. #trace_mode = "dynamic"
  414. #trace_type = "isolated"
  415. # Comma separated list of kernel modules and their parameters.
  416. # These modules will be loaded in the guest kernel using modprobe(8).
  417. # The following example can be used to load two kernel modules with parameters
  418. # - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"]
  419. # The first word is considered as the module name and the rest as its parameters.
  420. # Container will not be started when:
  421. # * A kernel module is specified and the modprobe command is not installed in the guest
  422. # or it fails loading the module.
  423. # * The module is not available in the guest or it doesn't meet the guest kernel
  424. # requirements, like architecture and version.
  425. #
  426. kernel_modules=[]
  427. # Enable debug console.
  428. # If enabled, user can connect guest OS running inside hypervisor
  429. # through "kata-runtime exec <sandbox-id>" command
  430. #debug_console_enabled = true
  431. # Agent connection dialing timeout value in seconds
  432. # (default: 30)
  433. #dial_timeout = 30
  434. [netmon]
  435. # If enabled, the network monitoring process gets started when the
  436. # sandbox is created. This allows for the detection of some additional
  437. # network being added to the existing network namespace, after the
  438. # sandbox has been created.
  439. # (default: disabled)
  440. #enable_netmon = true
  441. # Specify the path to the netmon binary.
  442. path = "/opt/kata/libexec/kata-containers/kata-netmon"
  443. # If enabled, netmon messages will be sent to the system log
  444. # (default: disabled)
  445. enable_debug = {{ kata_containers_qemu_debug }}
  446. [runtime]
  447. # If enabled, the runtime will log additional debug messages to the
  448. # system log
  449. # (default: disabled)
  450. enable_debug = {{ kata_containers_qemu_debug }}
  451. #
  452. # Internetworking model
  453. # Determines how the VM should be connected to the
  454. # container network interface
  455. # Options:
  456. #
  457. # - macvtap
  458. # Used when the Container network interface can be bridged using
  459. # macvtap.
  460. #
  461. # - none
  462. # Used when customize network. Only creates a tap device. No veth pair.
  463. #
  464. # - tcfilter
  465. # Uses tc filter rules to redirect traffic from the network interface
  466. # provided by plugin to a tap interface connected to the VM.
  467. #
  468. internetworking_model="tcfilter"
  469. # disable guest seccomp
  470. # Determines whether container seccomp profiles are passed to the virtual
  471. # machine and applied by the kata agent. If set to true, seccomp is not applied
  472. # within the guest
  473. # (default: true)
  474. disable_guest_seccomp=true
  475. # If enabled, the runtime will create opentracing.io traces and spans.
  476. # (See https://www.jaegertracing.io/docs/getting-started).
  477. # (default: disabled)
  478. #enable_tracing = true
  479. # Set the full url to the Jaeger HTTP Thrift collector.
  480. # The default if not set will be "http://localhost:14268/api/traces"
  481. #jaeger_endpoint = ""
  482. # Sets the username to be used if basic auth is required for Jaeger.
  483. #jaeger_user = ""
  484. # Sets the password to be used if basic auth is required for Jaeger.
  485. #jaeger_password = ""
  486. # If enabled, the runtime will not create a network namespace for shim and hypervisor processes.
  487. # This option may have some potential impacts to your host. It should only be used when you know what you're doing.
  488. # `disable_new_netns` conflicts with `enable_netmon`
  489. # `disable_new_netns` conflicts with `internetworking_model=tcfilter` and `internetworking_model=macvtap`. It works only
  490. # with `internetworking_model=none`. The tap device will be in the host network namespace and can connect to a bridge
  491. # (like OVS) directly.
  492. # If you are using docker, `disable_new_netns` only works with `docker run --net=none`
  493. # (default: false)
  494. #disable_new_netns = true
  495. # if enabled, the runtime will add all the kata processes inside one dedicated cgroup.
  496. # The container cgroups in the host are not created, just one single cgroup per sandbox.
  497. # The runtime caller is free to restrict or collect cgroup stats of the overall Kata sandbox.
  498. # The sandbox cgroup path is the parent cgroup of a container with the PodSandbox annotation.
  499. # The sandbox cgroup is constrained if there is no container type annotation.
  500. # See: https://godoc.org/github.com/kata-containers/runtime/virtcontainers#ContainerType
  501. sandbox_cgroup_only={{ kata_containers_qemu_sandbox_cgroup_only }}
  502. # If specified, sandbox_bind_mounts identifies host paths to be mounted (ro) into the sandbox's shared path.
  503. # This is only valid if filesystem sharing is utilized. The provided path(s) will be bindmounted into the shared fs directory.
  504. # If defaults are utilized, these mounts should be available in the guest at `/run/kata-containers/shared/containers/sandbox-mounts`
  505. # These will not be exposed to the container workloads, and are only provided for potential guest services.
  506. sandbox_bind_mounts=[]
  507. # Enabled experimental feature list, format: ["a", "b"].
  508. # Experimental features are features not stable enough for production,
  509. # they may break compatibility, and are prepared for a big version bump.
  510. # Supported experimental features:
  511. # (default: [])
  512. experimental=[]
  513. # If enabled, user can run pprof tools with shim v2 process through kata-monitor.
  514. # (default: false)
  515. # enable_pprof = true
  516. # WARNING: All the options in the following section have not been implemented yet.
  517. # This section was added as a placeholder. DO NOT USE IT!
  518. [image]
  519. # Container image service.
  520. #
  521. # Offload the CRI image management service to the Kata agent.
  522. # (default: false)
  523. #service_offload = true
  524. # Container image decryption keys provisioning.
  525. # Applies only if service_offload is true.
  526. # Keys can be provisioned locally (e.g. through a special command or
  527. # a local file) or remotely (usually after the guest is remotely attested).
  528. # The provision setting is a complete URL that lets the Kata agent decide
  529. # which method to use in order to fetch the keys.
  530. #
  531. # Keys can be stored in a local file, in a measured and attested initrd:
  532. #provision=data:///local/key/file
  533. #
  534. # Keys could be fetched through a special command or binary from the
  535. # initrd (guest) image, e.g. a firmware call:
  536. #provision=file:///path/to/bin/fetcher/in/guest
  537. #
  538. # Keys can be remotely provisioned. The Kata agent fetches them from e.g.
  539. # a HTTPS URL:
  540. #provision=https://my-key-broker.foo/tenant/<tenant-id>