`; return; } const search = await pagefind.debouncedSearch(query); if (search === null) { return; } else { const resultsLength = search.results.length const resultsData = await Promise.all(search.results.slice(0, 5).map(r => r.data())); const results = resultsData.map((item, index) => ({...item, index: index + 1})); if (query) { searchBarResults.classList.remove("hidden"); } else { searchBarResults.classList.add("hidden"); } let resultsHTML = `
${resultsLength} results
`; resultsHTML += results .map((item) => { return `
${item.meta.title}

…${item.excerpt}…

`; }) .join(""); if (resultsLength > 5) { resultsHTML += ``; } searchBarResults.innerHTML = resultsHTML; } } searchBarInput.addEventListener("input", search); if (window.heap !== undefined) { searchBarResults.addEventListener('click', function (event) { if (event.target.tagName === 'A' && event.target.closest('.link')) { const searchQuery = event.target.getAttribute('data-query'); const resultIndex = event.target.getAttribute('data-index'); const url = new URL(event.target.href); const properties = { docs_search_target_path: url.pathname, docs_search_target_title: event.target.textContent, docs_search_query_text: searchQuery, docs_search_target_index: resultIndex, docs_search_source_path: window.location.pathname, docs_search_source_title: document.title, }; heap.track("Docs - Search - Click - Result Link", properties); } }); } });

Amazon S3 cache

Restricted

This is an experimental feature. The interface and behavior are unstable and may change in future releases.

The s3 cache storage uploads your resulting build cache to the Amazon S3 file storage service, or to other S3-compatible services such as MinIO.

This cache storage backend is not supported with the default docker driver. To use this feature, create a new builder using a different driver. See Build drivers for more information.

Synopsis

$ docker buildx build --push -t <user>/<image> \
  --cache-to type=s3,region=<region>,bucket=<bucket>,name=<cache-image>[,parameters...] \
  --cache-from type=s3,region=<region>,bucket=<bucket>,name=<cache-image> .

The following table describes the available CSV parameters that you can pass to --cache-to and --cache-from.

| Name                 | Option                  | Type        | Default | Description                                                    |
|----------------------|-------------------------|-------------|---------|----------------------------------------------------------------|
| `region`             | `cache-to`,`cache-from` | String      |         | Required. Geographic location.                                 |
| `bucket`             | `cache-to`,`cache-from` | String      |         | Required. Name of the S3 bucket.                               |
| `name`               | `cache-to`,`cache-from` | String      |         | Name of the cache image.                                       |
| `endpoint_url`       | `cache-to`,`cache-from` | String      |         | Endpoint of the S3 bucket.                                     |
| `blobs_prefix`       | `cache-to`,`cache-from` | String      |         | Prefix to prepend to blob filenames.                           |
| `upload_parallelism` | `cache-to`              | Integer     | `4`     | Number of parallel layer uploads.                              |
| `touch_refresh`      | `cache-to`              | Time        | `24h`   | Interval for updating the timestamp of unchanged cache layers. |
| `manifests_prefix`   | `cache-to`,`cache-from` | String      |         | Prefix to prepend on manifest filenames.                       |
| `use_path_style`     | `cache-to`,`cache-from` | Boolean     | `false` | When `true`, uses `bucket` in the URL instead of hostname.     |
| `access_key_id`      | `cache-to`,`cache-from` | String      |         | See authentication.                                            |
| `secret_access_key`  | `cache-to`,`cache-from` | String      |         | See authentication.                                            |
| `session_token`      | `cache-to`,`cache-from` | String      |         | See authentication.                                            |
| `mode`               | `cache-to`              | `min`,`max` | `min`   | Cache layers to export, see cache mode.                        |
| `ignore-error`       | `cache-to`              | Boolean     | `false` | Ignore errors caused by failed cache exports.                  |

Authentication

Buildx can reuse existing AWS credentials, configured either using a credentials file or environment variables, for pushing and pulling cache to S3. Alternatively, you can use the access_key_id, secret_access_key, and session_token attributes to specify credentials directly on the CLI.

Refer to AWS Go SDK, Specifying Credentials for details about authentication using environment variables and credentials file.

Further reading

For an introduction to caching see Docker build cache.

For more information on the s3 cache backend, see the BuildKit README.