Compare commits
6 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
45a3a79246 | ||
|
|
72af779e8a | ||
|
|
05846896d1 | ||
|
|
906948782e | ||
|
|
1e1cc940df | ||
|
|
e89ed1bcb6 |
10
.github/dependabot.yml
vendored
10
.github/dependabot.yml
vendored
@@ -1,10 +0,0 @@
|
|||||||
version: 2
|
|
||||||
updates:
|
|
||||||
- package-ecosystem: "github-actions"
|
|
||||||
open-pull-requests-limit: 10
|
|
||||||
directory: "/"
|
|
||||||
schedule:
|
|
||||||
interval: "daily"
|
|
||||||
labels:
|
|
||||||
- "dependencies"
|
|
||||||
- "bot"
|
|
||||||
166
.github/workflows/build.yml
vendored
166
.github/workflows/build.yml
vendored
@@ -1,9 +1,5 @@
|
|||||||
name: build
|
name: build
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
on:
|
on:
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
push:
|
push:
|
||||||
@@ -19,113 +15,48 @@ on:
|
|||||||
|
|
||||||
env:
|
env:
|
||||||
REPO_SLUG: "docker/buildx-bin"
|
REPO_SLUG: "docker/buildx-bin"
|
||||||
|
REPO_SLUG_ORIGIN: "moby/buildkit:master"
|
||||||
RELEASE_OUT: "./release-out"
|
RELEASE_OUT: "./release-out"
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
test:
|
build:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v2
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v2
|
|
||||||
with:
|
|
||||||
version: latest
|
|
||||||
-
|
|
||||||
name: Test
|
|
||||||
uses: docker/bake-action@v2
|
|
||||||
with:
|
|
||||||
targets: test
|
|
||||||
set: |
|
|
||||||
*.cache-from=type=gha,scope=test
|
|
||||||
*.cache-to=type=gha,scope=test
|
|
||||||
-
|
|
||||||
name: Upload coverage
|
|
||||||
uses: codecov/codecov-action@v3
|
|
||||||
with:
|
|
||||||
file: ./coverage/coverage.txt
|
|
||||||
|
|
||||||
prepare:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
outputs:
|
|
||||||
matrix: ${{ steps.platforms.outputs.matrix }}
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
-
|
|
||||||
name: Create matrix
|
|
||||||
id: platforms
|
|
||||||
run: |
|
|
||||||
echo ::set-output name=matrix::$(docker buildx bake binaries-cross --print | jq -cr '.target."binaries-cross".platforms')
|
|
||||||
-
|
|
||||||
name: Show matrix
|
|
||||||
run: |
|
|
||||||
echo ${{ steps.platforms.outputs.matrix }}
|
|
||||||
|
|
||||||
binaries:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
needs:
|
|
||||||
- prepare
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
platform: ${{ fromJson(needs.prepare.outputs.matrix) }}
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Prepare
|
|
||||||
run: |
|
|
||||||
platform=${{ matrix.platform }}
|
|
||||||
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
-
|
-
|
||||||
name: Set up QEMU
|
name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v1
|
||||||
-
|
-
|
||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/setup-buildx-action@v1
|
||||||
with:
|
with:
|
||||||
version: latest
|
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
|
||||||
-
|
-
|
||||||
name: Build
|
name: Test
|
||||||
uses: docker/bake-action@v2
|
run: |
|
||||||
|
make test
|
||||||
|
-
|
||||||
|
name: Send to Codecov
|
||||||
|
uses: codecov/codecov-action@v2
|
||||||
with:
|
with:
|
||||||
targets: release
|
file: ./coverage/coverage.txt
|
||||||
set: |
|
-
|
||||||
*.platform=${{ matrix.platform }}
|
name: Build binaries
|
||||||
*.cache-from=type=gha,scope=binaries-${{ env.PLATFORM_PAIR }}
|
run: |
|
||||||
*.cache-to=type=gha,scope=binaries-${{ env.PLATFORM_PAIR }},mode=max
|
make release
|
||||||
-
|
-
|
||||||
name: Upload artifacts
|
name: Upload artifacts
|
||||||
uses: actions/upload-artifact@v3
|
uses: actions/upload-artifact@v2
|
||||||
with:
|
with:
|
||||||
name: buildx
|
name: buildx
|
||||||
path: ${{ env.RELEASE_OUT }}/*
|
path: ${{ env.RELEASE_OUT }}/*
|
||||||
if-no-files-found: error
|
if-no-files-found: error
|
||||||
|
|
||||||
bin-image:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
if: ${{ github.event_name != 'pull_request' }}
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
-
|
|
||||||
name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v2
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v2
|
|
||||||
with:
|
|
||||||
version: latest
|
|
||||||
-
|
-
|
||||||
name: Docker meta
|
name: Docker meta
|
||||||
id: meta
|
id: meta
|
||||||
uses: docker/metadata-action@v4
|
uses: docker/metadata-action@v3
|
||||||
with:
|
with:
|
||||||
images: |
|
images: |
|
||||||
${{ env.REPO_SLUG }}
|
${{ env.REPO_SLUG }}
|
||||||
@@ -137,78 +68,25 @@ jobs:
|
|||||||
-
|
-
|
||||||
name: Login to DockerHub
|
name: Login to DockerHub
|
||||||
if: github.event_name != 'pull_request'
|
if: github.event_name != 'pull_request'
|
||||||
uses: docker/login-action@v2
|
uses: docker/login-action@v1
|
||||||
with:
|
with:
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
-
|
-
|
||||||
name: Build and push image
|
name: Build and push image
|
||||||
uses: docker/bake-action@v2
|
uses: docker/bake-action@v1
|
||||||
with:
|
with:
|
||||||
files: |
|
files: |
|
||||||
./docker-bake.hcl
|
./docker-bake.hcl
|
||||||
${{ steps.meta.outputs.bake-file }}
|
${{ steps.meta.outputs.bake-file }}
|
||||||
targets: image-cross
|
targets: image-cross
|
||||||
push: ${{ github.event_name != 'pull_request' }}
|
push: ${{ github.event_name != 'pull_request' }}
|
||||||
set: |
|
|
||||||
*.cache-from=type=gha,scope=bin-image
|
|
||||||
*.cache-to=type=gha,scope=bin-image,mode=max
|
|
||||||
|
|
||||||
release:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
needs:
|
|
||||||
- binaries
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
-
|
|
||||||
name: Download binaries
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: buildx
|
|
||||||
path: ${{ env.RELEASE_OUT }}
|
|
||||||
-
|
|
||||||
name: Create checksums
|
|
||||||
run: ./hack/hash-files
|
|
||||||
-
|
|
||||||
name: List artifacts
|
|
||||||
run: |
|
|
||||||
tree -nh ${{ env.RELEASE_OUT }}
|
|
||||||
-
|
|
||||||
name: Check artifacts
|
|
||||||
run: |
|
|
||||||
find ${{ env.RELEASE_OUT }} -type f -exec file -e ascii -- {} +
|
|
||||||
-
|
-
|
||||||
name: GitHub Release
|
name: GitHub Release
|
||||||
if: startsWith(github.ref, 'refs/tags/v')
|
if: startsWith(github.ref, 'refs/tags/v')
|
||||||
uses: softprops/action-gh-release@1e07f4398721186383de40550babbdf2b84acfc5
|
uses: softprops/action-gh-release@v1
|
||||||
env:
|
env:
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
with:
|
with:
|
||||||
draft: true
|
draft: true
|
||||||
files: ${{ env.RELEASE_OUT }}/*
|
files: ${{ env.RELEASE_OUT }}/*
|
||||||
|
|
||||||
buildkit-edge:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
continue-on-error: true
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
-
|
|
||||||
name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v2
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v2
|
|
||||||
with:
|
|
||||||
version: latest
|
|
||||||
driver-opts: image=moby/buildkit:master
|
|
||||||
buildkitd-flags: --debug
|
|
||||||
-
|
|
||||||
# Just run a bake target to check eveything runs fine
|
|
||||||
name: Build
|
|
||||||
uses: docker/bake-action@v2
|
|
||||||
with:
|
|
||||||
targets: binaries
|
|
||||||
|
|||||||
56
.github/workflows/docs-release.yml
vendored
56
.github/workflows/docs-release.yml
vendored
@@ -1,56 +0,0 @@
|
|||||||
name: docs-release
|
|
||||||
|
|
||||||
on:
|
|
||||||
release:
|
|
||||||
types: [ released ]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
open-pr:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout docs repo
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
|
|
||||||
repository: docker/docker.github.io
|
|
||||||
ref: master
|
|
||||||
-
|
|
||||||
name: Prepare
|
|
||||||
run: |
|
|
||||||
rm -rf ./_data/buildx/*
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v2
|
|
||||||
-
|
|
||||||
name: Build docs
|
|
||||||
uses: docker/bake-action@v2
|
|
||||||
with:
|
|
||||||
source: ${{ github.server_url }}/${{ github.repository }}.git#${{ github.event.release.name }}
|
|
||||||
targets: update-docs
|
|
||||||
set: |
|
|
||||||
*.output=/tmp/buildx-docs
|
|
||||||
env:
|
|
||||||
DOCS_FORMATS: yaml
|
|
||||||
-
|
|
||||||
name: Copy files
|
|
||||||
run: |
|
|
||||||
cp /tmp/buildx-docs/out/reference/*.yaml ./_data/buildx/
|
|
||||||
-
|
|
||||||
name: Commit changes
|
|
||||||
run: |
|
|
||||||
git add -A .
|
|
||||||
-
|
|
||||||
name: Create PR on docs repo
|
|
||||||
uses: peter-evans/create-pull-request@923ad837f191474af6b1721408744feb989a4c27 # v4.0.4
|
|
||||||
with:
|
|
||||||
token: ${{ secrets.GHPAT_DOCS_DISPATCH }}
|
|
||||||
push-to-fork: docker-tools-robot/docker.github.io
|
|
||||||
commit-message: "build: update buildx reference to ${{ github.event.release.name }}"
|
|
||||||
signoff: true
|
|
||||||
branch: dispatch/buildx-ref-${{ github.event.release.name }}
|
|
||||||
delete-branch: true
|
|
||||||
title: Update buildx reference to ${{ github.event.release.name }}
|
|
||||||
body: |
|
|
||||||
Update the buildx reference documentation to keep in sync with the latest release `${{ github.event.release.name }}`
|
|
||||||
draft: false
|
|
||||||
118
.github/workflows/docs-upstream.yml
vendored
118
.github/workflows/docs-upstream.yml
vendored
@@ -1,118 +0,0 @@
|
|||||||
# this workflow runs the remote validate bake target from docker/docker.github.io
|
|
||||||
# to check if yaml reference docs and markdown files used in this repo are still valid
|
|
||||||
# https://github.com/docker/docker.github.io/blob/98c7c9535063ae4cd2cd0a31478a21d16d2f07a3/docker-bake.hcl#L34-L36
|
|
||||||
name: docs-upstream
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- 'master'
|
|
||||||
- 'v[0-9]*'
|
|
||||||
paths:
|
|
||||||
- '.github/workflows/docs-upstream.yml'
|
|
||||||
- 'docs/**'
|
|
||||||
pull_request:
|
|
||||||
paths:
|
|
||||||
- '.github/workflows/docs-upstream.yml'
|
|
||||||
- 'docs/**'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
docs-yaml:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v2
|
|
||||||
with:
|
|
||||||
version: latest
|
|
||||||
-
|
|
||||||
name: Build reference YAML docs
|
|
||||||
uses: docker/bake-action@v2
|
|
||||||
with:
|
|
||||||
targets: update-docs
|
|
||||||
set: |
|
|
||||||
*.output=/tmp/buildx-docs
|
|
||||||
*.cache-from=type=gha,scope=docs-yaml
|
|
||||||
*.cache-to=type=gha,scope=docs-yaml,mode=max
|
|
||||||
env:
|
|
||||||
DOCS_FORMATS: yaml
|
|
||||||
-
|
|
||||||
name: Upload reference YAML docs
|
|
||||||
uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: docs-yaml
|
|
||||||
path: /tmp/buildx-docs/out/reference
|
|
||||||
retention-days: 1
|
|
||||||
|
|
||||||
validate:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
needs:
|
|
||||||
- docs-yaml
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
repository: docker/docker.github.io
|
|
||||||
-
|
|
||||||
name: Install js-yaml
|
|
||||||
run: npm install js-yaml
|
|
||||||
-
|
|
||||||
# use the actual buildx ref that triggers this workflow, so we make
|
|
||||||
# sure pages fetched by docs repo are still valid
|
|
||||||
# https://github.com/docker/docker.github.io/blob/98c7c9535063ae4cd2cd0a31478a21d16d2f07a3/_config.yml#L164-L173
|
|
||||||
name: Set correct ref to fetch remote resources
|
|
||||||
uses: actions/github-script@v6
|
|
||||||
with:
|
|
||||||
script: |
|
|
||||||
const fs = require('fs');
|
|
||||||
const yaml = require('js-yaml');
|
|
||||||
|
|
||||||
const configFile = '_config.yml'
|
|
||||||
const config = yaml.load(fs.readFileSync(configFile, 'utf8'));
|
|
||||||
for (const remote of config['fetch-remote']) {
|
|
||||||
if (remote['repo'] != 'https://github.com/docker/buildx') {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
remote['ref'] = "${{ github.ref }}";
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
fs.writeFileSync(configFile, yaml.dump(config), 'utf8')
|
|
||||||
} catch (err) {
|
|
||||||
console.error(err.message)
|
|
||||||
process.exit(1)
|
|
||||||
}
|
|
||||||
-
|
|
||||||
name: Prepare
|
|
||||||
run: |
|
|
||||||
# print docs jekyll config updated in previous step
|
|
||||||
yq _config.yml
|
|
||||||
# cleanup reference yaml docs and js-yaml module
|
|
||||||
rm -rf ./_data/buildx/* ./node_modules
|
|
||||||
-
|
|
||||||
name: Download built reference YAML docs
|
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: docs-yaml
|
|
||||||
path: ./_data/buildx/
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v2
|
|
||||||
with:
|
|
||||||
version: latest
|
|
||||||
-
|
|
||||||
name: Validate
|
|
||||||
uses: docker/bake-action@v2
|
|
||||||
with:
|
|
||||||
targets: validate
|
|
||||||
set: |
|
|
||||||
*.cache-from=type=gha,scope=docs-upstream
|
|
||||||
*.cache-to=type=gha,scope=docs-upstream,mode=max
|
|
||||||
74
.github/workflows/e2e.yml
vendored
74
.github/workflows/e2e.yml
vendored
@@ -1,9 +1,5 @@
|
|||||||
name: e2e
|
name: e2e
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
on:
|
on:
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
push:
|
push:
|
||||||
@@ -16,44 +12,8 @@ on:
|
|||||||
- 'v[0-9]*'
|
- 'v[0-9]*'
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
env:
|
|
||||||
BIN_OUT: ./bin
|
|
||||||
steps:
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v2
|
|
||||||
with:
|
|
||||||
version: latest
|
|
||||||
-
|
|
||||||
name: Build
|
|
||||||
uses: docker/bake-action@v2
|
|
||||||
with:
|
|
||||||
targets: binaries
|
|
||||||
set: |
|
|
||||||
*.cache-from=type=gha,scope=release
|
|
||||||
*.cache-from=type=gha,scope=binaries
|
|
||||||
*.cache-to=type=gha,scope=binaries
|
|
||||||
-
|
|
||||||
name: Rename binary
|
|
||||||
run: |
|
|
||||||
mv ${{ env.BIN_OUT }}/buildx ${{ env.BIN_OUT }}/docker-buildx
|
|
||||||
-
|
|
||||||
name: Upload artifacts
|
|
||||||
uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: binary
|
|
||||||
path: ${{ env.BIN_OUT }}
|
|
||||||
if-no-files-found: error
|
|
||||||
retention-days: 7
|
|
||||||
|
|
||||||
driver:
|
driver:
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-20.04
|
||||||
needs:
|
|
||||||
- build
|
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
matrix:
|
matrix:
|
||||||
@@ -61,7 +21,6 @@ jobs:
|
|||||||
- docker
|
- docker
|
||||||
- docker-container
|
- docker-container
|
||||||
- kubernetes
|
- kubernetes
|
||||||
- remote
|
|
||||||
buildkit:
|
buildkit:
|
||||||
- moby/buildkit:buildx-stable-1
|
- moby/buildkit:buildx-stable-1
|
||||||
- moby/buildkit:master
|
- moby/buildkit:master
|
||||||
@@ -72,13 +31,10 @@ jobs:
|
|||||||
- mnode-false
|
- mnode-false
|
||||||
- mnode-true
|
- mnode-true
|
||||||
platforms:
|
platforms:
|
||||||
- linux/amd64
|
|
||||||
- linux/amd64,linux/arm64
|
- linux/amd64,linux/arm64
|
||||||
include:
|
include:
|
||||||
- driver: kubernetes
|
- driver: kubernetes
|
||||||
driver-opt: qemu.install=true
|
driver-opt: qemu.install=true
|
||||||
- driver: remote
|
|
||||||
endpoint: tcp://localhost:1234
|
|
||||||
exclude:
|
exclude:
|
||||||
- driver: docker
|
- driver: docker
|
||||||
multi-node: mnode-true
|
multi-node: mnode-true
|
||||||
@@ -86,28 +42,18 @@ jobs:
|
|||||||
buildkit-cfg: bkcfg-true
|
buildkit-cfg: bkcfg-true
|
||||||
- driver: docker-container
|
- driver: docker-container
|
||||||
multi-node: mnode-true
|
multi-node: mnode-true
|
||||||
- driver: remote
|
|
||||||
multi-node: mnode-true
|
|
||||||
- driver: remote
|
|
||||||
buildkit-cfg: bkcfg-true
|
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v2
|
||||||
-
|
-
|
||||||
name: Set up QEMU
|
name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v2
|
uses: docker/setup-qemu-action@v1
|
||||||
if: matrix.driver == 'docker' || matrix.driver == 'docker-container'
|
if: matrix.driver == 'docker' || matrix.driver == 'docker-container'
|
||||||
-
|
-
|
||||||
name: Install buildx
|
name: Install buildx
|
||||||
uses: actions/download-artifact@v3
|
|
||||||
with:
|
|
||||||
name: binary
|
|
||||||
path: /home/runner/.docker/cli-plugins
|
|
||||||
-
|
|
||||||
name: Fix perms and check
|
|
||||||
run: |
|
run: |
|
||||||
chmod +x /home/runner/.docker/cli-plugins/docker-buildx
|
make install
|
||||||
docker buildx version
|
docker buildx version
|
||||||
-
|
-
|
||||||
name: Init env vars
|
name: Init env vars
|
||||||
@@ -129,7 +75,7 @@ jobs:
|
|||||||
-
|
-
|
||||||
name: Install k3s
|
name: Install k3s
|
||||||
if: matrix.driver == 'kubernetes'
|
if: matrix.driver == 'kubernetes'
|
||||||
uses: debianmaster/actions-k3s@b9cf3f599fd118699a3c8a0d18a2f2bda6cf4ce4
|
uses: debianmaster/actions-k3s@v1.0.3
|
||||||
id: k3s
|
id: k3s
|
||||||
with:
|
with:
|
||||||
version: v1.21.2-k3s1
|
version: v1.21.2-k3s1
|
||||||
@@ -143,17 +89,6 @@ jobs:
|
|||||||
if: matrix.driver == 'kubernetes'
|
if: matrix.driver == 'kubernetes'
|
||||||
run: |
|
run: |
|
||||||
kubectl get nodes
|
kubectl get nodes
|
||||||
-
|
|
||||||
name: Launch remote buildkitd
|
|
||||||
if: matrix.driver == 'remote'
|
|
||||||
run: |
|
|
||||||
docker run -d \
|
|
||||||
--privileged \
|
|
||||||
--name=remote-buildkit \
|
|
||||||
-p 1234:1234 \
|
|
||||||
${{ matrix.buildkit }} \
|
|
||||||
--addr unix:///run/buildkit/buildkitd.sock \
|
|
||||||
--addr tcp://0.0.0.0:1234
|
|
||||||
-
|
-
|
||||||
name: Test
|
name: Test
|
||||||
run: |
|
run: |
|
||||||
@@ -162,5 +97,4 @@ jobs:
|
|||||||
BUILDKIT_IMAGE: ${{ matrix.buildkit }}
|
BUILDKIT_IMAGE: ${{ matrix.buildkit }}
|
||||||
DRIVER: ${{ matrix.driver }}
|
DRIVER: ${{ matrix.driver }}
|
||||||
DRIVER_OPT: ${{ matrix.driver-opt }}
|
DRIVER_OPT: ${{ matrix.driver-opt }}
|
||||||
ENDPOINT: ${{ matrix.endpoint }}
|
|
||||||
PLATFORMS: ${{ matrix.platforms }}
|
PLATFORMS: ${{ matrix.platforms }}
|
||||||
|
|||||||
25
.github/workflows/godev.yml
vendored
Normal file
25
.github/workflows/godev.yml
vendored
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
# Workflow used to make a request to proxy.golang.org to refresh cache on https://pkg.go.dev/github.com/docker/buildx
|
||||||
|
# when a released of buildx is produced
|
||||||
|
name: godev
|
||||||
|
|
||||||
|
on:
|
||||||
|
push:
|
||||||
|
tags:
|
||||||
|
- 'v*'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
update:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
-
|
||||||
|
name: Set up Go
|
||||||
|
uses: actions/setup-go@v2
|
||||||
|
with:
|
||||||
|
go-version: 1.13
|
||||||
|
-
|
||||||
|
name: Call pkg.go.dev
|
||||||
|
run: |
|
||||||
|
go get github.com/${GITHUB_REPOSITORY}@${GITHUB_REF#refs/tags/}
|
||||||
|
env:
|
||||||
|
GO111MODULE: on
|
||||||
|
GOPROXY: https://proxy.golang.org
|
||||||
26
.github/workflows/validate.yml
vendored
26
.github/workflows/validate.yml
vendored
@@ -1,9 +1,5 @@
|
|||||||
name: validate
|
name: validate
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
on:
|
on:
|
||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
push:
|
push:
|
||||||
@@ -30,13 +26,23 @@ jobs:
|
|||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v2
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v2
|
|
||||||
with:
|
|
||||||
version: latest
|
|
||||||
-
|
-
|
||||||
name: Run
|
name: Run
|
||||||
run: |
|
run: |
|
||||||
make ${{ matrix.target }}
|
make ${{ matrix.target }}
|
||||||
|
|
||||||
|
validate-docs-yaml:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs:
|
||||||
|
- validate
|
||||||
|
steps:
|
||||||
|
-
|
||||||
|
name: Checkout
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
-
|
||||||
|
name: Run
|
||||||
|
run: |
|
||||||
|
make docs
|
||||||
|
env:
|
||||||
|
FORMATS: yaml
|
||||||
|
|||||||
@@ -12,29 +12,19 @@ linters:
|
|||||||
- gofmt
|
- gofmt
|
||||||
- govet
|
- govet
|
||||||
- deadcode
|
- deadcode
|
||||||
- depguard
|
|
||||||
- goimports
|
- goimports
|
||||||
- ineffassign
|
- ineffassign
|
||||||
- misspell
|
- misspell
|
||||||
- unused
|
- unused
|
||||||
- varcheck
|
- varcheck
|
||||||
- revive
|
- golint
|
||||||
- staticcheck
|
- staticcheck
|
||||||
- typecheck
|
- typecheck
|
||||||
- structcheck
|
- structcheck
|
||||||
disable-all: true
|
disable-all: true
|
||||||
|
|
||||||
linters-settings:
|
|
||||||
depguard:
|
|
||||||
list-type: blacklist
|
|
||||||
include-go-root: true
|
|
||||||
packages:
|
|
||||||
# The io/ioutil package has been deprecated.
|
|
||||||
# https://go.dev/doc/go1.16#ioutil
|
|
||||||
- io/ioutil
|
|
||||||
|
|
||||||
issues:
|
issues:
|
||||||
exclude-rules:
|
exclude-rules:
|
||||||
- linters:
|
- linters:
|
||||||
- revive
|
- golint
|
||||||
text: "stutters"
|
text: "stutters"
|
||||||
|
|||||||
13
.yamllint.yml
Normal file
13
.yamllint.yml
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
ignore: |
|
||||||
|
/vendor
|
||||||
|
|
||||||
|
extends: default
|
||||||
|
|
||||||
|
yaml-files:
|
||||||
|
- '*.yaml'
|
||||||
|
- '*.yml'
|
||||||
|
|
||||||
|
rules:
|
||||||
|
truthy: disable
|
||||||
|
line-length: disable
|
||||||
|
document-start: disable
|
||||||
17
Dockerfile
17
Dockerfile
@@ -1,13 +1,12 @@
|
|||||||
# syntax=docker/dockerfile:1.4
|
# syntax=docker/dockerfile:1.3
|
||||||
|
|
||||||
ARG GO_VERSION=1.18
|
ARG GO_VERSION=1.17
|
||||||
ARG XX_VERSION=1.1.2
|
ARG DOCKERD_VERSION=20.10.8
|
||||||
ARG DOCKERD_VERSION=20.10.14
|
|
||||||
|
|
||||||
FROM docker:$DOCKERD_VERSION AS dockerd-release
|
FROM docker:$DOCKERD_VERSION AS dockerd-release
|
||||||
|
|
||||||
# xx is a helper for cross-compilation
|
# xx is a helper for cross-compilation
|
||||||
FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
|
FROM --platform=$BUILDPLATFORM tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04 AS xx
|
||||||
|
|
||||||
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest
|
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest
|
||||||
|
|
||||||
@@ -15,7 +14,6 @@ FROM golatest AS gobase
|
|||||||
COPY --from=xx / /
|
COPY --from=xx / /
|
||||||
RUN apk add --no-cache file git
|
RUN apk add --no-cache file git
|
||||||
ENV GOFLAGS=-mod=vendor
|
ENV GOFLAGS=-mod=vendor
|
||||||
ENV CGO_ENABLED=0
|
|
||||||
WORKDIR /src
|
WORKDIR /src
|
||||||
|
|
||||||
FROM gobase AS buildx-version
|
FROM gobase AS buildx-version
|
||||||
@@ -25,6 +23,7 @@ RUN --mount=target=. \
|
|||||||
echo -n "${VERSION}" | tee /tmp/.version;
|
echo -n "${VERSION}" | tee /tmp/.version;
|
||||||
|
|
||||||
FROM gobase AS buildx-build
|
FROM gobase AS buildx-build
|
||||||
|
ENV CGO_ENABLED=0
|
||||||
ARG LDFLAGS="-w -s"
|
ARG LDFLAGS="-w -s"
|
||||||
ARG TARGETPLATFORM
|
ARG TARGETPLATFORM
|
||||||
RUN --mount=type=bind,target=. \
|
RUN --mount=type=bind,target=. \
|
||||||
@@ -34,7 +33,7 @@ RUN --mount=type=bind,target=. \
|
|||||||
set -x; xx-go build -ldflags "$(cat /tmp/.ldflags) ${LDFLAGS}" -o /usr/bin/buildx ./cmd/buildx && \
|
set -x; xx-go build -ldflags "$(cat /tmp/.ldflags) ${LDFLAGS}" -o /usr/bin/buildx ./cmd/buildx && \
|
||||||
xx-verify --static /usr/bin/buildx
|
xx-verify --static /usr/bin/buildx
|
||||||
|
|
||||||
FROM gobase AS test
|
FROM buildx-build AS test
|
||||||
RUN --mount=type=bind,target=. \
|
RUN --mount=type=bind,target=. \
|
||||||
--mount=type=cache,target=/root/.cache \
|
--mount=type=cache,target=/root/.cache \
|
||||||
--mount=type=cache,target=/go/pkg/mod \
|
--mount=type=cache,target=/go/pkg/mod \
|
||||||
@@ -45,13 +44,13 @@ FROM scratch AS test-coverage
|
|||||||
COPY --from=test /tmp/coverage.txt /coverage.txt
|
COPY --from=test /tmp/coverage.txt /coverage.txt
|
||||||
|
|
||||||
FROM scratch AS binaries-unix
|
FROM scratch AS binaries-unix
|
||||||
COPY --link --from=buildx-build /usr/bin/buildx /
|
COPY --from=buildx-build /usr/bin/buildx /
|
||||||
|
|
||||||
FROM binaries-unix AS binaries-darwin
|
FROM binaries-unix AS binaries-darwin
|
||||||
FROM binaries-unix AS binaries-linux
|
FROM binaries-unix AS binaries-linux
|
||||||
|
|
||||||
FROM scratch AS binaries-windows
|
FROM scratch AS binaries-windows
|
||||||
COPY --link --from=buildx-build /usr/bin/buildx /buildx.exe
|
COPY --from=buildx-build /usr/bin/buildx /buildx.exe
|
||||||
|
|
||||||
FROM binaries-$TARGETOS AS binaries
|
FROM binaries-$TARGETOS AS binaries
|
||||||
|
|
||||||
|
|||||||
@@ -152,7 +152,6 @@ made through a pull request.
|
|||||||
people = [
|
people = [
|
||||||
"akihirosuda",
|
"akihirosuda",
|
||||||
"crazy-max",
|
"crazy-max",
|
||||||
"jedevc",
|
|
||||||
"tiborvass",
|
"tiborvass",
|
||||||
"tonistiigi",
|
"tonistiigi",
|
||||||
]
|
]
|
||||||
@@ -189,11 +188,6 @@ made through a pull request.
|
|||||||
Email = "contact@crazymax.dev"
|
Email = "contact@crazymax.dev"
|
||||||
GitHub = "crazy-max"
|
GitHub = "crazy-max"
|
||||||
|
|
||||||
[people.jedevc]
|
|
||||||
Name = "Justin Chadwell"
|
|
||||||
Email = "me@jedevc.com"
|
|
||||||
GitHub = "jedevc"
|
|
||||||
|
|
||||||
[people.thajeztah]
|
[people.thajeztah]
|
||||||
Name = "Sebastiaan van Stijn"
|
Name = "Sebastiaan van Stijn"
|
||||||
Email = "github@gone.nl"
|
Email = "github@gone.nl"
|
||||||
|
|||||||
147
README.md
147
README.md
@@ -1,10 +1,9 @@
|
|||||||
# buildx
|
# buildx
|
||||||
|
|
||||||
[](https://github.com/docker/buildx/releases/latest)
|
[](https://pkg.go.dev/github.com/docker/buildx)
|
||||||
[](https://pkg.go.dev/github.com/docker/buildx)
|
[](https://github.com/docker/buildx/actions?query=workflow%3Abuild)
|
||||||
[](https://github.com/docker/buildx/actions?query=workflow%3Abuild)
|
[](https://goreportcard.com/report/github.com/docker/buildx)
|
||||||
[](https://goreportcard.com/report/github.com/docker/buildx)
|
[](https://codecov.io/gh/docker/buildx)
|
||||||
[](https://codecov.io/gh/docker/buildx)
|
|
||||||
|
|
||||||
`buildx` is a Docker CLI plugin for extended build capabilities with
|
`buildx` is a Docker CLI plugin for extended build capabilities with
|
||||||
[BuildKit](https://github.com/moby/buildkit).
|
[BuildKit](https://github.com/moby/buildkit).
|
||||||
@@ -22,27 +21,17 @@ Key features:
|
|||||||
# Table of Contents
|
# Table of Contents
|
||||||
|
|
||||||
- [Installing](#installing)
|
- [Installing](#installing)
|
||||||
- [Windows and macOS](#windows-and-macos)
|
- [Docker](#docker)
|
||||||
- [Linux packages](#linux-packages)
|
- [Binary release](#binary-release)
|
||||||
- [Manual download](#manual-download)
|
- [From `Dockerfile`](#from-dockerfile)
|
||||||
- [Dockerfile](#dockerfile)
|
|
||||||
- [Set buildx as the default builder](#set-buildx-as-the-default-builder)
|
- [Set buildx as the default builder](#set-buildx-as-the-default-builder)
|
||||||
- [Building](#building)
|
- [Building](#building)
|
||||||
- [Getting started](#getting-started)
|
- [Getting started](#getting-started)
|
||||||
- [Building with buildx](#building-with-buildx)
|
- [Building with buildx](#building-with-buildx)
|
||||||
- [Working with builder instances](#working-with-builder-instances)
|
- [Working with builder instances](#working-with-builder-instances)
|
||||||
- [Building multi-platform images](#building-multi-platform-images)
|
- [Building multi-platform images](#building-multi-platform-images)
|
||||||
- [Guides](docs/guides)
|
- [High-level build options](#high-level-build-options)
|
||||||
- [High-level build options with Bake](docs/guides/bake/index.md)
|
- [Documentation](docs/reference/buildx.md)
|
||||||
- [CI/CD](docs/guides/cicd.md)
|
|
||||||
- [CNI networking](docs/guides/cni-networking.md)
|
|
||||||
- [Using a custom network](docs/guides/custom-network.md)
|
|
||||||
- [Using a custom registry configuration](docs/guides/custom-registry-config.md)
|
|
||||||
- [OpenTelemetry support](docs/guides/opentelemetry.md)
|
|
||||||
- [Registry mirror](docs/guides/registry-mirror.md)
|
|
||||||
- [Drivers](docs/guides/drivers/index.md)
|
|
||||||
- [Resource limiting](docs/guides/resource-limiting.md)
|
|
||||||
- [Reference](docs/reference/buildx.md)
|
|
||||||
- [`buildx bake`](docs/reference/buildx_bake.md)
|
- [`buildx bake`](docs/reference/buildx_bake.md)
|
||||||
- [`buildx build`](docs/reference/buildx_build.md)
|
- [`buildx build`](docs/reference/buildx_build.md)
|
||||||
- [`buildx create`](docs/reference/buildx_create.md)
|
- [`buildx create`](docs/reference/buildx_create.md)
|
||||||
@@ -67,60 +56,27 @@ Using `buildx` as a docker CLI plugin requires using Docker 19.03 or newer.
|
|||||||
A limited set of functionality works with older versions of Docker when
|
A limited set of functionality works with older versions of Docker when
|
||||||
invoking the binary directly.
|
invoking the binary directly.
|
||||||
|
|
||||||
## Windows and macOS
|
## Docker
|
||||||
|
|
||||||
Docker Buildx is included in [Docker Desktop](https://docs.docker.com/desktop/)
|
`buildx` comes bundled with Docker Desktop and in latest
|
||||||
for Windows and macOS.
|
[Docker CE packages](https://docs.docker.com/engine/install/), but may not be
|
||||||
|
included in third-party software components (in which case follow the
|
||||||
|
[binary release](#binary-release) instructions).
|
||||||
|
|
||||||
## Linux packages
|
## Binary release
|
||||||
|
|
||||||
Docker Linux packages also include Docker Buildx when installed using the
|
You can also download the latest `buildx` binary from the
|
||||||
[DEB or RPM packages](https://docs.docker.com/engine/install/).
|
[GitHub releases](https://github.com/docker/buildx/releases/latest) page, copy
|
||||||
|
it to `~/.docker/cli-plugins` folder with name `docker-buildx` and change the
|
||||||
|
permission to execute:
|
||||||
|
|
||||||
## Manual download
|
```console
|
||||||
|
$ chmod a+x ~/.docker/cli-plugins/docker-buildx
|
||||||
|
```
|
||||||
|
|
||||||
> **Important**
|
## From `Dockerfile`
|
||||||
>
|
|
||||||
> This section is for unattended installation of the buildx component. These
|
|
||||||
> instructions are mostly suitable for testing purposes. We do not recommend
|
|
||||||
> installing buildx using manual download in production environments as they
|
|
||||||
> will not be updated automatically with security updates.
|
|
||||||
>
|
|
||||||
> On Windows and macOS, we recommend that you install [Docker Desktop](https://docs.docker.com/desktop/)
|
|
||||||
> instead. For Linux, we recommend that you follow the [instructions specific for your distribution](#linux-packages).
|
|
||||||
|
|
||||||
You can also download the latest binary from the [GitHub releases page](https://github.com/docker/buildx/releases/latest).
|
Here is how to use buildx inside a Dockerfile through the
|
||||||
|
|
||||||
Rename the relevant binary and copy it to the destination matching your OS:
|
|
||||||
|
|
||||||
| OS | Binary name | Destination folder |
|
|
||||||
| -------- | -------------------- | -----------------------------------------|
|
|
||||||
| Linux | `docker-buildx` | `$HOME/.docker/cli-plugins` |
|
|
||||||
| macOS | `docker-buildx` | `$HOME/.docker/cli-plugins` |
|
|
||||||
| Windows | `docker-buildx.exe` | `%USERPROFILE%\.docker\cli-plugins` |
|
|
||||||
|
|
||||||
Or copy it into one of these folders for installing it system-wide.
|
|
||||||
|
|
||||||
On Unix environments:
|
|
||||||
|
|
||||||
* `/usr/local/lib/docker/cli-plugins` OR `/usr/local/libexec/docker/cli-plugins`
|
|
||||||
* `/usr/lib/docker/cli-plugins` OR `/usr/libexec/docker/cli-plugins`
|
|
||||||
|
|
||||||
On Windows:
|
|
||||||
|
|
||||||
* `C:\ProgramData\Docker\cli-plugins`
|
|
||||||
* `C:\Program Files\Docker\cli-plugins`
|
|
||||||
|
|
||||||
> **Note**
|
|
||||||
>
|
|
||||||
> On Unix environments, it may also be necessary to make it executable with `chmod +x`:
|
|
||||||
> ```shell
|
|
||||||
> $ chmod +x ~/.docker/cli-plugins/docker-buildx
|
|
||||||
> ```
|
|
||||||
|
|
||||||
## Dockerfile
|
|
||||||
|
|
||||||
Here is how to install and use Buildx inside a Dockerfile through the
|
|
||||||
[`docker/buildx-bin`](https://hub.docker.com/r/docker/buildx-bin) image:
|
[`docker/buildx-bin`](https://hub.docker.com/r/docker/buildx-bin) image:
|
||||||
|
|
||||||
```Dockerfile
|
```Dockerfile
|
||||||
@@ -141,17 +97,17 @@ To remove this alias, run [`docker buildx uninstall`](docs/reference/buildx_unin
|
|||||||
|
|
||||||
```console
|
```console
|
||||||
# Buildx 0.6+
|
# Buildx 0.6+
|
||||||
$ docker buildx bake "https://github.com/docker/buildx.git"
|
$ docker buildx bake "git://github.com/docker/buildx"
|
||||||
$ mkdir -p ~/.docker/cli-plugins
|
$ mkdir -p ~/.docker/cli-plugins
|
||||||
$ mv ./bin/buildx ~/.docker/cli-plugins/docker-buildx
|
$ mv ./bin/buildx ~/.docker/cli-plugins/docker-buildx
|
||||||
|
|
||||||
# Docker 19.03+
|
# Docker 19.03+
|
||||||
$ DOCKER_BUILDKIT=1 docker build --platform=local -o . "https://github.com/docker/buildx.git"
|
$ DOCKER_BUILDKIT=1 docker build --platform=local -o . "git://github.com/docker/buildx"
|
||||||
$ mkdir -p ~/.docker/cli-plugins
|
$ mkdir -p ~/.docker/cli-plugins
|
||||||
$ mv buildx ~/.docker/cli-plugins/docker-buildx
|
$ mv buildx ~/.docker/cli-plugins/docker-buildx
|
||||||
|
|
||||||
# Local
|
# Local
|
||||||
$ git clone https://github.com/docker/buildx.git && cd buildx
|
$ git clone git://github.com/docker/buildx && cd buildx
|
||||||
$ make install
|
$ make install
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -185,22 +141,28 @@ specifying target platform. In addition, Buildx also supports new features that
|
|||||||
are not yet available for regular `docker build` like building manifest lists,
|
are not yet available for regular `docker build` like building manifest lists,
|
||||||
distributed caching, and exporting build results to OCI image tarballs.
|
distributed caching, and exporting build results to OCI image tarballs.
|
||||||
|
|
||||||
Buildx is flexible and can be run in different configurations that are exposed
|
Buildx is supposed to be flexible and can be run in different configurations
|
||||||
through various "drivers". Each driver defines how and where a build should
|
that are exposed through a driver concept. Currently, we support a
|
||||||
run, and have different feature sets.
|
[`docker` driver](docs/reference/buildx_create.md#docker-driver) that uses
|
||||||
|
the BuildKit library bundled into the Docker daemon binary, a
|
||||||
|
[`docker-container` driver](docs/reference/buildx_create.md#docker-container-driver)
|
||||||
|
that automatically launches BuildKit inside a Docker container and a
|
||||||
|
[`kubernetes` driver](docs/reference/buildx_create.md#kubernetes-driver) to
|
||||||
|
spin up pods with defined BuildKit container image to build your images. We
|
||||||
|
plan to add more drivers in the future.
|
||||||
|
|
||||||
We currently support the following drivers:
|
The user experience of using buildx is very similar across drivers, but there
|
||||||
- The `docker` driver ([guide](docs/guides/drivers/docker.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
|
are some features that are not currently supported by the `docker` driver,
|
||||||
- The `docker-container` driver ([guide](docs/guides/drivers/docker-container.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
|
because the BuildKit library bundled into docker daemon currently uses a
|
||||||
- The `kubernetes` driver ([guide](docs/guides/drivers/kubernetes.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
|
different storage component. In contrast, all images built with `docker` driver
|
||||||
- The `remote` driver ([guide](docs/guides/drivers/remote.md))
|
are automatically added to the `docker images` view by default, whereas when
|
||||||
|
using other drivers the method for outputting an image needs to be selected
|
||||||
For more information on drivers, see the [drivers guide](docs/guides/drivers/index.md).
|
with `--output`.
|
||||||
|
|
||||||
## Working with builder instances
|
## Working with builder instances
|
||||||
|
|
||||||
By default, buildx will initially use the `docker` driver if it is supported,
|
By default, buildx will initially use the `docker` driver if it is supported,
|
||||||
providing a very similar user experience to the native `docker build`. Note that
|
providing a very similar user experience to the native `docker build`. Note tha
|
||||||
you must use a local shared daemon to build your applications.
|
you must use a local shared daemon to build your applications.
|
||||||
|
|
||||||
Buildx allows you to create new instances of isolated builders. This can be
|
Buildx allows you to create new instances of isolated builders. This can be
|
||||||
@@ -311,7 +273,26 @@ cross-compilation helpers for more advanced use-cases.
|
|||||||
|
|
||||||
## High-level build options
|
## High-level build options
|
||||||
|
|
||||||
See [`docs/guides/bake/index.md`](docs/guides/bake/index.md) for more details.
|
Buildx also aims to provide support for high-level build concepts that go beyond
|
||||||
|
invoking a single build command. We want to support building all the images in
|
||||||
|
your application together and let the users define project specific reusable
|
||||||
|
build flows that can then be easily invoked by anyone.
|
||||||
|
|
||||||
|
BuildKit efficiently handles multiple concurrent build requests and
|
||||||
|
de-duplicating work. The build commands can be combined with general-purpose
|
||||||
|
command runners (for example, `make`). However, these tools generally invoke
|
||||||
|
builds in sequence and therefore cannot leverage the full potential of BuildKit
|
||||||
|
parallelization, or combine BuildKit’s output for the user. For this use case,
|
||||||
|
we have added a command called [`docker buildx bake`](docs/reference/buildx_bake.md).
|
||||||
|
|
||||||
|
The `bake` command supports building images from compose files, similar to
|
||||||
|
[`docker-compose build`](https://docs.docker.com/compose/reference/build/),
|
||||||
|
but allowing all the services to be built concurrently as part of a single
|
||||||
|
request.
|
||||||
|
|
||||||
|
There is also support for custom build rules from HCL/JSON files allowing
|
||||||
|
better code reuse and different target groups. The design of bake is in very
|
||||||
|
early stages and we are looking for feedback from users.
|
||||||
|
|
||||||
# Contributing
|
# Contributing
|
||||||
|
|
||||||
|
|||||||
374
bake/bake.go
374
bake/bake.go
@@ -4,12 +4,10 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"encoding/csv"
|
"encoding/csv"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
"regexp"
|
||||||
"sort"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
@@ -17,21 +15,15 @@ import (
|
|||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/build"
|
||||||
"github.com/docker/buildx/util/buildflags"
|
"github.com/docker/buildx/util/buildflags"
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
"github.com/docker/cli/cli/config"
|
"github.com/docker/docker/pkg/urlutil"
|
||||||
"github.com/docker/docker/builder/remotecontext/urlutil"
|
|
||||||
hcl "github.com/hashicorp/hcl/v2"
|
hcl "github.com/hashicorp/hcl/v2"
|
||||||
"github.com/moby/buildkit/client/llb"
|
"github.com/moby/buildkit/client/llb"
|
||||||
"github.com/moby/buildkit/session/auth/authprovider"
|
"github.com/moby/buildkit/session/auth/authprovider"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var httpPrefix = regexp.MustCompile(`^https?://`)
|
||||||
httpPrefix = regexp.MustCompile(`^https?://`)
|
var gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)
|
||||||
gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)
|
|
||||||
|
|
||||||
validTargetNameChars = `[a-zA-Z0-9_-]+`
|
|
||||||
targetNamePattern = regexp.MustCompile(`^` + validTargetNameChars + `$`)
|
|
||||||
)
|
|
||||||
|
|
||||||
type File struct {
|
type File struct {
|
||||||
Name string
|
Name string
|
||||||
@@ -63,21 +55,12 @@ func ReadLocalFiles(names []string) ([]File, error) {
|
|||||||
out := make([]File, 0, len(names))
|
out := make([]File, 0, len(names))
|
||||||
|
|
||||||
for _, n := range names {
|
for _, n := range names {
|
||||||
var dt []byte
|
dt, err := ioutil.ReadFile(n)
|
||||||
var err error
|
if err != nil {
|
||||||
if n == "-" {
|
if isDefault && errors.Is(err, os.ErrNotExist) {
|
||||||
dt, err = io.ReadAll(os.Stdin)
|
continue
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
dt, err = os.ReadFile(n)
|
|
||||||
if err != nil {
|
|
||||||
if isDefault && errors.Is(err, os.ErrNotExist) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
out = append(out, File{Name: n, Data: dt})
|
out = append(out, File{Name: n, Data: dt})
|
||||||
}
|
}
|
||||||
@@ -90,10 +73,6 @@ func ReadTargets(ctx context.Context, files []File, targets, overrides []string,
|
|||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, t := range targets {
|
|
||||||
targets[i] = sanitizeTargetName(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
o, err := c.newOverrides(overrides)
|
o, err := c.newOverrides(overrides)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
@@ -120,80 +99,12 @@ func ReadTargets(ctx context.Context, files []File, targets, overrides []string,
|
|||||||
g = []*Group{{Targets: group.Targets}}
|
g = []*Group{{Targets: group.Targets}}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
var gt []string
|
g = []*Group{{Targets: targets}}
|
||||||
for _, target := range targets {
|
|
||||||
isGroup := false
|
|
||||||
for _, group := range c.Groups {
|
|
||||||
if target == group.Name {
|
|
||||||
gt = append(gt, group.Targets...)
|
|
||||||
isGroup = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !isGroup {
|
|
||||||
gt = append(gt, target)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
g = []*Group{{Targets: dedupSlice(gt)}}
|
|
||||||
}
|
|
||||||
|
|
||||||
for name, t := range m {
|
|
||||||
if err := c.loadLinks(name, t, m, o, nil); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return m, g, nil
|
return m, g, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func dedupSlice(s []string) []string {
|
|
||||||
if len(s) == 0 {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
var res []string
|
|
||||||
seen := make(map[string]struct{})
|
|
||||||
for _, val := range s {
|
|
||||||
if _, ok := seen[val]; !ok {
|
|
||||||
res = append(res, val)
|
|
||||||
seen[val] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func dedupMap(ms ...map[string]string) map[string]string {
|
|
||||||
if len(ms) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
res := map[string]string{}
|
|
||||||
for _, m := range ms {
|
|
||||||
if len(m) == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for k, v := range m {
|
|
||||||
if _, ok := res[k]; !ok {
|
|
||||||
res[k] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func sliceToMap(env []string) (res map[string]string) {
|
|
||||||
res = make(map[string]string)
|
|
||||||
for _, s := range env {
|
|
||||||
kv := strings.SplitN(s, "=", 2)
|
|
||||||
key := kv[0]
|
|
||||||
switch {
|
|
||||||
case len(kv) == 1:
|
|
||||||
res[key] = ""
|
|
||||||
default:
|
|
||||||
res[key] = kv[1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error) {
|
func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error) {
|
||||||
defer func() {
|
defer func() {
|
||||||
err = formatHCLError(err, files)
|
err = formatHCLError(err, files)
|
||||||
@@ -227,9 +138,8 @@ func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error)
|
|||||||
|
|
||||||
if len(fs) > 0 {
|
if len(fs) > 0 {
|
||||||
if err := hclparser.Parse(hcl.MergeFiles(fs), hclparser.Opt{
|
if err := hclparser.Parse(hcl.MergeFiles(fs), hclparser.Opt{
|
||||||
LookupVar: os.LookupEnv,
|
LookupVar: os.LookupEnv,
|
||||||
Vars: defaults,
|
Vars: defaults,
|
||||||
ValidateLabel: validateTargetName,
|
|
||||||
}, &c); err.HasErrors() {
|
}, &c); err.HasErrors() {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -257,22 +167,15 @@ func ParseFile(dt []byte, fn string) (*Config, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func ParseComposeFile(dt []byte, fn string) (*Config, bool, error) {
|
func ParseComposeFile(dt []byte, fn string) (*Config, bool, error) {
|
||||||
envs := sliceToMap(os.Environ())
|
|
||||||
if wd, err := os.Getwd(); err == nil {
|
|
||||||
envs, err = loadDotEnv(envs, wd)
|
|
||||||
if err != nil {
|
|
||||||
return nil, true, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
fnl := strings.ToLower(fn)
|
fnl := strings.ToLower(fn)
|
||||||
if strings.HasSuffix(fnl, ".yml") || strings.HasSuffix(fnl, ".yaml") {
|
if strings.HasSuffix(fnl, ".yml") || strings.HasSuffix(fnl, ".yaml") {
|
||||||
cfg, err := ParseCompose(dt, envs)
|
cfg, err := ParseCompose(dt)
|
||||||
return cfg, true, err
|
return cfg, true, err
|
||||||
}
|
}
|
||||||
if strings.HasSuffix(fnl, ".json") || strings.HasSuffix(fnl, ".hcl") {
|
if strings.HasSuffix(fnl, ".json") || strings.HasSuffix(fnl, ".hcl") {
|
||||||
return nil, false, nil
|
return nil, false, nil
|
||||||
}
|
}
|
||||||
cfg, err := ParseCompose(dt, envs)
|
cfg, err := ParseCompose(dt)
|
||||||
return cfg, err == nil, err
|
return cfg, err == nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -356,46 +259,10 @@ func (c Config) expandTargets(pattern string) ([]string, error) {
|
|||||||
return names, nil
|
return names, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Config) loadLinks(name string, t *Target, m map[string]*Target, o map[string]map[string]Override, visited []string) error {
|
|
||||||
visited = append(visited, name)
|
|
||||||
for _, v := range t.Contexts {
|
|
||||||
if strings.HasPrefix(v, "target:") {
|
|
||||||
target := strings.TrimPrefix(v, "target:")
|
|
||||||
if target == t.Name {
|
|
||||||
return errors.Errorf("target %s cannot link to itself", target)
|
|
||||||
}
|
|
||||||
for _, v := range visited {
|
|
||||||
if v == target {
|
|
||||||
return errors.Errorf("infinite loop from %s to %s", name, target)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
t2, ok := m[target]
|
|
||||||
if !ok {
|
|
||||||
var err error
|
|
||||||
t2, err = c.ResolveTarget(target, o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
t2.Outputs = nil
|
|
||||||
t2.linked = true
|
|
||||||
m[target] = t2
|
|
||||||
}
|
|
||||||
if err := c.loadLinks(target, t2, m, o, visited); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if len(t.Platforms) > 1 && len(t2.Platforms) > 1 {
|
|
||||||
if !sliceEqual(t.Platforms, t2.Platforms) {
|
|
||||||
return errors.Errorf("target %s can't be used by %s because it is defined for different platforms %v and %v", target, name, t2.Platforms, t.Platforms)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c Config) newOverrides(v []string) (map[string]map[string]Override, error) {
|
func (c Config) newOverrides(v []string) (map[string]map[string]Override, error) {
|
||||||
m := map[string]map[string]Override{}
|
m := map[string]map[string]Override{}
|
||||||
for _, v := range v {
|
for _, v := range v {
|
||||||
|
|
||||||
parts := strings.SplitN(v, "=", 2)
|
parts := strings.SplitN(v, "=", 2)
|
||||||
keys := strings.SplitN(parts[0], ".", 3)
|
keys := strings.SplitN(parts[0], ".", 3)
|
||||||
if len(keys) < 2 {
|
if len(keys) < 2 {
|
||||||
@@ -440,11 +307,6 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
|
|||||||
o.Value = v
|
o.Value = v
|
||||||
}
|
}
|
||||||
fallthrough
|
fallthrough
|
||||||
case "contexts":
|
|
||||||
if len(keys) != 3 {
|
|
||||||
return nil, errors.Errorf("invalid key %s, contexts requires name", parts[0])
|
|
||||||
}
|
|
||||||
fallthrough
|
|
||||||
default:
|
default:
|
||||||
if len(parts) == 2 {
|
if len(parts) == 2 {
|
||||||
o.Value = parts[1]
|
o.Value = parts[1]
|
||||||
@@ -458,12 +320,12 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (c Config) ResolveGroup(name string) []string {
|
func (c Config) ResolveGroup(name string) []string {
|
||||||
return dedupSlice(c.group(name, map[string][]string{}))
|
return c.group(name, map[string]struct{}{})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Config) group(name string, visited map[string][]string) []string {
|
func (c Config) group(name string, visited map[string]struct{}) []string {
|
||||||
if _, ok := visited[name]; ok {
|
if _, ok := visited[name]; ok {
|
||||||
return visited[name]
|
return nil
|
||||||
}
|
}
|
||||||
var g *Group
|
var g *Group
|
||||||
for _, group := range c.Groups {
|
for _, group := range c.Groups {
|
||||||
@@ -475,26 +337,19 @@ func (c Config) group(name string, visited map[string][]string) []string {
|
|||||||
if g == nil {
|
if g == nil {
|
||||||
return []string{name}
|
return []string{name}
|
||||||
}
|
}
|
||||||
visited[name] = []string{}
|
visited[name] = struct{}{}
|
||||||
targets := make([]string, 0, len(g.Targets))
|
targets := make([]string, 0, len(g.Targets))
|
||||||
for _, t := range g.Targets {
|
for _, t := range g.Targets {
|
||||||
tgroup := c.group(t, visited)
|
targets = append(targets, c.group(t, visited)...)
|
||||||
if len(tgroup) > 0 {
|
|
||||||
targets = append(targets, tgroup...)
|
|
||||||
} else {
|
|
||||||
targets = append(targets, t)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
visited[name] = targets
|
|
||||||
return targets
|
return targets
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Config) ResolveTarget(name string, overrides map[string]map[string]Override) (*Target, error) {
|
func (c Config) ResolveTarget(name string, overrides map[string]map[string]Override) (*Target, error) {
|
||||||
t, err := c.target(name, map[string]*Target{}, overrides)
|
t, err := c.target(name, map[string]struct{}{}, overrides)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
t.Inherits = nil
|
|
||||||
if t.Context == nil {
|
if t.Context == nil {
|
||||||
s := "."
|
s := "."
|
||||||
t.Context = &s
|
t.Context = &s
|
||||||
@@ -506,11 +361,11 @@ func (c Config) ResolveTarget(name string, overrides map[string]map[string]Overr
|
|||||||
return t, nil
|
return t, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Config) target(name string, visited map[string]*Target, overrides map[string]map[string]Override) (*Target, error) {
|
func (c Config) target(name string, visited map[string]struct{}, overrides map[string]map[string]Override) (*Target, error) {
|
||||||
if t, ok := visited[name]; ok {
|
if _, ok := visited[name]; ok {
|
||||||
return t, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
visited[name] = nil
|
visited[name] = struct{}{}
|
||||||
var t *Target
|
var t *Target
|
||||||
for _, target := range c.Targets {
|
for _, target := range c.Targets {
|
||||||
if target.Name == name {
|
if target.Name == name {
|
||||||
@@ -531,6 +386,7 @@ func (c Config) target(name string, visited map[string]*Target, overrides map[st
|
|||||||
tt.Merge(t)
|
tt.Merge(t)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
t.Inherits = nil
|
||||||
m := defaultTarget()
|
m := defaultTarget()
|
||||||
m.Merge(tt)
|
m.Merge(tt)
|
||||||
m.Merge(t)
|
m.Merge(t)
|
||||||
@@ -538,8 +394,8 @@ func (c Config) target(name string, visited map[string]*Target, overrides map[st
|
|||||||
if err := tt.AddOverrides(overrides[name]); err != nil {
|
if err := tt.AddOverrides(overrides[name]); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
tt.normalize()
|
tt.normalize()
|
||||||
visited[name] = tt
|
|
||||||
return tt, nil
|
return tt, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -556,7 +412,6 @@ type Target struct {
|
|||||||
Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional"`
|
Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional"`
|
||||||
|
|
||||||
Context *string `json:"context,omitempty" hcl:"context,optional"`
|
Context *string `json:"context,omitempty" hcl:"context,optional"`
|
||||||
Contexts map[string]string `json:"contexts,omitempty" hcl:"contexts,optional"`
|
|
||||||
Dockerfile *string `json:"dockerfile,omitempty" hcl:"dockerfile,optional"`
|
Dockerfile *string `json:"dockerfile,omitempty" hcl:"dockerfile,optional"`
|
||||||
DockerfileInline *string `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional"`
|
DockerfileInline *string `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional"`
|
||||||
Args map[string]string `json:"args,omitempty" hcl:"args,optional"`
|
Args map[string]string `json:"args,omitempty" hcl:"args,optional"`
|
||||||
@@ -571,12 +426,8 @@ type Target struct {
|
|||||||
Outputs []string `json:"output,omitempty" hcl:"output,optional"`
|
Outputs []string `json:"output,omitempty" hcl:"output,optional"`
|
||||||
Pull *bool `json:"pull,omitempty" hcl:"pull,optional"`
|
Pull *bool `json:"pull,omitempty" hcl:"pull,optional"`
|
||||||
NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional"`
|
NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional"`
|
||||||
NetworkMode *string `json:"-" hcl:"-"`
|
|
||||||
NoCacheFilter []string `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional"`
|
|
||||||
// IMPORTANT: if you add more fields here, do not forget to update newOverrides and docs/guides/bake/file-definition.md.
|
|
||||||
|
|
||||||
// linked is a private field to mark a target used as a linked one
|
// IMPORTANT: if you add more fields here, do not forget to update newOverrides and README.
|
||||||
linked bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *Target) normalize() {
|
func (t *Target) normalize() {
|
||||||
@@ -587,16 +438,6 @@ func (t *Target) normalize() {
|
|||||||
t.CacheFrom = removeDupes(t.CacheFrom)
|
t.CacheFrom = removeDupes(t.CacheFrom)
|
||||||
t.CacheTo = removeDupes(t.CacheTo)
|
t.CacheTo = removeDupes(t.CacheTo)
|
||||||
t.Outputs = removeDupes(t.Outputs)
|
t.Outputs = removeDupes(t.Outputs)
|
||||||
t.NoCacheFilter = removeDupes(t.NoCacheFilter)
|
|
||||||
|
|
||||||
for k, v := range t.Contexts {
|
|
||||||
if v == "" {
|
|
||||||
delete(t.Contexts, k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(t.Contexts) == 0 {
|
|
||||||
t.Contexts = nil
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *Target) Merge(t2 *Target) {
|
func (t *Target) Merge(t2 *Target) {
|
||||||
@@ -615,12 +456,6 @@ func (t *Target) Merge(t2 *Target) {
|
|||||||
}
|
}
|
||||||
t.Args[k] = v
|
t.Args[k] = v
|
||||||
}
|
}
|
||||||
for k, v := range t2.Contexts {
|
|
||||||
if t.Contexts == nil {
|
|
||||||
t.Contexts = map[string]string{}
|
|
||||||
}
|
|
||||||
t.Contexts[k] = v
|
|
||||||
}
|
|
||||||
for k, v := range t2.Labels {
|
for k, v := range t2.Labels {
|
||||||
if t.Labels == nil {
|
if t.Labels == nil {
|
||||||
t.Labels = map[string]string{}
|
t.Labels = map[string]string{}
|
||||||
@@ -657,12 +492,6 @@ func (t *Target) Merge(t2 *Target) {
|
|||||||
if t2.NoCache != nil {
|
if t2.NoCache != nil {
|
||||||
t.NoCache = t2.NoCache
|
t.NoCache = t2.NoCache
|
||||||
}
|
}
|
||||||
if t2.NetworkMode != nil {
|
|
||||||
t.NetworkMode = t2.NetworkMode
|
|
||||||
}
|
|
||||||
if t2.NoCacheFilter != nil { // merge
|
|
||||||
t.NoCacheFilter = append(t.NoCacheFilter, t2.NoCacheFilter...)
|
|
||||||
}
|
|
||||||
t.Inherits = append(t.Inherits, t2.Inherits...)
|
t.Inherits = append(t.Inherits, t2.Inherits...)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -683,14 +512,7 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
|
|||||||
t.Args = map[string]string{}
|
t.Args = map[string]string{}
|
||||||
}
|
}
|
||||||
t.Args[keys[1]] = value
|
t.Args[keys[1]] = value
|
||||||
case "contexts":
|
|
||||||
if len(keys) != 2 {
|
|
||||||
return errors.Errorf("contexts require name")
|
|
||||||
}
|
|
||||||
if t.Contexts == nil {
|
|
||||||
t.Contexts = map[string]string{}
|
|
||||||
}
|
|
||||||
t.Contexts[keys[1]] = value
|
|
||||||
case "labels":
|
case "labels":
|
||||||
if len(keys) != 2 {
|
if len(keys) != 2 {
|
||||||
return errors.Errorf("labels require name")
|
return errors.Errorf("labels require name")
|
||||||
@@ -721,8 +543,6 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
|
|||||||
return errors.Errorf("invalid value %s for boolean key no-cache", value)
|
return errors.Errorf("invalid value %s for boolean key no-cache", value)
|
||||||
}
|
}
|
||||||
t.NoCache = &noCache
|
t.NoCache = &noCache
|
||||||
case "no-cache-filter":
|
|
||||||
t.NoCacheFilter = o.ArrValue
|
|
||||||
case "pull":
|
case "pull":
|
||||||
pull, err := strconv.ParseBool(value)
|
pull, err := strconv.ParseBool(value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -766,21 +586,6 @@ func updateContext(t *build.Inputs, inp *Input) {
|
|||||||
if inp == nil || inp.State == nil {
|
if inp == nil || inp.State == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
for k, v := range t.NamedContexts {
|
|
||||||
if v.Path == "." {
|
|
||||||
t.NamedContexts[k] = build.NamedContext{Path: inp.URL}
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(v.Path, "cwd://") || strings.HasPrefix(v.Path, "target:") || strings.HasPrefix(v.Path, "docker-image:") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if IsRemoteURL(v.Path) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
st := llb.Scratch().File(llb.Copy(*inp.State, v.Path, "/"), llb.WithCustomNamef("set context %s to %s", k, v.Path))
|
|
||||||
t.NamedContexts[k] = build.NamedContext{State: &st}
|
|
||||||
}
|
|
||||||
|
|
||||||
if t.ContextPath == "." {
|
if t.ContextPath == "." {
|
||||||
t.ContextPath = inp.URL
|
t.ContextPath = inp.URL
|
||||||
return
|
return
|
||||||
@@ -795,59 +600,6 @@ func updateContext(t *build.Inputs, inp *Input) {
|
|||||||
t.ContextState = &st
|
t.ContextState = &st
|
||||||
}
|
}
|
||||||
|
|
||||||
// validateContextsEntitlements is a basic check to ensure contexts do not
|
|
||||||
// escape local directories when loaded from remote sources. This is to be
|
|
||||||
// replaced with proper entitlements support in the future.
|
|
||||||
func validateContextsEntitlements(t build.Inputs, inp *Input) error {
|
|
||||||
if inp == nil || inp.State == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if v, ok := os.LookupEnv("BAKE_ALLOW_REMOTE_FS_ACCESS"); ok {
|
|
||||||
if vv, _ := strconv.ParseBool(v); vv {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if t.ContextState == nil {
|
|
||||||
if err := checkPath(t.ContextPath); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, v := range t.NamedContexts {
|
|
||||||
if v.State != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err := checkPath(v.Path); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkPath(p string) error {
|
|
||||||
if IsRemoteURL(p) || strings.HasPrefix(p, "target:") || strings.HasPrefix(p, "docker-image:") {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
p, err := filepath.EvalSymlinks(p)
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
wd, err := os.Getwd()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
rel, err := filepath.Rel(wd, p)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
|
|
||||||
return errors.Errorf("path %s is outside of the working directory, please set BAKE_ALLOW_REMOTE_FS_ACCESS=1", p)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
||||||
if v := t.Context; v != nil && *v == "-" {
|
if v := t.Context; v != nil && *v == "-" {
|
||||||
return nil, errors.Errorf("context from stdin not allowed in bake")
|
return nil, errors.Errorf("context from stdin not allowed in bake")
|
||||||
@@ -880,15 +632,10 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
if t.Pull != nil {
|
if t.Pull != nil {
|
||||||
pull = *t.Pull
|
pull = *t.Pull
|
||||||
}
|
}
|
||||||
networkMode := ""
|
|
||||||
if t.NetworkMode != nil {
|
|
||||||
networkMode = *t.NetworkMode
|
|
||||||
}
|
|
||||||
|
|
||||||
bi := build.Inputs{
|
bi := build.Inputs{
|
||||||
ContextPath: contextPath,
|
ContextPath: contextPath,
|
||||||
DockerfilePath: dockerfilePath,
|
DockerfilePath: dockerfilePath,
|
||||||
NamedContexts: toNamedContexts(t.Contexts),
|
|
||||||
}
|
}
|
||||||
if t.DockerfileInline != nil {
|
if t.DockerfileInline != nil {
|
||||||
bi.DockerfileInline = *t.DockerfileInline
|
bi.DockerfileInline = *t.DockerfileInline
|
||||||
@@ -897,28 +644,16 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
if strings.HasPrefix(bi.ContextPath, "cwd://") {
|
if strings.HasPrefix(bi.ContextPath, "cwd://") {
|
||||||
bi.ContextPath = path.Clean(strings.TrimPrefix(bi.ContextPath, "cwd://"))
|
bi.ContextPath = path.Clean(strings.TrimPrefix(bi.ContextPath, "cwd://"))
|
||||||
}
|
}
|
||||||
for k, v := range bi.NamedContexts {
|
|
||||||
if strings.HasPrefix(v.Path, "cwd://") {
|
|
||||||
bi.NamedContexts[k] = build.NamedContext{Path: path.Clean(strings.TrimPrefix(v.Path, "cwd://"))}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := validateContextsEntitlements(bi, inp); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Context = &bi.ContextPath
|
t.Context = &bi.ContextPath
|
||||||
|
|
||||||
bo := &build.Options{
|
bo := &build.Options{
|
||||||
Inputs: bi,
|
Inputs: bi,
|
||||||
Tags: t.Tags,
|
Tags: t.Tags,
|
||||||
BuildArgs: t.Args,
|
BuildArgs: t.Args,
|
||||||
Labels: t.Labels,
|
Labels: t.Labels,
|
||||||
NoCache: noCache,
|
NoCache: noCache,
|
||||||
NoCacheFilter: t.NoCacheFilter,
|
Pull: pull,
|
||||||
Pull: pull,
|
|
||||||
NetworkMode: networkMode,
|
|
||||||
Linked: t.linked,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
platforms, err := platformutil.Parse(t.Platforms)
|
platforms, err := platformutil.Parse(t.Platforms)
|
||||||
@@ -927,8 +662,7 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
}
|
}
|
||||||
bo.Platforms = platforms
|
bo.Platforms = platforms
|
||||||
|
|
||||||
dockerConfig := config.LoadDefaultConfigFile(os.Stderr)
|
bo.Session = append(bo.Session, authprovider.NewDockerAuthProvider(os.Stderr))
|
||||||
bo.Session = append(bo.Session, authprovider.NewDockerAuthProvider(dockerConfig))
|
|
||||||
|
|
||||||
secrets, err := buildflags.ParseSecretSpecs(t.Secrets)
|
secrets, err := buildflags.ParseSecretSpecs(t.Secrets)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -1012,39 +746,3 @@ func parseOutputType(str string) string {
|
|||||||
}
|
}
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
|
|
||||||
func validateTargetName(name string) error {
|
|
||||||
if !targetNamePattern.MatchString(name) {
|
|
||||||
return errors.Errorf("only %q are allowed", validTargetNameChars)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func sanitizeTargetName(target string) string {
|
|
||||||
// as stipulated in compose spec, service name can contain a dot so as
|
|
||||||
// best-effort and to avoid any potential ambiguity, we replace the dot
|
|
||||||
// with an underscore.
|
|
||||||
return strings.ReplaceAll(target, ".", "_")
|
|
||||||
}
|
|
||||||
|
|
||||||
func sliceEqual(s1, s2 []string) bool {
|
|
||||||
if len(s1) != len(s2) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
sort.Strings(s1)
|
|
||||||
sort.Strings(s2)
|
|
||||||
for i := range s1 {
|
|
||||||
if s1[i] != s2[i] {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func toNamedContexts(m map[string]string) map[string]build.NamedContext {
|
|
||||||
m2 := make(map[string]build.NamedContext, len(m))
|
|
||||||
for k, v := range m {
|
|
||||||
m2[k] = build.NamedContext{Path: v}
|
|
||||||
}
|
|
||||||
return m2
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -203,7 +203,7 @@ func TestPushOverride(t *testing.T) {
|
|||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
fp := File{
|
fp := File{
|
||||||
Name: "docker-bake.hcl",
|
Name: "docker-bake.hc",
|
||||||
Data: []byte(
|
Data: []byte(
|
||||||
`target "app" {
|
`target "app" {
|
||||||
output = ["type=image,compression=zstd"]
|
output = ["type=image,compression=zstd"]
|
||||||
@@ -217,7 +217,7 @@ func TestPushOverride(t *testing.T) {
|
|||||||
require.Equal(t, "type=image,compression=zstd,push=true", m["app"].Outputs[0])
|
require.Equal(t, "type=image,compression=zstd,push=true", m["app"].Outputs[0])
|
||||||
|
|
||||||
fp = File{
|
fp = File{
|
||||||
Name: "docker-bake.hcl",
|
Name: "docker-bake.hc",
|
||||||
Data: []byte(
|
Data: []byte(
|
||||||
`target "app" {
|
`target "app" {
|
||||||
output = ["type=image,compression=zstd"]
|
output = ["type=image,compression=zstd"]
|
||||||
@@ -231,7 +231,7 @@ func TestPushOverride(t *testing.T) {
|
|||||||
require.Equal(t, "type=image,compression=zstd,push=false", m["app"].Outputs[0])
|
require.Equal(t, "type=image,compression=zstd,push=false", m["app"].Outputs[0])
|
||||||
|
|
||||||
fp = File{
|
fp = File{
|
||||||
Name: "docker-bake.hcl",
|
Name: "docker-bake.hc",
|
||||||
Data: []byte(
|
Data: []byte(
|
||||||
`target "app" {
|
`target "app" {
|
||||||
}`),
|
}`),
|
||||||
@@ -278,19 +278,9 @@ services:
|
|||||||
`),
|
`),
|
||||||
}
|
}
|
||||||
|
|
||||||
fp3 := File{
|
|
||||||
Name: "docker-compose3.yml",
|
|
||||||
Data: []byte(
|
|
||||||
`version: "3"
|
|
||||||
services:
|
|
||||||
webapp:
|
|
||||||
entrypoint: echo 1
|
|
||||||
`),
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.TODO()
|
ctx := context.TODO()
|
||||||
|
|
||||||
m, g, err := ReadTargets(ctx, []File{fp, fp2, fp3}, []string{"default"}, nil, nil)
|
m, g, err := ReadTargets(ctx, []File{fp, fp2}, []string{"default"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 3, len(m))
|
require.Equal(t, 3, len(m))
|
||||||
@@ -307,67 +297,6 @@ services:
|
|||||||
require.Equal(t, []string{"db", "newservice", "webapp"}, g[0].Targets)
|
require.Equal(t, []string{"db", "newservice", "webapp"}, g[0].Targets)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestReadTargetsWithDotCompose(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
|
|
||||||
fp := File{
|
|
||||||
Name: "docker-compose.yml",
|
|
||||||
Data: []byte(
|
|
||||||
`version: "3"
|
|
||||||
services:
|
|
||||||
web.app:
|
|
||||||
build:
|
|
||||||
dockerfile: Dockerfile.webapp
|
|
||||||
args:
|
|
||||||
buildno: 1
|
|
||||||
`),
|
|
||||||
}
|
|
||||||
|
|
||||||
fp2 := File{
|
|
||||||
Name: "docker-compose2.yml",
|
|
||||||
Data: []byte(
|
|
||||||
`version: "3"
|
|
||||||
services:
|
|
||||||
web_app:
|
|
||||||
build:
|
|
||||||
args:
|
|
||||||
buildno2: 12
|
|
||||||
`),
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.TODO()
|
|
||||||
|
|
||||||
m, _, err := ReadTargets(ctx, []File{fp}, []string{"web.app"}, nil, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 1, len(m))
|
|
||||||
_, ok := m["web_app"]
|
|
||||||
require.True(t, ok)
|
|
||||||
require.Equal(t, "Dockerfile.webapp", *m["web_app"].Dockerfile)
|
|
||||||
require.Equal(t, "1", m["web_app"].Args["buildno"])
|
|
||||||
|
|
||||||
m, _, err = ReadTargets(ctx, []File{fp2}, []string{"web_app"}, nil, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 1, len(m))
|
|
||||||
_, ok = m["web_app"]
|
|
||||||
require.True(t, ok)
|
|
||||||
require.Equal(t, "Dockerfile", *m["web_app"].Dockerfile)
|
|
||||||
require.Equal(t, "12", m["web_app"].Args["buildno2"])
|
|
||||||
|
|
||||||
m, g, err := ReadTargets(ctx, []File{fp, fp2}, []string{"default"}, nil, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 1, len(m))
|
|
||||||
_, ok = m["web_app"]
|
|
||||||
require.True(t, ok)
|
|
||||||
require.Equal(t, "Dockerfile.webapp", *m["web_app"].Dockerfile)
|
|
||||||
require.Equal(t, ".", *m["web_app"].Context)
|
|
||||||
require.Equal(t, "1", m["web_app"].Args["buildno"])
|
|
||||||
require.Equal(t, "12", m["web_app"].Args["buildno2"])
|
|
||||||
|
|
||||||
require.Equal(t, 1, len(g))
|
|
||||||
sort.Strings(g[0].Targets)
|
|
||||||
require.Equal(t, []string{"web_app"}, g[0].Targets)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestHCLCwdPrefix(t *testing.T) {
|
func TestHCLCwdPrefix(t *testing.T) {
|
||||||
fp := File{
|
fp := File{
|
||||||
Name: "docker-bake.hcl",
|
Name: "docker-bake.hcl",
|
||||||
@@ -424,363 +353,50 @@ func TestOverrideMerge(t *testing.T) {
|
|||||||
require.Equal(t, "type=registry", m["app"].Outputs[0])
|
require.Equal(t, "type=registry", m["app"].Outputs[0])
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestReadContexts(t *testing.T) {
|
func TestReadTargetsMixed(t *testing.T) {
|
||||||
fp := File{
|
|
||||||
Name: "docker-bake.hcl",
|
|
||||||
Data: []byte(`
|
|
||||||
target "base" {
|
|
||||||
contexts = {
|
|
||||||
foo: "bar"
|
|
||||||
abc: "def"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
target "app" {
|
|
||||||
inherits = ["base"]
|
|
||||||
contexts = {
|
|
||||||
foo: "baz"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
`),
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.TODO()
|
|
||||||
m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{}, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
require.Equal(t, 1, len(m))
|
|
||||||
_, ok := m["app"]
|
|
||||||
require.True(t, ok)
|
|
||||||
|
|
||||||
bo, err := TargetsToBuildOpt(m, &Input{})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
ctxs := bo["app"].Inputs.NamedContexts
|
|
||||||
require.Equal(t, 2, len(ctxs))
|
|
||||||
|
|
||||||
require.Equal(t, "baz", ctxs["foo"].Path)
|
|
||||||
require.Equal(t, "def", ctxs["abc"].Path)
|
|
||||||
|
|
||||||
m, _, err = ReadTargets(ctx, []File{fp}, []string{"app"}, []string{"app.contexts.foo=bay", "base.contexts.ghi=jkl"}, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
require.Equal(t, 1, len(m))
|
|
||||||
_, ok = m["app"]
|
|
||||||
require.True(t, ok)
|
|
||||||
|
|
||||||
bo, err = TargetsToBuildOpt(m, &Input{})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
ctxs = bo["app"].Inputs.NamedContexts
|
|
||||||
require.Equal(t, 3, len(ctxs))
|
|
||||||
|
|
||||||
require.Equal(t, "bay", ctxs["foo"].Path)
|
|
||||||
require.Equal(t, "def", ctxs["abc"].Path)
|
|
||||||
require.Equal(t, "jkl", ctxs["ghi"].Path)
|
|
||||||
|
|
||||||
// test resetting base values
|
|
||||||
m, _, err = ReadTargets(ctx, []File{fp}, []string{"app"}, []string{"app.contexts.foo="}, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
require.Equal(t, 1, len(m))
|
|
||||||
_, ok = m["app"]
|
|
||||||
require.True(t, ok)
|
|
||||||
|
|
||||||
bo, err = TargetsToBuildOpt(m, &Input{})
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
ctxs = bo["app"].Inputs.NamedContexts
|
|
||||||
require.Equal(t, 1, len(ctxs))
|
|
||||||
require.Equal(t, "def", ctxs["abc"].Path)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadContextFromTargetUnknown(t *testing.T) {
|
|
||||||
fp := File{
|
|
||||||
Name: "docker-bake.hcl",
|
|
||||||
Data: []byte(`
|
|
||||||
target "base" {
|
|
||||||
contexts = {
|
|
||||||
foo: "bar"
|
|
||||||
abc: "def"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
target "app" {
|
|
||||||
contexts = {
|
|
||||||
foo: "baz"
|
|
||||||
bar: "target:bar"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
`),
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.TODO()
|
|
||||||
_, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{}, nil)
|
|
||||||
require.Error(t, err)
|
|
||||||
require.Contains(t, err.Error(), "failed to find target bar")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadEmptyTargets(t *testing.T) {
|
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
fp := File{
|
fTargetDefault := File{
|
||||||
Name: "docker-bake.hcl",
|
Name: "docker-bake2.hcl",
|
||||||
Data: []byte(`target "app1" {}`),
|
|
||||||
}
|
|
||||||
|
|
||||||
fp2 := File{
|
|
||||||
Name: "docker-compose.yml",
|
|
||||||
Data: []byte(`
|
|
||||||
services:
|
|
||||||
app2: {}
|
|
||||||
`),
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := context.TODO()
|
|
||||||
|
|
||||||
m, _, err := ReadTargets(ctx, []File{fp, fp2}, []string{"app1", "app2"}, nil, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
require.Equal(t, 2, len(m))
|
|
||||||
_, ok := m["app1"]
|
|
||||||
require.True(t, ok)
|
|
||||||
_, ok = m["app2"]
|
|
||||||
require.True(t, ok)
|
|
||||||
|
|
||||||
require.Equal(t, "Dockerfile", *m["app1"].Dockerfile)
|
|
||||||
require.Equal(t, ".", *m["app1"].Context)
|
|
||||||
require.Equal(t, "Dockerfile", *m["app2"].Dockerfile)
|
|
||||||
require.Equal(t, ".", *m["app2"].Context)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadContextFromTargetChain(t *testing.T) {
|
|
||||||
ctx := context.TODO()
|
|
||||||
fp := File{
|
|
||||||
Name: "docker-bake.hcl",
|
|
||||||
Data: []byte(`
|
|
||||||
target "base" {
|
|
||||||
}
|
|
||||||
target "mid" {
|
|
||||||
output = ["foo"]
|
|
||||||
contexts = {
|
|
||||||
parent: "target:base"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
target "app" {
|
|
||||||
contexts = {
|
|
||||||
foo: "baz"
|
|
||||||
bar: "target:mid"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
target "unused" {}
|
|
||||||
`),
|
|
||||||
}
|
|
||||||
|
|
||||||
m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{}, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
require.Equal(t, 3, len(m))
|
|
||||||
app, ok := m["app"]
|
|
||||||
require.True(t, ok)
|
|
||||||
|
|
||||||
require.Equal(t, 2, len(app.Contexts))
|
|
||||||
|
|
||||||
mid, ok := m["mid"]
|
|
||||||
require.True(t, ok)
|
|
||||||
require.Equal(t, 0, len(mid.Outputs))
|
|
||||||
require.Equal(t, 1, len(mid.Contexts))
|
|
||||||
|
|
||||||
base, ok := m["base"]
|
|
||||||
require.True(t, ok)
|
|
||||||
require.Equal(t, 0, len(base.Contexts))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadContextFromTargetInfiniteLoop(t *testing.T) {
|
|
||||||
ctx := context.TODO()
|
|
||||||
fp := File{
|
|
||||||
Name: "docker-bake.hcl",
|
|
||||||
Data: []byte(`
|
|
||||||
target "mid" {
|
|
||||||
output = ["foo"]
|
|
||||||
contexts = {
|
|
||||||
parent: "target:app"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
target "app" {
|
|
||||||
contexts = {
|
|
||||||
foo: "baz"
|
|
||||||
bar: "target:mid"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
`),
|
|
||||||
}
|
|
||||||
_, _, err := ReadTargets(ctx, []File{fp}, []string{"app", "mid"}, []string{}, nil)
|
|
||||||
require.Error(t, err)
|
|
||||||
require.Contains(t, err.Error(), "infinite loop from")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadContextFromTargetMultiPlatform(t *testing.T) {
|
|
||||||
ctx := context.TODO()
|
|
||||||
fp := File{
|
|
||||||
Name: "docker-bake.hcl",
|
|
||||||
Data: []byte(`
|
|
||||||
target "mid" {
|
|
||||||
output = ["foo"]
|
|
||||||
platforms = ["linux/amd64", "linux/arm64"]
|
|
||||||
}
|
|
||||||
target "app" {
|
|
||||||
contexts = {
|
|
||||||
bar: "target:mid"
|
|
||||||
}
|
|
||||||
platforms = ["linux/amd64", "linux/arm64"]
|
|
||||||
}
|
|
||||||
`),
|
|
||||||
}
|
|
||||||
_, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{}, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadContextFromTargetInvalidPlatforms(t *testing.T) {
|
|
||||||
ctx := context.TODO()
|
|
||||||
fp := File{
|
|
||||||
Name: "docker-bake.hcl",
|
|
||||||
Data: []byte(`
|
|
||||||
target "mid" {
|
|
||||||
output = ["foo"]
|
|
||||||
platforms = ["linux/amd64", "linux/riscv64"]
|
|
||||||
}
|
|
||||||
target "app" {
|
|
||||||
contexts = {
|
|
||||||
bar: "target:mid"
|
|
||||||
}
|
|
||||||
platforms = ["linux/amd64", "linux/arm64"]
|
|
||||||
}
|
|
||||||
`),
|
|
||||||
}
|
|
||||||
_, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{}, nil)
|
|
||||||
require.Error(t, err)
|
|
||||||
require.Contains(t, err.Error(), "defined for different platforms")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadTargetsDefault(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
ctx := context.TODO()
|
|
||||||
|
|
||||||
f := File{
|
|
||||||
Name: "docker-bake.hcl",
|
|
||||||
Data: []byte(`
|
Data: []byte(`
|
||||||
target "default" {
|
target "default" {
|
||||||
dockerfile = "test"
|
dockerfile = "test"
|
||||||
}`)}
|
}`)}
|
||||||
|
|
||||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"default"}, nil, nil)
|
fTargetImage := File{
|
||||||
require.NoError(t, err)
|
Name: "docker-bake3.hcl",
|
||||||
require.Equal(t, 0, len(g))
|
|
||||||
require.Equal(t, 1, len(m))
|
|
||||||
require.Equal(t, "test", *m["default"].Dockerfile)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadTargetsSpecified(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
ctx := context.TODO()
|
|
||||||
|
|
||||||
f := File{
|
|
||||||
Name: "docker-bake.hcl",
|
|
||||||
Data: []byte(`
|
Data: []byte(`
|
||||||
target "image" {
|
target "image" {
|
||||||
dockerfile = "test"
|
dockerfile = "test"
|
||||||
}`)}
|
}`)}
|
||||||
|
|
||||||
_, _, err := ReadTargets(ctx, []File{f}, []string{"default"}, nil, nil)
|
fpHCL := File{
|
||||||
require.Error(t, err)
|
|
||||||
|
|
||||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"image"}, nil, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 1, len(g))
|
|
||||||
require.Equal(t, []string{"image"}, g[0].Targets)
|
|
||||||
require.Equal(t, 1, len(m))
|
|
||||||
require.Equal(t, "test", *m["image"].Dockerfile)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadTargetsGroup(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
ctx := context.TODO()
|
|
||||||
|
|
||||||
f := File{
|
|
||||||
Name: "docker-bake.hcl",
|
|
||||||
Data: []byte(`
|
|
||||||
group "foo" {
|
|
||||||
targets = ["image"]
|
|
||||||
}
|
|
||||||
target "image" {
|
|
||||||
dockerfile = "test"
|
|
||||||
}`)}
|
|
||||||
|
|
||||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 1, len(g))
|
|
||||||
require.Equal(t, []string{"image"}, g[0].Targets)
|
|
||||||
require.Equal(t, 1, len(m))
|
|
||||||
require.Equal(t, "test", *m["image"].Dockerfile)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadTargetsGroupAndTarget(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
ctx := context.TODO()
|
|
||||||
|
|
||||||
f := File{
|
|
||||||
Name: "docker-bake.hcl",
|
|
||||||
Data: []byte(`
|
|
||||||
group "foo" {
|
|
||||||
targets = ["image"]
|
|
||||||
}
|
|
||||||
target "foo" {
|
|
||||||
dockerfile = "bar"
|
|
||||||
}
|
|
||||||
target "image" {
|
|
||||||
dockerfile = "test"
|
|
||||||
}`)}
|
|
||||||
|
|
||||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 1, len(g))
|
|
||||||
require.Equal(t, []string{"image"}, g[0].Targets)
|
|
||||||
require.Equal(t, 1, len(m))
|
|
||||||
require.Equal(t, "test", *m["image"].Dockerfile)
|
|
||||||
|
|
||||||
m, g, err = ReadTargets(ctx, []File{f}, []string{"foo", "foo"}, nil, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 1, len(g))
|
|
||||||
require.Equal(t, []string{"image"}, g[0].Targets)
|
|
||||||
require.Equal(t, 1, len(m))
|
|
||||||
require.Equal(t, "test", *m["image"].Dockerfile)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadTargetsMixed(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
ctx := context.TODO()
|
|
||||||
|
|
||||||
fhcl := File{
|
|
||||||
Name: "docker-bake.hcl",
|
Name: "docker-bake.hcl",
|
||||||
Data: []byte(`
|
Data: []byte(`
|
||||||
group "default" {
|
group "default" {
|
||||||
targets = ["image"]
|
targets = ["image"]
|
||||||
}
|
}
|
||||||
|
|
||||||
target "nocache" {
|
target "nocache" {
|
||||||
no-cache = true
|
no-cache = true
|
||||||
}
|
}
|
||||||
|
|
||||||
group "release" {
|
group "release" {
|
||||||
targets = ["image-release"]
|
targets = ["image-release"]
|
||||||
}
|
}
|
||||||
|
|
||||||
target "image" {
|
target "image" {
|
||||||
inherits = ["nocache"]
|
inherits = ["nocache"]
|
||||||
output = ["type=docker"]
|
output = ["type=docker"]
|
||||||
}
|
}
|
||||||
|
|
||||||
target "image-release" {
|
target "image-release" {
|
||||||
inherits = ["image"]
|
inherits = ["image"]
|
||||||
output = ["type=image,push=true"]
|
output = ["type=image,push=true"]
|
||||||
tags = ["user/app:latest"]
|
tags = ["user/app:latest"]
|
||||||
}`)}
|
}`)}
|
||||||
|
|
||||||
fyml := File{
|
fpYML := File{
|
||||||
Name: "docker-compose.yml",
|
Name: "docker-compose.yml",
|
||||||
Data: []byte(`
|
Data: []byte(`
|
||||||
services:
|
services:
|
||||||
@@ -796,6 +412,7 @@ services:
|
|||||||
- NODE_ENV=test
|
- NODE_ENV=test
|
||||||
- AWS_ACCESS_KEY_ID=dummy
|
- AWS_ACCESS_KEY_ID=dummy
|
||||||
- AWS_SECRET_ACCESS_KEY=dummy
|
- AWS_SECRET_ACCESS_KEY=dummy
|
||||||
|
|
||||||
aws:
|
aws:
|
||||||
build:
|
build:
|
||||||
dockerfile: ./aws.Dockerfile
|
dockerfile: ./aws.Dockerfile
|
||||||
@@ -804,7 +421,7 @@ services:
|
|||||||
CT_TAG: bar
|
CT_TAG: bar
|
||||||
image: ct-fake-aws:bar`)}
|
image: ct-fake-aws:bar`)}
|
||||||
|
|
||||||
fjson := File{
|
fpJSON := File{
|
||||||
Name: "docker-bake.json",
|
Name: "docker-bake.json",
|
||||||
Data: []byte(`{
|
Data: []byte(`{
|
||||||
"group": {
|
"group": {
|
||||||
@@ -825,15 +442,32 @@ services:
|
|||||||
}
|
}
|
||||||
}`)}
|
}`)}
|
||||||
|
|
||||||
m, g, err := ReadTargets(ctx, []File{fhcl}, []string{"default"}, nil, nil)
|
ctx := context.TODO()
|
||||||
|
|
||||||
|
m, g, err := ReadTargets(ctx, []File{fTargetDefault}, []string{"default"}, nil, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 0, len(g))
|
||||||
|
require.Equal(t, 1, len(m))
|
||||||
|
require.Equal(t, "test", *m["default"].Dockerfile)
|
||||||
|
|
||||||
|
_, _, err = ReadTargets(ctx, []File{fTargetImage}, []string{"default"}, nil, nil)
|
||||||
|
require.Error(t, err)
|
||||||
|
|
||||||
|
m, g, err = ReadTargets(ctx, []File{fTargetImage}, []string{"image"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(g))
|
require.Equal(t, 1, len(g))
|
||||||
require.Equal(t, []string{"image"}, g[0].Targets)
|
require.Equal(t, []string{"image"}, g[0].Targets)
|
||||||
require.Equal(t, 1, len(m))
|
require.Equal(t, 1, len(m))
|
||||||
require.Equal(t, 1, len(m["image"].Outputs))
|
require.Equal(t, "test", *m["image"].Dockerfile)
|
||||||
require.Equal(t, "type=docker", m["image"].Outputs[0])
|
|
||||||
|
|
||||||
m, g, err = ReadTargets(ctx, []File{fhcl}, []string{"image-release"}, nil, nil)
|
m, g, err = ReadTargets(ctx, []File{fTargetImage}, []string{"image"}, nil, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 1, len(g))
|
||||||
|
require.Equal(t, []string{"image"}, g[0].Targets)
|
||||||
|
require.Equal(t, 1, len(m))
|
||||||
|
require.Equal(t, "test", *m["image"].Dockerfile)
|
||||||
|
|
||||||
|
m, g, err = ReadTargets(ctx, []File{fpHCL}, []string{"image-release"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(g))
|
require.Equal(t, 1, len(g))
|
||||||
require.Equal(t, []string{"image-release"}, g[0].Targets)
|
require.Equal(t, []string{"image-release"}, g[0].Targets)
|
||||||
@@ -841,7 +475,7 @@ services:
|
|||||||
require.Equal(t, 1, len(m["image-release"].Outputs))
|
require.Equal(t, 1, len(m["image-release"].Outputs))
|
||||||
require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0])
|
require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0])
|
||||||
|
|
||||||
m, g, err = ReadTargets(ctx, []File{fhcl}, []string{"image", "image-release"}, nil, nil)
|
m, g, err = ReadTargets(ctx, []File{fpHCL}, []string{"image", "image-release"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(g))
|
require.Equal(t, 1, len(g))
|
||||||
require.Equal(t, []string{"image", "image-release"}, g[0].Targets)
|
require.Equal(t, []string{"image", "image-release"}, g[0].Targets)
|
||||||
@@ -850,21 +484,21 @@ services:
|
|||||||
require.Equal(t, 1, len(m["image-release"].Outputs))
|
require.Equal(t, 1, len(m["image-release"].Outputs))
|
||||||
require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0])
|
require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0])
|
||||||
|
|
||||||
m, g, err = ReadTargets(ctx, []File{fyml, fhcl}, []string{"default"}, nil, nil)
|
m, g, err = ReadTargets(ctx, []File{fpYML, fpHCL}, []string{"default"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(g))
|
require.Equal(t, 1, len(g))
|
||||||
require.Equal(t, []string{"image"}, g[0].Targets)
|
require.Equal(t, []string{"image"}, g[0].Targets)
|
||||||
require.Equal(t, 1, len(m))
|
require.Equal(t, 1, len(m))
|
||||||
require.Equal(t, ".", *m["image"].Context)
|
require.Equal(t, ".", *m["image"].Context)
|
||||||
|
|
||||||
m, g, err = ReadTargets(ctx, []File{fjson}, []string{"default"}, nil, nil)
|
m, g, err = ReadTargets(ctx, []File{fpJSON}, []string{"default"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(g))
|
require.Equal(t, 1, len(g))
|
||||||
require.Equal(t, []string{"image"}, g[0].Targets)
|
require.Equal(t, []string{"image"}, g[0].Targets)
|
||||||
require.Equal(t, 1, len(m))
|
require.Equal(t, 1, len(m))
|
||||||
require.Equal(t, ".", *m["image"].Context)
|
require.Equal(t, ".", *m["image"].Context)
|
||||||
|
|
||||||
m, g, err = ReadTargets(ctx, []File{fyml}, []string{"default"}, nil, nil)
|
m, g, err = ReadTargets(ctx, []File{fpYML}, []string{"default"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(g))
|
require.Equal(t, 1, len(g))
|
||||||
sort.Strings(g[0].Targets)
|
sort.Strings(g[0].Targets)
|
||||||
@@ -873,7 +507,7 @@ services:
|
|||||||
require.Equal(t, "./Dockerfile", *m["addon"].Dockerfile)
|
require.Equal(t, "./Dockerfile", *m["addon"].Dockerfile)
|
||||||
require.Equal(t, "./aws.Dockerfile", *m["aws"].Dockerfile)
|
require.Equal(t, "./aws.Dockerfile", *m["aws"].Dockerfile)
|
||||||
|
|
||||||
m, g, err = ReadTargets(ctx, []File{fyml, fhcl}, []string{"addon", "aws"}, nil, nil)
|
m, g, err = ReadTargets(ctx, []File{fpYML, fpHCL}, []string{"addon", "aws"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(g))
|
require.Equal(t, 1, len(g))
|
||||||
sort.Strings(g[0].Targets)
|
sort.Strings(g[0].Targets)
|
||||||
@@ -882,7 +516,7 @@ services:
|
|||||||
require.Equal(t, "./Dockerfile", *m["addon"].Dockerfile)
|
require.Equal(t, "./Dockerfile", *m["addon"].Dockerfile)
|
||||||
require.Equal(t, "./aws.Dockerfile", *m["aws"].Dockerfile)
|
require.Equal(t, "./aws.Dockerfile", *m["aws"].Dockerfile)
|
||||||
|
|
||||||
m, g, err = ReadTargets(ctx, []File{fyml, fhcl}, []string{"addon", "aws", "image"}, nil, nil)
|
m, g, err = ReadTargets(ctx, []File{fpYML, fpHCL}, []string{"addon", "aws", "image"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(g))
|
require.Equal(t, 1, len(g))
|
||||||
sort.Strings(g[0].Targets)
|
sort.Strings(g[0].Targets)
|
||||||
@@ -892,337 +526,3 @@ services:
|
|||||||
require.Equal(t, "./Dockerfile", *m["addon"].Dockerfile)
|
require.Equal(t, "./Dockerfile", *m["addon"].Dockerfile)
|
||||||
require.Equal(t, "./aws.Dockerfile", *m["aws"].Dockerfile)
|
require.Equal(t, "./aws.Dockerfile", *m["aws"].Dockerfile)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestReadTargetsSameGroupTarget(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
ctx := context.TODO()
|
|
||||||
|
|
||||||
f := File{
|
|
||||||
Name: "docker-bake.hcl",
|
|
||||||
Data: []byte(`
|
|
||||||
group "foo" {
|
|
||||||
targets = ["foo"]
|
|
||||||
}
|
|
||||||
target "foo" {
|
|
||||||
dockerfile = "bar"
|
|
||||||
}
|
|
||||||
target "image" {
|
|
||||||
output = ["type=docker"]
|
|
||||||
}`)}
|
|
||||||
|
|
||||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 1, len(g))
|
|
||||||
require.Equal(t, []string{"foo"}, g[0].Targets)
|
|
||||||
require.Equal(t, 1, len(m))
|
|
||||||
require.Equal(t, "bar", *m["foo"].Dockerfile)
|
|
||||||
|
|
||||||
m, g, err = ReadTargets(ctx, []File{f}, []string{"foo", "foo"}, nil, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 1, len(g))
|
|
||||||
require.Equal(t, []string{"foo"}, g[0].Targets)
|
|
||||||
require.Equal(t, 1, len(m))
|
|
||||||
require.Equal(t, "bar", *m["foo"].Dockerfile)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestReadTargetsSameGroupTargetMulti(t *testing.T) {
|
|
||||||
t.Parallel()
|
|
||||||
ctx := context.TODO()
|
|
||||||
|
|
||||||
f := File{
|
|
||||||
Name: "docker-bake.hcl",
|
|
||||||
Data: []byte(`
|
|
||||||
group "foo" {
|
|
||||||
targets = ["foo", "image"]
|
|
||||||
}
|
|
||||||
target "foo" {
|
|
||||||
dockerfile = "bar"
|
|
||||||
}
|
|
||||||
target "image" {
|
|
||||||
output = ["type=docker"]
|
|
||||||
}`)}
|
|
||||||
|
|
||||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 1, len(g))
|
|
||||||
require.Equal(t, []string{"foo", "image"}, g[0].Targets)
|
|
||||||
require.Equal(t, 2, len(m))
|
|
||||||
require.Equal(t, "bar", *m["foo"].Dockerfile)
|
|
||||||
require.Equal(t, "type=docker", m["image"].Outputs[0])
|
|
||||||
|
|
||||||
m, g, err = ReadTargets(ctx, []File{f}, []string{"foo", "image"}, nil, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 1, len(g))
|
|
||||||
require.Equal(t, []string{"foo", "image"}, g[0].Targets)
|
|
||||||
require.Equal(t, 2, len(m))
|
|
||||||
require.Equal(t, "bar", *m["foo"].Dockerfile)
|
|
||||||
require.Equal(t, "type=docker", m["image"].Outputs[0])
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNestedInherits(t *testing.T) {
|
|
||||||
ctx := context.TODO()
|
|
||||||
|
|
||||||
f := File{
|
|
||||||
Name: "docker-bake.hcl",
|
|
||||||
Data: []byte(`
|
|
||||||
target "a" {
|
|
||||||
args = {
|
|
||||||
foo = "123"
|
|
||||||
bar = "234"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
target "b" {
|
|
||||||
inherits = ["a"]
|
|
||||||
args = {
|
|
||||||
bar = "567"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
target "c" {
|
|
||||||
inherits = ["a"]
|
|
||||||
args = {
|
|
||||||
baz = "890"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
target "d" {
|
|
||||||
inherits = ["b", "c"]
|
|
||||||
}`)}
|
|
||||||
|
|
||||||
cases := []struct {
|
|
||||||
name string
|
|
||||||
overrides []string
|
|
||||||
want map[string]string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "nested simple",
|
|
||||||
overrides: nil,
|
|
||||||
want: map[string]string{"bar": "234", "baz": "890", "foo": "123"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "nested with overrides first",
|
|
||||||
overrides: []string{"a.args.foo=321", "b.args.bar=432"},
|
|
||||||
want: map[string]string{"bar": "234", "baz": "890", "foo": "321"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "nested with overrides last",
|
|
||||||
overrides: []string{"a.args.foo=321", "c.args.bar=432"},
|
|
||||||
want: map[string]string{"bar": "432", "baz": "890", "foo": "321"},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range cases {
|
|
||||||
tt := tt
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"d"}, tt.overrides, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 1, len(g))
|
|
||||||
require.Equal(t, []string{"d"}, g[0].Targets)
|
|
||||||
require.Equal(t, 1, len(m))
|
|
||||||
require.Equal(t, tt.want, m["d"].Args)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNestedInheritsWithGroup(t *testing.T) {
|
|
||||||
ctx := context.TODO()
|
|
||||||
|
|
||||||
f := File{
|
|
||||||
Name: "docker-bake.hcl",
|
|
||||||
Data: []byte(`
|
|
||||||
target "grandparent" {
|
|
||||||
output = ["type=docker"]
|
|
||||||
args = {
|
|
||||||
BAR = "fuu"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
target "parent" {
|
|
||||||
inherits = ["grandparent"]
|
|
||||||
args = {
|
|
||||||
FOO = "bar"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
target "child1" {
|
|
||||||
inherits = ["parent"]
|
|
||||||
}
|
|
||||||
target "child2" {
|
|
||||||
inherits = ["parent"]
|
|
||||||
args = {
|
|
||||||
FOO2 = "bar2"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
group "default" {
|
|
||||||
targets = [
|
|
||||||
"child1",
|
|
||||||
"child2"
|
|
||||||
]
|
|
||||||
}`)}
|
|
||||||
|
|
||||||
cases := []struct {
|
|
||||||
name string
|
|
||||||
overrides []string
|
|
||||||
wantch1 map[string]string
|
|
||||||
wantch2 map[string]string
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "nested simple",
|
|
||||||
overrides: nil,
|
|
||||||
wantch1: map[string]string{"BAR": "fuu", "FOO": "bar"},
|
|
||||||
wantch2: map[string]string{"BAR": "fuu", "FOO": "bar", "FOO2": "bar2"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "nested with overrides first",
|
|
||||||
overrides: []string{"grandparent.args.BAR=fii", "child1.args.FOO=baaar"},
|
|
||||||
wantch1: map[string]string{"BAR": "fii", "FOO": "baaar"},
|
|
||||||
wantch2: map[string]string{"BAR": "fii", "FOO": "bar", "FOO2": "bar2"},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "nested with overrides last",
|
|
||||||
overrides: []string{"grandparent.args.BAR=fii", "child2.args.FOO=baaar"},
|
|
||||||
wantch1: map[string]string{"BAR": "fii", "FOO": "bar"},
|
|
||||||
wantch2: map[string]string{"BAR": "fii", "FOO": "baaar", "FOO2": "bar2"},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range cases {
|
|
||||||
tt := tt
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"default"}, tt.overrides, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 1, len(g))
|
|
||||||
require.Equal(t, []string{"child1", "child2"}, g[0].Targets)
|
|
||||||
require.Equal(t, 2, len(m))
|
|
||||||
require.Equal(t, tt.wantch1, m["child1"].Args)
|
|
||||||
require.Equal(t, []string{"type=docker"}, m["child1"].Outputs)
|
|
||||||
require.Equal(t, tt.wantch2, m["child2"].Args)
|
|
||||||
require.Equal(t, []string{"type=docker"}, m["child2"].Outputs)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestTargetName(t *testing.T) {
|
|
||||||
ctx := context.TODO()
|
|
||||||
cases := []struct {
|
|
||||||
target string
|
|
||||||
wantErr bool
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
target: "a",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
target: "abc",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
target: "a/b",
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
target: "a.b",
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
target: "_a",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
target: "a_b",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
target: "AbC",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
target: "AbC-0123",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range cases {
|
|
||||||
tt := tt
|
|
||||||
t.Run(tt.target, func(t *testing.T) {
|
|
||||||
_, _, err := ReadTargets(ctx, []File{{
|
|
||||||
Name: "docker-bake.hcl",
|
|
||||||
Data: []byte(`target "` + tt.target + `" {}`),
|
|
||||||
}}, []string{tt.target}, nil, nil)
|
|
||||||
if tt.wantErr {
|
|
||||||
require.Error(t, err)
|
|
||||||
} else {
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestNestedGroupsWithSameTarget(t *testing.T) {
|
|
||||||
ctx := context.TODO()
|
|
||||||
|
|
||||||
f := File{
|
|
||||||
Name: "docker-bake.hcl",
|
|
||||||
Data: []byte(`
|
|
||||||
group "a" {
|
|
||||||
targets = ["b", "c"]
|
|
||||||
}
|
|
||||||
|
|
||||||
group "b" {
|
|
||||||
targets = ["d"]
|
|
||||||
}
|
|
||||||
|
|
||||||
group "c" {
|
|
||||||
targets = ["b"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "d" {
|
|
||||||
context = "."
|
|
||||||
dockerfile = "./testdockerfile"
|
|
||||||
}
|
|
||||||
|
|
||||||
group "e" {
|
|
||||||
targets = ["a", "f"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "f" {
|
|
||||||
context = "./foo"
|
|
||||||
}`)}
|
|
||||||
|
|
||||||
cases := []struct {
|
|
||||||
name string
|
|
||||||
targets []string
|
|
||||||
ntargets int
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "a",
|
|
||||||
targets: []string{"b", "c"},
|
|
||||||
ntargets: 1,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "b",
|
|
||||||
targets: []string{"d"},
|
|
||||||
ntargets: 1,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "c",
|
|
||||||
targets: []string{"b"},
|
|
||||||
ntargets: 1,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "d",
|
|
||||||
targets: []string{"d"},
|
|
||||||
ntargets: 1,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "e",
|
|
||||||
targets: []string{"a", "f"},
|
|
||||||
ntargets: 2,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range cases {
|
|
||||||
tt := tt
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
m, g, err := ReadTargets(ctx, []File{f}, []string{tt.name}, nil, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 1, len(g))
|
|
||||||
require.Equal(t, tt.targets, g[0].Targets)
|
|
||||||
require.Equal(t, tt.ntargets, len(m))
|
|
||||||
require.Equal(t, ".", *m["d"].Context)
|
|
||||||
require.Equal(t, "./testdockerfile", *m["d"].Dockerfile)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
296
bake/compose.go
296
bake/compose.go
@@ -3,39 +3,46 @@ package bake
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"reflect"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/compose-spec/compose-go/dotenv"
|
|
||||||
"github.com/compose-spec/compose-go/loader"
|
"github.com/compose-spec/compose-go/loader"
|
||||||
compose "github.com/compose-spec/compose-go/types"
|
compose "github.com/compose-spec/compose-go/types"
|
||||||
"github.com/pkg/errors"
|
|
||||||
"gopkg.in/yaml.v3"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// errComposeInvalid is returned when a compose file is invalid
|
func parseCompose(dt []byte) (*compose.Project, error) {
|
||||||
var errComposeInvalid = errors.New("invalid compose file")
|
return loader.Load(compose.ConfigDetails{
|
||||||
|
|
||||||
func ParseCompose(dt []byte, envs map[string]string) (*Config, error) {
|
|
||||||
cfg, err := loader.Load(compose.ConfigDetails{
|
|
||||||
ConfigFiles: []compose.ConfigFile{
|
ConfigFiles: []compose.ConfigFile{
|
||||||
{
|
{
|
||||||
Content: dt,
|
Content: dt,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Environment: envs,
|
Environment: envMap(os.Environ()),
|
||||||
}, func(options *loader.Options) {
|
}, func(options *loader.Options) {
|
||||||
options.SkipNormalization = true
|
options.SkipNormalization = true
|
||||||
options.SkipConsistencyCheck = true
|
|
||||||
})
|
})
|
||||||
if err != nil {
|
}
|
||||||
return nil, err
|
|
||||||
|
func envMap(env []string) map[string]string {
|
||||||
|
result := make(map[string]string, len(env))
|
||||||
|
for _, s := range env {
|
||||||
|
kv := strings.SplitN(s, "=", 2)
|
||||||
|
if len(kv) != 2 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
result[kv[0]] = kv[1]
|
||||||
}
|
}
|
||||||
if err = composeValidate(cfg); err != nil {
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func ParseCompose(dt []byte) (*Config, error) {
|
||||||
|
cfg, err := parseCompose(dt)
|
||||||
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var c Config
|
var c Config
|
||||||
|
var zeroBuildConfig compose.BuildConfig
|
||||||
if len(cfg.Services) > 0 {
|
if len(cfg.Services) > 0 {
|
||||||
c.Groups = []*Group{}
|
c.Groups = []*Group{}
|
||||||
c.Targets = []*Target{}
|
c.Targets = []*Target{}
|
||||||
@@ -43,13 +50,13 @@ func ParseCompose(dt []byte, envs map[string]string) (*Config, error) {
|
|||||||
g := &Group{Name: "default"}
|
g := &Group{Name: "default"}
|
||||||
|
|
||||||
for _, s := range cfg.Services {
|
for _, s := range cfg.Services {
|
||||||
if s.Build == nil {
|
|
||||||
s.Build = &compose.BuildConfig{}
|
|
||||||
}
|
|
||||||
|
|
||||||
targetName := sanitizeTargetName(s.Name)
|
if s.Build == nil || reflect.DeepEqual(s.Build, zeroBuildConfig) {
|
||||||
if err = validateTargetName(targetName); err != nil {
|
// if not make sure they're setting an image or it's invalid d-c.yml
|
||||||
return nil, errors.Wrapf(err, "invalid service name %q", targetName)
|
if s.Image == "" {
|
||||||
|
return nil, fmt.Errorf("compose file invalid: service %s has neither an image nor a build context specified. At least one must be provided", s.Name)
|
||||||
|
}
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
var contextPathP *string
|
var contextPathP *string
|
||||||
@@ -62,34 +69,17 @@ func ParseCompose(dt []byte, envs map[string]string) (*Config, error) {
|
|||||||
dockerfilePath := s.Build.Dockerfile
|
dockerfilePath := s.Build.Dockerfile
|
||||||
dockerfilePathP = &dockerfilePath
|
dockerfilePathP = &dockerfilePath
|
||||||
}
|
}
|
||||||
|
g.Targets = append(g.Targets, s.Name)
|
||||||
var secrets []string
|
|
||||||
for _, bs := range s.Build.Secrets {
|
|
||||||
secret, err := composeToBuildkitSecret(bs, cfg.Secrets[bs.Source])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
secrets = append(secrets, secret)
|
|
||||||
}
|
|
||||||
|
|
||||||
g.Targets = append(g.Targets, targetName)
|
|
||||||
t := &Target{
|
t := &Target{
|
||||||
Name: targetName,
|
Name: s.Name,
|
||||||
Context: contextPathP,
|
Context: contextPathP,
|
||||||
Dockerfile: dockerfilePathP,
|
Dockerfile: dockerfilePathP,
|
||||||
Tags: s.Build.Tags,
|
|
||||||
Labels: s.Build.Labels,
|
Labels: s.Build.Labels,
|
||||||
Args: flatten(s.Build.Args.Resolve(func(val string) (string, bool) {
|
Args: flatten(s.Build.Args.Resolve(func(val string) (string, bool) {
|
||||||
if val, ok := s.Environment[val]; ok && val != nil {
|
|
||||||
return *val, true
|
|
||||||
}
|
|
||||||
val, ok := cfg.Environment[val]
|
val, ok := cfg.Environment[val]
|
||||||
return val, ok
|
return val, ok
|
||||||
})),
|
})),
|
||||||
CacheFrom: s.Build.CacheFrom,
|
CacheFrom: s.Build.CacheFrom,
|
||||||
CacheTo: s.Build.CacheTo,
|
|
||||||
NetworkMode: &s.Build.Network,
|
|
||||||
Secrets: secrets,
|
|
||||||
}
|
}
|
||||||
if err = t.composeExtTarget(s.Build.Extensions); err != nil {
|
if err = t.composeExtTarget(s.Build.Extensions); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -110,42 +100,6 @@ func ParseCompose(dt []byte, envs map[string]string) (*Config, error) {
|
|||||||
return &c, nil
|
return &c, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func loadDotEnv(curenv map[string]string, workingDir string) (map[string]string, error) {
|
|
||||||
if curenv == nil {
|
|
||||||
curenv = make(map[string]string)
|
|
||||||
}
|
|
||||||
|
|
||||||
ef, err := filepath.Abs(filepath.Join(workingDir, ".env"))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err = os.Stat(ef); os.IsNotExist(err) {
|
|
||||||
return curenv, nil
|
|
||||||
} else if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
dt, err := os.ReadFile(ef)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
envs, err := dotenv.UnmarshalBytes(dt)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, v := range envs {
|
|
||||||
if _, set := curenv[k]; set {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
curenv[k] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
return curenv, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func flatten(in compose.MappingWithEquals) compose.Mapping {
|
func flatten(in compose.MappingWithEquals) compose.Mapping {
|
||||||
if len(in) == 0 {
|
if len(in) == 0 {
|
||||||
return nil
|
return nil
|
||||||
@@ -160,133 +114,81 @@ func flatten(in compose.MappingWithEquals) compose.Mapping {
|
|||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
// xbake Compose build extension provides fields not (yet) available in
|
|
||||||
// Compose build specification: https://github.com/compose-spec/compose-spec/blob/master/build.md
|
|
||||||
type xbake struct {
|
|
||||||
Tags stringArray `yaml:"tags,omitempty"`
|
|
||||||
CacheFrom stringArray `yaml:"cache-from,omitempty"`
|
|
||||||
CacheTo stringArray `yaml:"cache-to,omitempty"`
|
|
||||||
Secrets stringArray `yaml:"secret,omitempty"`
|
|
||||||
SSH stringArray `yaml:"ssh,omitempty"`
|
|
||||||
Platforms stringArray `yaml:"platforms,omitempty"`
|
|
||||||
Outputs stringArray `yaml:"output,omitempty"`
|
|
||||||
Pull *bool `yaml:"pull,omitempty"`
|
|
||||||
NoCache *bool `yaml:"no-cache,omitempty"`
|
|
||||||
NoCacheFilter stringArray `yaml:"no-cache-filter,omitempty"`
|
|
||||||
Contexts stringMap `yaml:"contexts,omitempty"`
|
|
||||||
// don't forget to update documentation if you add a new field:
|
|
||||||
// docs/guides/bake/compose-file.md#extension-field-with-x-bake
|
|
||||||
}
|
|
||||||
|
|
||||||
type stringMap map[string]string
|
|
||||||
type stringArray []string
|
|
||||||
|
|
||||||
func (sa *stringArray) UnmarshalYAML(unmarshal func(interface{}) error) error {
|
|
||||||
var multi []string
|
|
||||||
err := unmarshal(&multi)
|
|
||||||
if err != nil {
|
|
||||||
var single string
|
|
||||||
if err := unmarshal(&single); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*sa = strings.Fields(single)
|
|
||||||
} else {
|
|
||||||
*sa = multi
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// composeExtTarget converts Compose build extension x-bake to bake Target
|
// composeExtTarget converts Compose build extension x-bake to bake Target
|
||||||
// https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension
|
// https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension
|
||||||
func (t *Target) composeExtTarget(exts map[string]interface{}) error {
|
func (t *Target) composeExtTarget(exts map[string]interface{}) error {
|
||||||
var xb xbake
|
if ext, ok := exts["x-bake"]; ok {
|
||||||
|
for key, val := range ext.(map[string]interface{}) {
|
||||||
ext, ok := exts["x-bake"]
|
switch key {
|
||||||
if !ok || ext == nil {
|
case "tags":
|
||||||
return nil
|
if res, k := val.(string); k {
|
||||||
}
|
t.Tags = append(t.Tags, res)
|
||||||
|
} else {
|
||||||
yb, _ := yaml.Marshal(ext)
|
for _, res := range val.([]interface{}) {
|
||||||
if err := yaml.Unmarshal(yb, &xb); err != nil {
|
t.Tags = append(t.Tags, res.(string))
|
||||||
return err
|
}
|
||||||
}
|
|
||||||
|
|
||||||
if len(xb.Tags) > 0 {
|
|
||||||
t.Tags = dedupSlice(append(t.Tags, xb.Tags...))
|
|
||||||
}
|
|
||||||
if len(xb.CacheFrom) > 0 {
|
|
||||||
t.CacheFrom = dedupSlice(append(t.CacheFrom, xb.CacheFrom...))
|
|
||||||
}
|
|
||||||
if len(xb.CacheTo) > 0 {
|
|
||||||
t.CacheTo = dedupSlice(append(t.CacheTo, xb.CacheTo...))
|
|
||||||
}
|
|
||||||
if len(xb.Secrets) > 0 {
|
|
||||||
t.Secrets = dedupSlice(append(t.Secrets, xb.Secrets...))
|
|
||||||
}
|
|
||||||
if len(xb.SSH) > 0 {
|
|
||||||
t.SSH = dedupSlice(append(t.SSH, xb.SSH...))
|
|
||||||
}
|
|
||||||
if len(xb.Platforms) > 0 {
|
|
||||||
t.Platforms = dedupSlice(append(t.Platforms, xb.Platforms...))
|
|
||||||
}
|
|
||||||
if len(xb.Outputs) > 0 {
|
|
||||||
t.Outputs = dedupSlice(append(t.Outputs, xb.Outputs...))
|
|
||||||
}
|
|
||||||
if xb.Pull != nil {
|
|
||||||
t.Pull = xb.Pull
|
|
||||||
}
|
|
||||||
if xb.NoCache != nil {
|
|
||||||
t.NoCache = xb.NoCache
|
|
||||||
}
|
|
||||||
if len(xb.NoCacheFilter) > 0 {
|
|
||||||
t.NoCacheFilter = dedupSlice(append(t.NoCacheFilter, xb.NoCacheFilter...))
|
|
||||||
}
|
|
||||||
if len(xb.Contexts) > 0 {
|
|
||||||
t.Contexts = dedupMap(t.Contexts, xb.Contexts)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// composeValidate validates a compose file
|
|
||||||
func composeValidate(project *compose.Project) error {
|
|
||||||
for _, s := range project.Services {
|
|
||||||
if s.Build != nil {
|
|
||||||
for _, secret := range s.Build.Secrets {
|
|
||||||
if _, ok := project.Secrets[secret.Source]; !ok {
|
|
||||||
return errors.Wrap(errComposeInvalid, fmt.Sprintf("service %q refers to undefined build secret %s", sanitizeTargetName(s.Name), secret.Source))
|
|
||||||
}
|
}
|
||||||
|
case "cache-from":
|
||||||
|
t.CacheFrom = []string{} // Needed to override the main field
|
||||||
|
if res, k := val.(string); k {
|
||||||
|
t.CacheFrom = append(t.CacheFrom, res)
|
||||||
|
} else {
|
||||||
|
for _, res := range val.([]interface{}) {
|
||||||
|
t.CacheFrom = append(t.CacheFrom, res.(string))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case "cache-to":
|
||||||
|
if res, k := val.(string); k {
|
||||||
|
t.CacheTo = append(t.CacheTo, res)
|
||||||
|
} else {
|
||||||
|
for _, res := range val.([]interface{}) {
|
||||||
|
t.CacheTo = append(t.CacheTo, res.(string))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case "secret":
|
||||||
|
if res, k := val.(string); k {
|
||||||
|
t.Secrets = append(t.Secrets, res)
|
||||||
|
} else {
|
||||||
|
for _, res := range val.([]interface{}) {
|
||||||
|
t.Secrets = append(t.Secrets, res.(string))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case "ssh":
|
||||||
|
if res, k := val.(string); k {
|
||||||
|
t.SSH = append(t.SSH, res)
|
||||||
|
} else {
|
||||||
|
for _, res := range val.([]interface{}) {
|
||||||
|
t.SSH = append(t.SSH, res.(string))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case "platforms":
|
||||||
|
if res, k := val.(string); k {
|
||||||
|
t.Platforms = append(t.Platforms, res)
|
||||||
|
} else {
|
||||||
|
for _, res := range val.([]interface{}) {
|
||||||
|
t.Platforms = append(t.Platforms, res.(string))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case "output":
|
||||||
|
if res, k := val.(string); k {
|
||||||
|
t.Outputs = append(t.Outputs, res)
|
||||||
|
} else {
|
||||||
|
for _, res := range val.([]interface{}) {
|
||||||
|
t.Outputs = append(t.Outputs, res.(string))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case "pull":
|
||||||
|
if res, ok := val.(bool); ok {
|
||||||
|
t.Pull = &res
|
||||||
|
}
|
||||||
|
case "no-cache":
|
||||||
|
if res, ok := val.(bool); ok {
|
||||||
|
t.NoCache = &res
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("compose file invalid: unkwown %s field for x-bake", key)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
for name, secret := range project.Secrets {
|
|
||||||
if secret.External.External {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if secret.File == "" && secret.Environment == "" {
|
|
||||||
return errors.Wrap(errComposeInvalid, fmt.Sprintf("secret %q must declare either `file` or `environment`", name))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// composeToBuildkitSecret converts secret from compose format to buildkit's
|
|
||||||
// csv format.
|
|
||||||
func composeToBuildkitSecret(inp compose.ServiceSecretConfig, psecret compose.SecretConfig) (string, error) {
|
|
||||||
if psecret.External.External {
|
|
||||||
return "", errors.Errorf("unsupported external secret %s", psecret.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
var bkattrs []string
|
|
||||||
if inp.Source != "" {
|
|
||||||
bkattrs = append(bkattrs, "id="+inp.Source)
|
|
||||||
}
|
|
||||||
if psecret.File != "" {
|
|
||||||
bkattrs = append(bkattrs, "src="+psecret.File)
|
|
||||||
}
|
|
||||||
if psecret.Environment != "" {
|
|
||||||
bkattrs = append(bkattrs, "env="+psecret.Environment)
|
|
||||||
}
|
|
||||||
|
|
||||||
return strings.Join(bkattrs, ","), nil
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -2,7 +2,6 @@ package bake
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
|
||||||
"sort"
|
"sort"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
@@ -20,29 +19,15 @@ services:
|
|||||||
build:
|
build:
|
||||||
context: ./dir
|
context: ./dir
|
||||||
dockerfile: Dockerfile-alternate
|
dockerfile: Dockerfile-alternate
|
||||||
network:
|
|
||||||
none
|
|
||||||
args:
|
args:
|
||||||
buildno: 123
|
buildno: 123
|
||||||
cache_from:
|
|
||||||
- type=local,src=path/to/cache
|
|
||||||
cache_to:
|
|
||||||
- type=local,dest=path/to/cache
|
|
||||||
secrets:
|
|
||||||
- token
|
|
||||||
- aws
|
|
||||||
secrets:
|
|
||||||
token:
|
|
||||||
environment: ENV_TOKEN
|
|
||||||
aws:
|
|
||||||
file: /root/.aws/credentials
|
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseCompose(dt, nil)
|
c, err := ParseCompose(dt)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Groups))
|
require.Equal(t, 1, len(c.Groups))
|
||||||
require.Equal(t, "default", c.Groups[0].Name)
|
require.Equal(t, c.Groups[0].Name, "default")
|
||||||
sort.Strings(c.Groups[0].Targets)
|
sort.Strings(c.Groups[0].Targets)
|
||||||
require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets)
|
require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets)
|
||||||
|
|
||||||
@@ -52,20 +37,12 @@ secrets:
|
|||||||
})
|
})
|
||||||
require.Equal(t, "db", c.Targets[0].Name)
|
require.Equal(t, "db", c.Targets[0].Name)
|
||||||
require.Equal(t, "./db", *c.Targets[0].Context)
|
require.Equal(t, "./db", *c.Targets[0].Context)
|
||||||
require.Equal(t, []string{"docker.io/tonistiigi/db"}, c.Targets[0].Tags)
|
|
||||||
|
|
||||||
require.Equal(t, "webapp", c.Targets[1].Name)
|
require.Equal(t, "webapp", c.Targets[1].Name)
|
||||||
require.Equal(t, "./dir", *c.Targets[1].Context)
|
require.Equal(t, "./dir", *c.Targets[1].Context)
|
||||||
require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
|
require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
|
||||||
require.Equal(t, 1, len(c.Targets[1].Args))
|
require.Equal(t, 1, len(c.Targets[1].Args))
|
||||||
require.Equal(t, "123", c.Targets[1].Args["buildno"])
|
require.Equal(t, "123", c.Targets[1].Args["buildno"])
|
||||||
require.Equal(t, []string{"type=local,src=path/to/cache"}, c.Targets[1].CacheFrom)
|
|
||||||
require.Equal(t, []string{"type=local,dest=path/to/cache"}, c.Targets[1].CacheTo)
|
|
||||||
require.Equal(t, "none", *c.Targets[1].NetworkMode)
|
|
||||||
require.Equal(t, []string{
|
|
||||||
"id=token,env=ENV_TOKEN",
|
|
||||||
"id=aws,src=/root/.aws/credentials",
|
|
||||||
}, c.Targets[1].Secrets)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNoBuildOutOfTreeService(t *testing.T) {
|
func TestNoBuildOutOfTreeService(t *testing.T) {
|
||||||
@@ -76,7 +53,7 @@ services:
|
|||||||
webapp:
|
webapp:
|
||||||
build: ./db
|
build: ./db
|
||||||
`)
|
`)
|
||||||
c, err := ParseCompose(dt, nil)
|
c, err := ParseCompose(dt)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(c.Groups))
|
require.Equal(t, 1, len(c.Groups))
|
||||||
}
|
}
|
||||||
@@ -94,7 +71,7 @@ services:
|
|||||||
target: webapp
|
target: webapp
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseCompose(dt, nil)
|
c, err := ParseCompose(dt)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 2, len(c.Targets))
|
require.Equal(t, 2, len(c.Targets))
|
||||||
@@ -119,15 +96,15 @@ services:
|
|||||||
target: webapp
|
target: webapp
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseCompose(dt, nil)
|
c, err := ParseCompose(dt)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 2, len(c.Targets))
|
require.Equal(t, 2, len(c.Targets))
|
||||||
sort.Slice(c.Targets, func(i, j int) bool {
|
sort.Slice(c.Targets, func(i, j int) bool {
|
||||||
return c.Targets[i].Name < c.Targets[j].Name
|
return c.Targets[i].Name < c.Targets[j].Name
|
||||||
})
|
})
|
||||||
require.Equal(t, "db", c.Targets[0].Name)
|
require.Equal(t, c.Targets[0].Name, "db")
|
||||||
require.Equal(t, "db", *c.Targets[0].Target)
|
require.Equal(t, "db", *c.Targets[0].Target)
|
||||||
require.Equal(t, "webapp", c.Targets[1].Name)
|
require.Equal(t, c.Targets[1].Name, "webapp")
|
||||||
require.Equal(t, "webapp", *c.Targets[1].Target)
|
require.Equal(t, "webapp", *c.Targets[1].Target)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -153,22 +130,28 @@ services:
|
|||||||
os.Setenv("ZZZ_BAR", "zzz_foo")
|
os.Setenv("ZZZ_BAR", "zzz_foo")
|
||||||
defer os.Unsetenv("ZZZ_BAR")
|
defer os.Unsetenv("ZZZ_BAR")
|
||||||
|
|
||||||
c, err := ParseCompose(dt, sliceToMap(os.Environ()))
|
c, err := ParseCompose(dt)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, "bar", c.Targets[0].Args["FOO"])
|
require.Equal(t, c.Targets[0].Args["FOO"], "bar")
|
||||||
require.Equal(t, "zzz_foo", c.Targets[0].Args["BAR"])
|
require.Equal(t, c.Targets[0].Args["BAR"], "zzz_foo")
|
||||||
require.Equal(t, "FOO", c.Targets[0].Args["BRB"])
|
require.Equal(t, c.Targets[0].Args["BRB"], "FOO")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestInconsistentComposeFile(t *testing.T) {
|
func TestBogusCompose(t *testing.T) {
|
||||||
var dt = []byte(`
|
var dt = []byte(`
|
||||||
services:
|
services:
|
||||||
|
db:
|
||||||
|
labels:
|
||||||
|
- "foo"
|
||||||
webapp:
|
webapp:
|
||||||
entrypoint: echo 1
|
build:
|
||||||
|
context: .
|
||||||
|
target: webapp
|
||||||
`)
|
`)
|
||||||
|
|
||||||
_, err := ParseCompose(dt, nil)
|
_, err := ParseCompose(dt)
|
||||||
require.NoError(t, err)
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "has neither an image nor a build context specified: invalid compose project")
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAdvancedNetwork(t *testing.T) {
|
func TestAdvancedNetwork(t *testing.T) {
|
||||||
@@ -192,28 +175,10 @@ networks:
|
|||||||
gateway: 10.5.0.254
|
gateway: 10.5.0.254
|
||||||
`)
|
`)
|
||||||
|
|
||||||
_, err := ParseCompose(dt, nil)
|
_, err := ParseCompose(dt)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestTags(t *testing.T) {
|
|
||||||
var dt = []byte(`
|
|
||||||
services:
|
|
||||||
example:
|
|
||||||
image: example
|
|
||||||
build:
|
|
||||||
context: .
|
|
||||||
dockerfile: Dockerfile
|
|
||||||
tags:
|
|
||||||
- foo
|
|
||||||
- bar
|
|
||||||
`)
|
|
||||||
|
|
||||||
c, err := ParseCompose(dt, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, []string{"foo", "bar"}, c.Targets[0].Tags)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDependsOnList(t *testing.T) {
|
func TestDependsOnList(t *testing.T) {
|
||||||
var dt = []byte(`
|
var dt = []byte(`
|
||||||
version: "3.8"
|
version: "3.8"
|
||||||
@@ -246,7 +211,7 @@ networks:
|
|||||||
name: test-net
|
name: test-net
|
||||||
`)
|
`)
|
||||||
|
|
||||||
_, err := ParseCompose(dt, nil)
|
_, err := ParseCompose(dt)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -260,16 +225,10 @@ services:
|
|||||||
dockerfile: ./Dockerfile
|
dockerfile: ./Dockerfile
|
||||||
cache_from:
|
cache_from:
|
||||||
- user/app:cache
|
- user/app:cache
|
||||||
cache_to:
|
|
||||||
- user/app:cache
|
|
||||||
tags:
|
|
||||||
- ct-addon:baz
|
|
||||||
args:
|
args:
|
||||||
CT_ECR: foo
|
CT_ECR: foo
|
||||||
CT_TAG: bar
|
CT_TAG: bar
|
||||||
x-bake:
|
x-bake:
|
||||||
contexts:
|
|
||||||
alpine: docker-image://alpine:3.13
|
|
||||||
tags:
|
tags:
|
||||||
- ct-addon:foo
|
- ct-addon:foo
|
||||||
- ct-addon:alp
|
- ct-addon:alp
|
||||||
@@ -278,8 +237,7 @@ services:
|
|||||||
- linux/arm64
|
- linux/arm64
|
||||||
cache-from:
|
cache-from:
|
||||||
- type=local,src=path/to/cache
|
- type=local,src=path/to/cache
|
||||||
cache-to:
|
cache-to: local,dest=path/to/cache
|
||||||
- type=local,dest=path/to/cache
|
|
||||||
pull: true
|
pull: true
|
||||||
|
|
||||||
aws:
|
aws:
|
||||||
@@ -299,267 +257,27 @@ services:
|
|||||||
no-cache: true
|
no-cache: true
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseCompose(dt, nil)
|
c, err := ParseCompose(dt)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 2, len(c.Targets))
|
require.Equal(t, 2, len(c.Targets))
|
||||||
sort.Slice(c.Targets, func(i, j int) bool {
|
sort.Slice(c.Targets, func(i, j int) bool {
|
||||||
return c.Targets[i].Name < c.Targets[j].Name
|
return c.Targets[i].Name < c.Targets[j].Name
|
||||||
})
|
})
|
||||||
require.Equal(t, map[string]string{"CT_ECR": "foo", "CT_TAG": "bar"}, c.Targets[0].Args)
|
require.Equal(t, c.Targets[0].Args, map[string]string{"CT_ECR": "foo", "CT_TAG": "bar"})
|
||||||
require.Equal(t, []string{"ct-addon:baz", "ct-addon:foo", "ct-addon:alp"}, c.Targets[0].Tags)
|
require.Equal(t, c.Targets[0].Tags, []string{"ct-addon:foo", "ct-addon:alp"})
|
||||||
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[0].Platforms)
|
require.Equal(t, c.Targets[0].Platforms, []string{"linux/amd64", "linux/arm64"})
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
|
require.Equal(t, c.Targets[0].CacheFrom, []string{"type=local,src=path/to/cache"})
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
|
require.Equal(t, c.Targets[0].CacheTo, []string{"local,dest=path/to/cache"})
|
||||||
require.Equal(t, newBool(true), c.Targets[0].Pull)
|
require.Equal(t, c.Targets[0].Pull, newBool(true))
|
||||||
require.Equal(t, map[string]string{"alpine": "docker-image://alpine:3.13"}, c.Targets[0].Contexts)
|
require.Equal(t, c.Targets[1].Tags, []string{"ct-fake-aws:bar"})
|
||||||
require.Equal(t, []string{"ct-fake-aws:bar"}, c.Targets[1].Tags)
|
require.Equal(t, c.Targets[1].Secrets, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"})
|
||||||
require.Equal(t, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"}, c.Targets[1].Secrets)
|
require.Equal(t, c.Targets[1].SSH, []string{"default"})
|
||||||
require.Equal(t, []string{"default"}, c.Targets[1].SSH)
|
require.Equal(t, c.Targets[1].Platforms, []string{"linux/arm64"})
|
||||||
require.Equal(t, []string{"linux/arm64"}, c.Targets[1].Platforms)
|
require.Equal(t, c.Targets[1].Outputs, []string{"type=docker"})
|
||||||
require.Equal(t, []string{"type=docker"}, c.Targets[1].Outputs)
|
require.Equal(t, c.Targets[1].NoCache, newBool(true))
|
||||||
require.Equal(t, newBool(true), c.Targets[1].NoCache)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestComposeExtDedup(t *testing.T) {
|
|
||||||
var dt = []byte(`
|
|
||||||
services:
|
|
||||||
webapp:
|
|
||||||
image: app:bar
|
|
||||||
build:
|
|
||||||
cache_from:
|
|
||||||
- user/app:cache
|
|
||||||
cache_to:
|
|
||||||
- user/app:cache
|
|
||||||
tags:
|
|
||||||
- ct-addon:foo
|
|
||||||
x-bake:
|
|
||||||
tags:
|
|
||||||
- ct-addon:foo
|
|
||||||
- ct-addon:baz
|
|
||||||
cache-from:
|
|
||||||
- user/app:cache
|
|
||||||
- type=local,src=path/to/cache
|
|
||||||
cache-to:
|
|
||||||
- type=local,dest=path/to/cache
|
|
||||||
`)
|
|
||||||
|
|
||||||
c, err := ParseCompose(dt, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
|
||||||
require.Equal(t, []string{"ct-addon:foo", "ct-addon:baz"}, c.Targets[0].Tags)
|
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,src=path/to/cache"}, c.Targets[0].CacheFrom)
|
|
||||||
require.Equal(t, []string{"user/app:cache", "type=local,dest=path/to/cache"}, c.Targets[0].CacheTo)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestEnv(t *testing.T) {
|
|
||||||
envf, err := os.CreateTemp("", "env")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer os.Remove(envf.Name())
|
|
||||||
|
|
||||||
_, err = envf.WriteString("FOO=bsdf -csdf\n")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
var dt = []byte(`
|
|
||||||
services:
|
|
||||||
scratch:
|
|
||||||
build:
|
|
||||||
context: .
|
|
||||||
args:
|
|
||||||
CT_ECR: foo
|
|
||||||
FOO:
|
|
||||||
NODE_ENV:
|
|
||||||
environment:
|
|
||||||
- NODE_ENV=test
|
|
||||||
- AWS_ACCESS_KEY_ID=dummy
|
|
||||||
- AWS_SECRET_ACCESS_KEY=dummy
|
|
||||||
env_file:
|
|
||||||
- ` + envf.Name() + `
|
|
||||||
`)
|
|
||||||
|
|
||||||
c, err := ParseCompose(dt, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, map[string]string{"CT_ECR": "foo", "FOO": "bsdf -csdf", "NODE_ENV": "test"}, c.Targets[0].Args)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestDotEnv(t *testing.T) {
|
|
||||||
tmpdir := t.TempDir()
|
|
||||||
|
|
||||||
err := os.WriteFile(filepath.Join(tmpdir, ".env"), []byte("FOO=bar"), 0644)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
var dt = []byte(`
|
|
||||||
services:
|
|
||||||
scratch:
|
|
||||||
build:
|
|
||||||
context: .
|
|
||||||
args:
|
|
||||||
FOO:
|
|
||||||
`)
|
|
||||||
|
|
||||||
chdir(t, tmpdir)
|
|
||||||
c, _, err := ParseComposeFile(dt, "docker-compose.yml")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, map[string]string{"FOO": "bar"}, c.Targets[0].Args)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPorts(t *testing.T) {
|
|
||||||
var dt = []byte(`
|
|
||||||
services:
|
|
||||||
foo:
|
|
||||||
build:
|
|
||||||
context: .
|
|
||||||
ports:
|
|
||||||
- 3306:3306
|
|
||||||
bar:
|
|
||||||
build:
|
|
||||||
context: .
|
|
||||||
ports:
|
|
||||||
- mode: ingress
|
|
||||||
target: 3306
|
|
||||||
published: "3306"
|
|
||||||
protocol: tcp
|
|
||||||
`)
|
|
||||||
_, err := ParseCompose(dt, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func newBool(val bool) *bool {
|
func newBool(val bool) *bool {
|
||||||
b := val
|
b := val
|
||||||
return &b
|
return &b
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServiceName(t *testing.T) {
|
|
||||||
cases := []struct {
|
|
||||||
svc string
|
|
||||||
wantErr bool
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
svc: "a",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
svc: "abc",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
svc: "a.b",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
svc: "_a",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
svc: "a_b",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
svc: "AbC",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
svc: "AbC-0123",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range cases {
|
|
||||||
tt := tt
|
|
||||||
t.Run(tt.svc, func(t *testing.T) {
|
|
||||||
_, err := ParseCompose([]byte(`
|
|
||||||
services:
|
|
||||||
`+tt.svc+`:
|
|
||||||
build:
|
|
||||||
context: .
|
|
||||||
`), nil)
|
|
||||||
if tt.wantErr {
|
|
||||||
require.Error(t, err)
|
|
||||||
} else {
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestValidateComposeSecret(t *testing.T) {
|
|
||||||
cases := []struct {
|
|
||||||
name string
|
|
||||||
dt []byte
|
|
||||||
wantErr bool
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
name: "secret set by file",
|
|
||||||
dt: []byte(`
|
|
||||||
secrets:
|
|
||||||
foo:
|
|
||||||
file: .secret
|
|
||||||
`),
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "secret set by environment",
|
|
||||||
dt: []byte(`
|
|
||||||
secrets:
|
|
||||||
foo:
|
|
||||||
environment: TOKEN
|
|
||||||
`),
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "external secret",
|
|
||||||
dt: []byte(`
|
|
||||||
secrets:
|
|
||||||
foo:
|
|
||||||
external: true
|
|
||||||
`),
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "unset secret",
|
|
||||||
dt: []byte(`
|
|
||||||
secrets:
|
|
||||||
foo: {}
|
|
||||||
`),
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "undefined secret",
|
|
||||||
dt: []byte(`
|
|
||||||
services:
|
|
||||||
foo:
|
|
||||||
build:
|
|
||||||
secrets:
|
|
||||||
- token
|
|
||||||
`),
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range cases {
|
|
||||||
tt := tt
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
_, err := ParseCompose(tt.dt, nil)
|
|
||||||
if tt.wantErr {
|
|
||||||
require.Error(t, err)
|
|
||||||
} else {
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// chdir changes the current working directory to the named directory,
|
|
||||||
// and then restore the original working directory at the end of the test.
|
|
||||||
func chdir(t *testing.T, dir string) {
|
|
||||||
olddir, err := os.Getwd()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("chdir: %v", err)
|
|
||||||
}
|
|
||||||
if err := os.Chdir(dir); err != nil {
|
|
||||||
t.Fatalf("chdir %s: %v", dir, err)
|
|
||||||
}
|
|
||||||
t.Cleanup(func() {
|
|
||||||
if err := os.Chdir(olddir); err != nil {
|
|
||||||
t.Errorf("chdir to original working directory %s: %v", olddir, err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ package bake
|
|||||||
import (
|
import (
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/hashicorp/hcl/v2"
|
hcl "github.com/hashicorp/hcl/v2"
|
||||||
"github.com/hashicorp/hcl/v2/hclparse"
|
"github.com/hashicorp/hcl/v2/hclparse"
|
||||||
"github.com/moby/buildkit/solver/errdefs"
|
"github.com/moby/buildkit/solver/errdefs"
|
||||||
"github.com/moby/buildkit/solver/pb"
|
"github.com/moby/buildkit/solver/pb"
|
||||||
|
|||||||
162
bake/hcl_test.go
162
bake/hcl_test.go
@@ -620,165 +620,3 @@ func TestHCLBuiltinVars(t *testing.T) {
|
|||||||
require.Equal(t, "foo", *c.Targets[0].Context)
|
require.Equal(t, "foo", *c.Targets[0].Context)
|
||||||
require.Equal(t, "test", *c.Targets[0].Dockerfile)
|
require.Equal(t, "test", *c.Targets[0].Dockerfile)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestCombineHCLAndJSONTargets(t *testing.T) {
|
|
||||||
c, err := ParseFiles([]File{
|
|
||||||
{
|
|
||||||
Name: "docker-bake.hcl",
|
|
||||||
Data: []byte(`
|
|
||||||
group "default" {
|
|
||||||
targets = ["a"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "metadata-a" {}
|
|
||||||
target "metadata-b" {}
|
|
||||||
|
|
||||||
target "a" {
|
|
||||||
inherits = ["metadata-a"]
|
|
||||||
context = "."
|
|
||||||
target = "a"
|
|
||||||
}
|
|
||||||
|
|
||||||
target "b" {
|
|
||||||
inherits = ["metadata-b"]
|
|
||||||
context = "."
|
|
||||||
target = "b"
|
|
||||||
}`),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "metadata-a.json",
|
|
||||||
Data: []byte(`
|
|
||||||
{
|
|
||||||
"target": [{
|
|
||||||
"metadata-a": [{
|
|
||||||
"tags": [
|
|
||||||
"app/a:1.0.0",
|
|
||||||
"app/a:latest"
|
|
||||||
]
|
|
||||||
}]
|
|
||||||
}]
|
|
||||||
}`),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "metadata-b.json",
|
|
||||||
Data: []byte(`
|
|
||||||
{
|
|
||||||
"target": [{
|
|
||||||
"metadata-b": [{
|
|
||||||
"tags": [
|
|
||||||
"app/b:1.0.0",
|
|
||||||
"app/b:latest"
|
|
||||||
]
|
|
||||||
}]
|
|
||||||
}]
|
|
||||||
}`),
|
|
||||||
},
|
|
||||||
}, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Groups))
|
|
||||||
require.Equal(t, "default", c.Groups[0].Name)
|
|
||||||
require.Equal(t, []string{"a"}, c.Groups[0].Targets)
|
|
||||||
|
|
||||||
require.Equal(t, 4, len(c.Targets))
|
|
||||||
|
|
||||||
require.Equal(t, c.Targets[0].Name, "metadata-a")
|
|
||||||
require.Equal(t, []string{"app/a:1.0.0", "app/a:latest"}, c.Targets[0].Tags)
|
|
||||||
|
|
||||||
require.Equal(t, c.Targets[1].Name, "metadata-b")
|
|
||||||
require.Equal(t, []string{"app/b:1.0.0", "app/b:latest"}, c.Targets[1].Tags)
|
|
||||||
|
|
||||||
require.Equal(t, c.Targets[2].Name, "a")
|
|
||||||
require.Equal(t, ".", *c.Targets[2].Context)
|
|
||||||
require.Equal(t, "a", *c.Targets[2].Target)
|
|
||||||
|
|
||||||
require.Equal(t, c.Targets[3].Name, "b")
|
|
||||||
require.Equal(t, ".", *c.Targets[3].Context)
|
|
||||||
require.Equal(t, "b", *c.Targets[3].Target)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestCombineHCLAndJSONVars(t *testing.T) {
|
|
||||||
c, err := ParseFiles([]File{
|
|
||||||
{
|
|
||||||
Name: "docker-bake.hcl",
|
|
||||||
Data: []byte(`
|
|
||||||
variable "ABC" {
|
|
||||||
default = "foo"
|
|
||||||
}
|
|
||||||
variable "DEF" {
|
|
||||||
default = ""
|
|
||||||
}
|
|
||||||
group "default" {
|
|
||||||
targets = ["one"]
|
|
||||||
}
|
|
||||||
target "one" {
|
|
||||||
args = {
|
|
||||||
a = "pre-${ABC}"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
target "two" {
|
|
||||||
args = {
|
|
||||||
b = "pre-${DEF}"
|
|
||||||
}
|
|
||||||
}`),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "foo.json",
|
|
||||||
Data: []byte(`{"variable": {"DEF": {"default": "bar"}}, "target": { "one": { "args": {"a": "pre-${ABC}-${DEF}"}} } }`),
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Name: "bar.json",
|
|
||||||
Data: []byte(`{"ABC": "ghi", "DEF": "jkl"}`),
|
|
||||||
},
|
|
||||||
}, nil)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Groups))
|
|
||||||
require.Equal(t, "default", c.Groups[0].Name)
|
|
||||||
require.Equal(t, []string{"one"}, c.Groups[0].Targets)
|
|
||||||
|
|
||||||
require.Equal(t, 2, len(c.Targets))
|
|
||||||
|
|
||||||
require.Equal(t, c.Targets[0].Name, "one")
|
|
||||||
require.Equal(t, map[string]string{"a": "pre-ghi-jkl"}, c.Targets[0].Args)
|
|
||||||
|
|
||||||
require.Equal(t, c.Targets[1].Name, "two")
|
|
||||||
require.Equal(t, map[string]string{"b": "pre-jkl"}, c.Targets[1].Args)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestEmptyVariableJSON(t *testing.T) {
|
|
||||||
dt := []byte(`{
|
|
||||||
"variable": {
|
|
||||||
"VAR": {}
|
|
||||||
}
|
|
||||||
}`)
|
|
||||||
_, err := ParseFile(dt, "docker-bake.json")
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFunctionNoParams(t *testing.T) {
|
|
||||||
dt := []byte(`
|
|
||||||
function "foo" {
|
|
||||||
result = "bar"
|
|
||||||
}
|
|
||||||
target "foo_target" {
|
|
||||||
args = {
|
|
||||||
test = foo()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
`)
|
|
||||||
|
|
||||||
_, err := ParseFile(dt, "docker-bake.hcl")
|
|
||||||
require.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFunctionNoResult(t *testing.T) {
|
|
||||||
dt := []byte(`
|
|
||||||
function "foo" {
|
|
||||||
params = ["a"]
|
|
||||||
}
|
|
||||||
`)
|
|
||||||
|
|
||||||
_, err := ParseFile(dt, "docker-bake.hcl")
|
|
||||||
require.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -16,9 +16,8 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type Opt struct {
|
type Opt struct {
|
||||||
LookupVar func(string) (string, bool)
|
LookupVar func(string) (string, bool)
|
||||||
Vars map[string]string
|
Vars map[string]string
|
||||||
ValidateLabel func(string) error
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type variable struct {
|
type variable struct {
|
||||||
@@ -111,13 +110,6 @@ func (p *parser) resolveFunction(name string) error {
|
|||||||
}
|
}
|
||||||
p.progressF[name] = struct{}{}
|
p.progressF[name] = struct{}{}
|
||||||
|
|
||||||
if f.Result == nil {
|
|
||||||
return errors.Errorf("empty result not allowed for %s", name)
|
|
||||||
}
|
|
||||||
if f.Params == nil {
|
|
||||||
return errors.Errorf("empty params not allowed for %s", name)
|
|
||||||
}
|
|
||||||
|
|
||||||
paramExprs, paramsDiags := hcl.ExprList(f.Params.Expr)
|
paramExprs, paramsDiags := hcl.ExprList(f.Params.Expr)
|
||||||
if paramsDiags.HasErrors() {
|
if paramsDiags.HasErrors() {
|
||||||
return paramsDiags
|
return paramsDiags
|
||||||
@@ -263,7 +255,6 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
|
|||||||
if err := gohcl.DecodeBody(b, nil, &defs); err != nil {
|
if err := gohcl.DecodeBody(b, nil, &defs); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defsSchema, _ := gohcl.ImpliedBodySchema(defs)
|
|
||||||
|
|
||||||
if opt.LookupVar == nil {
|
if opt.LookupVar == nil {
|
||||||
opt.LookupVar = func(string) (string, bool) {
|
opt.LookupVar = func(string) (string, bool) {
|
||||||
@@ -271,12 +262,6 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if opt.ValidateLabel == nil {
|
|
||||||
opt.ValidateLabel = func(string) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
p := &parser{
|
p := &parser{
|
||||||
opt: opt,
|
opt: opt,
|
||||||
|
|
||||||
@@ -308,20 +293,12 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
|
|||||||
p.funcs[v.Name] = v
|
p.funcs[v.Name] = v
|
||||||
}
|
}
|
||||||
|
|
||||||
content, b, diags := b.PartialContent(schema)
|
|
||||||
if diags.HasErrors() {
|
|
||||||
return diags
|
|
||||||
}
|
|
||||||
|
|
||||||
blocks, b, diags := b.PartialContent(defsSchema)
|
|
||||||
if diags.HasErrors() {
|
|
||||||
return diags
|
|
||||||
}
|
|
||||||
|
|
||||||
attrs, diags := b.JustAttributes()
|
attrs, diags := b.JustAttributes()
|
||||||
if diags.HasErrors() {
|
if diags.HasErrors() {
|
||||||
if d := removeAttributesDiags(diags, reserved, p.vars); len(d) > 0 {
|
for _, d := range diags {
|
||||||
return d
|
if d.Detail != "Blocks are not allowed here." {
|
||||||
|
return diags
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -377,32 +354,23 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
|
|||||||
if diags, ok := err.(hcl.Diagnostics); ok {
|
if diags, ok := err.(hcl.Diagnostics); ok {
|
||||||
return diags
|
return diags
|
||||||
}
|
}
|
||||||
var subject *hcl.Range
|
|
||||||
var context *hcl.Range
|
|
||||||
if p.funcs[k].Params != nil {
|
|
||||||
subject = &p.funcs[k].Params.Range
|
|
||||||
context = subject
|
|
||||||
} else {
|
|
||||||
for _, block := range blocks.Blocks {
|
|
||||||
if block.Type == "function" && len(block.Labels) == 1 && block.Labels[0] == k {
|
|
||||||
subject = &block.LabelRanges[0]
|
|
||||||
context = &block.DefRange
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return hcl.Diagnostics{
|
return hcl.Diagnostics{
|
||||||
&hcl.Diagnostic{
|
&hcl.Diagnostic{
|
||||||
Severity: hcl.DiagError,
|
Severity: hcl.DiagError,
|
||||||
Summary: "Invalid function",
|
Summary: "Invalid function",
|
||||||
Detail: err.Error(),
|
Detail: err.Error(),
|
||||||
Subject: subject,
|
Subject: &p.funcs[k].Params.Range,
|
||||||
Context: context,
|
Context: &p.funcs[k].Params.Range,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
content, _, diags := b.PartialContent(schema)
|
||||||
|
if diags.HasErrors() {
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
for _, a := range content.Attributes {
|
for _, a := range content.Attributes {
|
||||||
return hcl.Diagnostics{
|
return hcl.Diagnostics{
|
||||||
&hcl.Diagnostic{
|
&hcl.Diagnostic{
|
||||||
@@ -478,17 +446,6 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := opt.ValidateLabel(b.Labels[0]); err != nil {
|
|
||||||
return hcl.Diagnostics{
|
|
||||||
&hcl.Diagnostic{
|
|
||||||
Severity: hcl.DiagError,
|
|
||||||
Summary: "Invalid name",
|
|
||||||
Detail: err.Error(),
|
|
||||||
Subject: &b.LabelRanges[0],
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
lblIndex := setLabel(vv, b.Labels[0])
|
lblIndex := setLabel(vv, b.Labels[0])
|
||||||
|
|
||||||
oldValue, exists := t.values[b.Labels[0]]
|
oldValue, exists := t.values[b.Labels[0]]
|
||||||
@@ -539,33 +496,3 @@ func setLabel(v reflect.Value, lbl string) int {
|
|||||||
}
|
}
|
||||||
return -1
|
return -1
|
||||||
}
|
}
|
||||||
|
|
||||||
func removeAttributesDiags(diags hcl.Diagnostics, reserved map[string]struct{}, vars map[string]*variable) hcl.Diagnostics {
|
|
||||||
var fdiags hcl.Diagnostics
|
|
||||||
for _, d := range diags {
|
|
||||||
if fout := func(d *hcl.Diagnostic) bool {
|
|
||||||
// https://github.com/docker/buildx/pull/541
|
|
||||||
if d.Detail == "Blocks are not allowed here." {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
for r := range reserved {
|
|
||||||
// JSON body objects don't handle repeated blocks like HCL but
|
|
||||||
// reserved name attributes should be allowed when multi bodies are merged.
|
|
||||||
// https://github.com/hashicorp/hcl/blob/main/json/spec.md#blocks
|
|
||||||
if strings.HasPrefix(d.Detail, fmt.Sprintf(`Argument "%s" was already set at `, r)) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for v := range vars {
|
|
||||||
// Do the same for global variables
|
|
||||||
if strings.HasPrefix(d.Detail, fmt.Sprintf(`Argument "%s" was already set at `, v)) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}(d); !fout {
|
|
||||||
fdiags = append(fdiags, d)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return fdiags
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,15 +1,12 @@
|
|||||||
package hclparser
|
package hclparser
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/hashicorp/go-cty-funcs/cidr"
|
"github.com/hashicorp/go-cty-funcs/cidr"
|
||||||
"github.com/hashicorp/go-cty-funcs/crypto"
|
"github.com/hashicorp/go-cty-funcs/crypto"
|
||||||
"github.com/hashicorp/go-cty-funcs/encoding"
|
"github.com/hashicorp/go-cty-funcs/encoding"
|
||||||
"github.com/hashicorp/go-cty-funcs/uuid"
|
"github.com/hashicorp/go-cty-funcs/uuid"
|
||||||
"github.com/hashicorp/hcl/v2/ext/tryfunc"
|
"github.com/hashicorp/hcl/v2/ext/tryfunc"
|
||||||
"github.com/hashicorp/hcl/v2/ext/typeexpr"
|
"github.com/hashicorp/hcl/v2/ext/typeexpr"
|
||||||
"github.com/zclconf/go-cty/cty"
|
|
||||||
"github.com/zclconf/go-cty/cty/function"
|
"github.com/zclconf/go-cty/cty/function"
|
||||||
"github.com/zclconf/go-cty/cty/function/stdlib"
|
"github.com/zclconf/go-cty/cty/function/stdlib"
|
||||||
)
|
)
|
||||||
@@ -99,7 +96,6 @@ var stdlibFunctions = map[string]function.Function{
|
|||||||
"substr": stdlib.SubstrFunc,
|
"substr": stdlib.SubstrFunc,
|
||||||
"subtract": stdlib.SubtractFunc,
|
"subtract": stdlib.SubtractFunc,
|
||||||
"timeadd": stdlib.TimeAddFunc,
|
"timeadd": stdlib.TimeAddFunc,
|
||||||
"timestamp": timestampFunc,
|
|
||||||
"title": stdlib.TitleFunc,
|
"title": stdlib.TitleFunc,
|
||||||
"trim": stdlib.TrimFunc,
|
"trim": stdlib.TrimFunc,
|
||||||
"trimprefix": stdlib.TrimPrefixFunc,
|
"trimprefix": stdlib.TrimPrefixFunc,
|
||||||
@@ -113,14 +109,3 @@ var stdlibFunctions = map[string]function.Function{
|
|||||||
"values": stdlib.ValuesFunc,
|
"values": stdlib.ValuesFunc,
|
||||||
"zipmap": stdlib.ZipmapFunc,
|
"zipmap": stdlib.ZipmapFunc,
|
||||||
}
|
}
|
||||||
|
|
||||||
// timestampFunc constructs a function that returns a string representation of the current date and time.
|
|
||||||
//
|
|
||||||
// This function was imported from terraform's datetime utilities.
|
|
||||||
var timestampFunc = function.New(&function.Spec{
|
|
||||||
Params: []function.Parameter{},
|
|
||||||
Type: function.StaticReturnType(cty.String),
|
|
||||||
Impl: func(args []cty.Value, retType cty.Type) (cty.Value, error) {
|
|
||||||
return cty.StringVal(time.Now().UTC().Format(time.RFC3339)), nil
|
|
||||||
},
|
|
||||||
})
|
|
||||||
|
|||||||
580
build/build.go
580
build/build.go
@@ -2,45 +2,37 @@ package build
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"bytes"
|
|
||||||
"context"
|
"context"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
_ "crypto/sha256" // ensure digests can be computed
|
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"syscall"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/containerd/containerd/content"
|
|
||||||
"github.com/containerd/containerd/content/local"
|
|
||||||
"github.com/containerd/containerd/images"
|
"github.com/containerd/containerd/images"
|
||||||
"github.com/containerd/containerd/platforms"
|
"github.com/containerd/containerd/platforms"
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
"github.com/docker/buildx/util/imagetools"
|
"github.com/docker/buildx/util/imagetools"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/buildx/util/resolver"
|
"github.com/docker/buildx/util/resolver"
|
||||||
"github.com/docker/buildx/util/waitmap"
|
|
||||||
"github.com/docker/cli/opts"
|
"github.com/docker/cli/opts"
|
||||||
"github.com/docker/distribution/reference"
|
"github.com/docker/distribution/reference"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/builder/remotecontext/urlutil"
|
|
||||||
dockerclient "github.com/docker/docker/client"
|
dockerclient "github.com/docker/docker/client"
|
||||||
"github.com/docker/docker/pkg/jsonmessage"
|
"github.com/docker/docker/pkg/jsonmessage"
|
||||||
|
"github.com/docker/docker/pkg/urlutil"
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/client/llb"
|
"github.com/moby/buildkit/client/llb"
|
||||||
"github.com/moby/buildkit/exporter/containerimage/exptypes"
|
|
||||||
gateway "github.com/moby/buildkit/frontend/gateway/client"
|
gateway "github.com/moby/buildkit/frontend/gateway/client"
|
||||||
"github.com/moby/buildkit/session"
|
"github.com/moby/buildkit/session"
|
||||||
"github.com/moby/buildkit/session/upload/uploadprovider"
|
"github.com/moby/buildkit/session/upload/uploadprovider"
|
||||||
"github.com/moby/buildkit/solver/errdefs"
|
|
||||||
"github.com/moby/buildkit/solver/pb"
|
|
||||||
"github.com/moby/buildkit/util/apicaps"
|
"github.com/moby/buildkit/util/apicaps"
|
||||||
"github.com/moby/buildkit/util/entitlements"
|
"github.com/moby/buildkit/util/entitlements"
|
||||||
"github.com/moby/buildkit/util/progress/progresswriter"
|
"github.com/moby/buildkit/util/progress/progresswriter"
|
||||||
@@ -58,41 +50,27 @@ var (
|
|||||||
errDockerfileConflict = errors.New("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles")
|
errDockerfileConflict = errors.New("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles")
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
|
||||||
printFallbackImage = "docker/dockerfile-upstream:1.4-outline@sha256:627443ff4e2d0f635d429cfc1da5388bcd5a70949c38adcd3cd7c4e5df67c73c"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Options struct {
|
type Options struct {
|
||||||
Inputs Inputs
|
Inputs Inputs
|
||||||
|
|
||||||
Allow []entitlements.Entitlement
|
Allow []entitlements.Entitlement
|
||||||
BuildArgs map[string]string
|
BuildArgs map[string]string
|
||||||
CacheFrom []client.CacheOptionsEntry
|
CacheFrom []client.CacheOptionsEntry
|
||||||
CacheTo []client.CacheOptionsEntry
|
CacheTo []client.CacheOptionsEntry
|
||||||
CgroupParent string
|
CgroupParent string
|
||||||
Exports []client.ExportEntry
|
Exports []client.ExportEntry
|
||||||
ExtraHosts []string
|
ExtraHosts []string
|
||||||
ImageIDFile string
|
ImageIDFile string
|
||||||
Labels map[string]string
|
Labels map[string]string
|
||||||
NetworkMode string
|
NetworkMode string
|
||||||
NoCache bool
|
NoCache bool
|
||||||
NoCacheFilter []string
|
Platforms []specs.Platform
|
||||||
Platforms []specs.Platform
|
Pull bool
|
||||||
Pull bool
|
Session []session.Attachable
|
||||||
Session []session.Attachable
|
ShmSize opts.MemBytes
|
||||||
ShmSize opts.MemBytes
|
Tags []string
|
||||||
Tags []string
|
Target string
|
||||||
Target string
|
Ulimits *opts.UlimitOpt
|
||||||
Ulimits *opts.UlimitOpt
|
|
||||||
|
|
||||||
// Linked marks this target as exclusively linked (not requested by the user).
|
|
||||||
Linked bool
|
|
||||||
PrintFunc *PrintFunc
|
|
||||||
}
|
|
||||||
|
|
||||||
type PrintFunc struct {
|
|
||||||
Name string
|
|
||||||
Format string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type Inputs struct {
|
type Inputs struct {
|
||||||
@@ -101,21 +79,14 @@ type Inputs struct {
|
|||||||
InStream io.Reader
|
InStream io.Reader
|
||||||
ContextState *llb.State
|
ContextState *llb.State
|
||||||
DockerfileInline string
|
DockerfileInline string
|
||||||
NamedContexts map[string]NamedContext
|
|
||||||
}
|
|
||||||
|
|
||||||
type NamedContext struct {
|
|
||||||
Path string
|
|
||||||
State *llb.State
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type DriverInfo struct {
|
type DriverInfo struct {
|
||||||
Driver driver.Driver
|
Driver driver.Driver
|
||||||
Name string
|
Name string
|
||||||
Platform []specs.Platform
|
Platform []specs.Platform
|
||||||
Err error
|
Err error
|
||||||
ImageOpt imagetools.Opt
|
ImageOpt imagetools.Opt
|
||||||
ProxyConfig map[string]string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type DockerAPI interface {
|
type DockerAPI interface {
|
||||||
@@ -206,10 +177,6 @@ func splitToDriverPairs(availablePlatforms map[string]int, opt map[string]Option
|
|||||||
pp = append(pp, p)
|
pp = append(pp, p)
|
||||||
mm[idx] = pp
|
mm[idx] = pp
|
||||||
}
|
}
|
||||||
// if no platform is specified, use first driver
|
|
||||||
if len(mm) == 0 {
|
|
||||||
mm[0] = nil
|
|
||||||
}
|
|
||||||
dps := make([]driverPair, 0, 2)
|
dps := make([]driverPair, 0, 2)
|
||||||
for idx, pp := range mm {
|
for idx, pp := range mm {
|
||||||
dps = append(dps, driverPair{driverIndex: idx, platforms: pp})
|
dps = append(dps, driverPair{driverIndex: idx, platforms: pp})
|
||||||
@@ -366,8 +333,7 @@ func toRepoOnly(in string) (string, error) {
|
|||||||
return strings.Join(out, ","), nil
|
return strings.Join(out, ","), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, dl dockerLoadCallback) (solveOpt *client.SolveOpt, release func(), err error) {
|
func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, dl dockerLoadCallback) (solveOpt *client.SolveOpt, release func(), err error) {
|
||||||
d := di.Driver
|
|
||||||
defers := make([]func(), 0, 2)
|
defers := make([]func(), 0, 2)
|
||||||
releaseF := func() {
|
releaseF := func() {
|
||||||
for _, f := range defers {
|
for _, f := range defers {
|
||||||
@@ -533,12 +499,6 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if e.Type == "docker" || e.Type == "image" || e.Type == "oci" {
|
|
||||||
// inline buildinfo attrs from build arg
|
|
||||||
if v, ok := opt.BuildArgs["BUILDKIT_INLINE_BUILDINFO_ATTRS"]; ok {
|
|
||||||
e.Attrs["buildinfo-attrs"] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
so.Exports = opt.Exports
|
so.Exports = opt.Exports
|
||||||
@@ -563,9 +523,6 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
|
|||||||
if opt.Target != "" {
|
if opt.Target != "" {
|
||||||
so.FrontendAttrs["target"] = opt.Target
|
so.FrontendAttrs["target"] = opt.Target
|
||||||
}
|
}
|
||||||
if len(opt.NoCacheFilter) > 0 {
|
|
||||||
so.FrontendAttrs["no-cache"] = strings.Join(opt.NoCacheFilter, ",")
|
|
||||||
}
|
|
||||||
if opt.NoCache {
|
if opt.NoCache {
|
||||||
so.FrontendAttrs["no-cache"] = ""
|
so.FrontendAttrs["no-cache"] = ""
|
||||||
}
|
}
|
||||||
@@ -576,12 +533,6 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
|
|||||||
so.FrontendAttrs["label:"+k] = v
|
so.FrontendAttrs["label:"+k] = v
|
||||||
}
|
}
|
||||||
|
|
||||||
for k, v := range di.ProxyConfig {
|
|
||||||
if _, ok := opt.BuildArgs[k]; !ok {
|
|
||||||
so.FrontendAttrs["build-arg:"+k] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// set platforms
|
// set platforms
|
||||||
if len(opt.Platforms) != 0 {
|
if len(opt.Platforms) != 0 {
|
||||||
pp := make([]string, len(opt.Platforms))
|
pp := make([]string, len(opt.Platforms))
|
||||||
@@ -603,7 +554,7 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
|
|||||||
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
|
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
|
||||||
case "", "default":
|
case "", "default":
|
||||||
default:
|
default:
|
||||||
return nil, nil, errors.Errorf("network mode %q not supported by buildkit - you can define a custom network for your builder using the network driver-opt in buildx create", opt.NetworkMode)
|
return nil, nil, errors.Errorf("network mode %q not supported by buildkit. You can define a custom network for your builder using the network driver-opt in buildx create.", opt.NetworkMode)
|
||||||
}
|
}
|
||||||
|
|
||||||
// setup extrahosts
|
// setup extrahosts
|
||||||
@@ -629,111 +580,7 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
|
|||||||
return &so, releaseF, nil
|
return &so, releaseF, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ContainerConfig is configuration for a container to run.
|
|
||||||
type ContainerConfig struct {
|
|
||||||
ResultCtx *ResultContext
|
|
||||||
Args []string
|
|
||||||
Env []string
|
|
||||||
User string
|
|
||||||
Cwd string
|
|
||||||
Tty bool
|
|
||||||
Stdin io.ReadCloser
|
|
||||||
Stdout io.WriteCloser
|
|
||||||
Stderr io.WriteCloser
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResultContext is a build result with the client that built it.
|
|
||||||
type ResultContext struct {
|
|
||||||
Client *client.Client
|
|
||||||
Res *gateway.Result
|
|
||||||
}
|
|
||||||
|
|
||||||
// Invoke invokes a build result as a container.
|
|
||||||
func Invoke(ctx context.Context, cfg ContainerConfig) error {
|
|
||||||
if cfg.ResultCtx == nil {
|
|
||||||
return errors.Errorf("result must be provided")
|
|
||||||
}
|
|
||||||
c, res := cfg.ResultCtx.Client, cfg.ResultCtx.Res
|
|
||||||
|
|
||||||
mainCtx := ctx
|
|
||||||
|
|
||||||
_, err := c.Build(context.TODO(), client.SolveOpt{}, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
|
|
||||||
ctx, cancel := context.WithCancel(ctx)
|
|
||||||
go func() {
|
|
||||||
<-mainCtx.Done()
|
|
||||||
cancel()
|
|
||||||
}()
|
|
||||||
|
|
||||||
if res.Ref == nil {
|
|
||||||
return nil, errors.Errorf("no reference is registered")
|
|
||||||
}
|
|
||||||
st, err := res.Ref.ToState()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
def, err := st.Marshal(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
imgRef, err := c.Solve(ctx, gateway.SolveRequest{
|
|
||||||
Definition: def.ToPB(),
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
ctr, err := c.NewContainer(ctx, gateway.NewContainerRequest{
|
|
||||||
Mounts: []gateway.Mount{
|
|
||||||
{
|
|
||||||
Dest: "/",
|
|
||||||
MountType: pb.MountType_BIND,
|
|
||||||
Ref: imgRef.Ref,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer ctr.Release(context.TODO())
|
|
||||||
|
|
||||||
proc, err := ctr.Start(ctx, gateway.StartRequest{
|
|
||||||
Args: cfg.Args,
|
|
||||||
Env: cfg.Env,
|
|
||||||
User: cfg.User,
|
|
||||||
Cwd: cfg.Cwd,
|
|
||||||
Tty: cfg.Tty,
|
|
||||||
Stdin: cfg.Stdin,
|
|
||||||
Stdout: cfg.Stdout,
|
|
||||||
Stderr: cfg.Stderr,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Errorf("failed to start container: %v", err)
|
|
||||||
}
|
|
||||||
errCh := make(chan error)
|
|
||||||
doneCh := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
if err := proc.Wait(); err != nil {
|
|
||||||
errCh <- err
|
|
||||||
return
|
|
||||||
}
|
|
||||||
close(doneCh)
|
|
||||||
}()
|
|
||||||
select {
|
|
||||||
case <-doneCh:
|
|
||||||
case <-ctx.Done():
|
|
||||||
return nil, ctx.Err()
|
|
||||||
case err := <-errCh:
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
}, nil)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, docker DockerAPI, configDir string, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
|
func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, docker DockerAPI, configDir string, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
|
||||||
return BuildWithResultHandler(ctx, drivers, opt, docker, configDir, w, nil, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[string]Options, docker DockerAPI, configDir string, w progress.Writer, resultHandleFunc func(driverIndex int, rCtx *ResultContext), allowNoOutput bool) (resp map[string]*client.SolveResponse, err error) {
|
|
||||||
if len(drivers) == 0 {
|
if len(drivers) == 0 {
|
||||||
return nil, errors.Errorf("driver required for build")
|
return nil, errors.Errorf("driver required for build")
|
||||||
}
|
}
|
||||||
@@ -751,23 +598,13 @@ func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[s
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if noMobyDriver != nil && !noDefaultLoad() && noPrintFunc(opt) {
|
if noMobyDriver != nil && !noDefaultLoad() {
|
||||||
var noOutputTargets []string
|
for _, opt := range opt {
|
||||||
for name, opt := range opt {
|
if len(opt.Exports) == 0 {
|
||||||
if !opt.Linked && len(opt.Exports) == 0 {
|
logrus.Warnf("No output specified for %s driver. Build result will only remain in the build cache. To push result image into registry use --push or to load image into docker use --load", noMobyDriver.Factory().Name())
|
||||||
noOutputTargets = append(noOutputTargets, name)
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(noOutputTargets) > 0 && !allowNoOutput {
|
|
||||||
var warnNoOutputBuf bytes.Buffer
|
|
||||||
warnNoOutputBuf.WriteString("No output specified ")
|
|
||||||
if len(noOutputTargets) == 1 && noOutputTargets[0] == "default" {
|
|
||||||
warnNoOutputBuf.WriteString(fmt.Sprintf("with %s driver", noMobyDriver.Factory().Name()))
|
|
||||||
} else {
|
|
||||||
warnNoOutputBuf.WriteString(fmt.Sprintf("for %s target(s) with %s driver", strings.Join(noOutputTargets, ", "), noMobyDriver.Factory().Name()))
|
|
||||||
}
|
|
||||||
logrus.Warnf("%s. Build result will only remain in the build cache. To push result image into registry use --push or to load image into docker use --load", warnNoOutputBuf.String())
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
m, clients, err := resolveDrivers(ctx, drivers, opt, w)
|
m, clients, err := resolveDrivers(ctx, drivers, opt, w)
|
||||||
@@ -790,12 +627,12 @@ func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[s
|
|||||||
multiDriver := len(m[k]) > 1
|
multiDriver := len(m[k]) > 1
|
||||||
hasMobyDriver := false
|
hasMobyDriver := false
|
||||||
for i, dp := range m[k] {
|
for i, dp := range m[k] {
|
||||||
di := drivers[dp.driverIndex]
|
d := drivers[dp.driverIndex].Driver
|
||||||
if di.Driver.IsMobyDriver() {
|
if d.IsMobyDriver() {
|
||||||
hasMobyDriver = true
|
hasMobyDriver = true
|
||||||
}
|
}
|
||||||
opt.Platforms = dp.platforms
|
opt.Platforms = dp.platforms
|
||||||
so, release, err := toSolveOpt(ctx, di, multiDriver, opt, dp.bopts, configDir, w, func(name string) (io.WriteCloser, func(), error) {
|
so, release, err := toSolveOpt(ctx, d, multiDriver, opt, dp.bopts, configDir, w, func(name string) (io.WriteCloser, func(), error) {
|
||||||
return newDockerLoader(ctx, docker, name, w)
|
return newDockerLoader(ctx, docker, name, w)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -828,35 +665,8 @@ func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[s
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// validate that all links between targets use same drivers
|
|
||||||
for name := range opt {
|
|
||||||
dps := m[name]
|
|
||||||
for _, dp := range dps {
|
|
||||||
for k, v := range dp.so.FrontendAttrs {
|
|
||||||
if strings.HasPrefix(k, "context:") && strings.HasPrefix(v, "target:") {
|
|
||||||
k2 := strings.TrimPrefix(v, "target:")
|
|
||||||
dps2, ok := m[k2]
|
|
||||||
if !ok {
|
|
||||||
return nil, errors.Errorf("failed to find target %s for context %s", k2, strings.TrimPrefix(k, "context:")) // should be validated before already
|
|
||||||
}
|
|
||||||
var found bool
|
|
||||||
for _, dp2 := range dps2 {
|
|
||||||
if dp2.driverIndex == dp.driverIndex {
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
return nil, errors.Errorf("failed to use %s as context %s for %s because targets build with different drivers", k2, strings.TrimPrefix(k, "context:"), name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
resp = map[string]*client.SolveResponse{}
|
resp = map[string]*client.SolveResponse{}
|
||||||
var respMu sync.Mutex
|
var respMu sync.Mutex
|
||||||
results := waitmap.New()
|
|
||||||
|
|
||||||
multiTarget := len(opt) > 1
|
multiTarget := len(opt) > 1
|
||||||
|
|
||||||
@@ -897,12 +707,9 @@ func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[s
|
|||||||
resp[k] = res[0]
|
resp[k] = res[0]
|
||||||
respMu.Unlock()
|
respMu.Unlock()
|
||||||
if len(res) == 1 {
|
if len(res) == 1 {
|
||||||
dgst := res[0].ExporterResponse[exptypes.ExporterImageDigestKey]
|
digest := res[0].ExporterResponse["containerimage.digest"]
|
||||||
if v, ok := res[0].ExporterResponse[exptypes.ExporterImageConfigDigestKey]; ok {
|
|
||||||
dgst = v
|
|
||||||
}
|
|
||||||
if opt.ImageIDFile != "" {
|
if opt.ImageIDFile != "" {
|
||||||
return os.WriteFile(opt.ImageIDFile, []byte(dgst), 0644)
|
return ioutil.WriteFile(opt.ImageIDFile, []byte(digest), 0644)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -912,7 +719,7 @@ func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[s
|
|||||||
descs := make([]specs.Descriptor, 0, len(res))
|
descs := make([]specs.Descriptor, 0, len(res))
|
||||||
|
|
||||||
for _, r := range res {
|
for _, r := range res {
|
||||||
s, ok := r.ExporterResponse[exptypes.ExporterImageDigestKey]
|
s, ok := r.ExporterResponse["containerimage.digest"]
|
||||||
if ok {
|
if ok {
|
||||||
descs = append(descs, specs.Descriptor{
|
descs = append(descs, specs.Descriptor{
|
||||||
Digest: digest.Digest(s),
|
Digest: digest.Digest(s),
|
||||||
@@ -946,26 +753,12 @@ func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[s
|
|||||||
|
|
||||||
itpull := imagetools.New(imageopt)
|
itpull := imagetools.New(imageopt)
|
||||||
|
|
||||||
ref, err := reference.ParseNormalizedNamed(names[0])
|
dt, desc, err := itpull.Combine(ctx, names[0], descs)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
ref = reference.TagNameOnly(ref)
|
|
||||||
|
|
||||||
srcs := make([]*imagetools.Source, len(descs))
|
|
||||||
for i, desc := range descs {
|
|
||||||
srcs[i] = &imagetools.Source{
|
|
||||||
Desc: desc,
|
|
||||||
Ref: ref,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
dt, desc, err := itpull.Combine(ctx, srcs)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if opt.ImageIDFile != "" {
|
if opt.ImageIDFile != "" {
|
||||||
if err := os.WriteFile(opt.ImageIDFile, []byte(desc.Digest), 0644); err != nil {
|
if err := ioutil.WriteFile(opt.ImageIDFile, []byte(desc.Digest), 0644); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -998,6 +791,7 @@ func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[s
|
|||||||
|
|
||||||
for i, dp := range dps {
|
for i, dp := range dps {
|
||||||
so := *dp.so
|
so := *dp.so
|
||||||
|
|
||||||
if multiDriver {
|
if multiDriver {
|
||||||
for i, e := range so.Exports {
|
for i, e := range so.Exports {
|
||||||
switch e.Type {
|
switch e.Type {
|
||||||
@@ -1030,99 +824,19 @@ func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[s
|
|||||||
pw := progress.WithPrefix(w, k, multiTarget)
|
pw := progress.WithPrefix(w, k, multiTarget)
|
||||||
|
|
||||||
c := clients[dp.driverIndex]
|
c := clients[dp.driverIndex]
|
||||||
|
|
||||||
|
pw = progress.ResetTime(pw)
|
||||||
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
pw = progress.ResetTime(pw)
|
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
|
|
||||||
if err := waitContextDeps(ctx, dp.driverIndex, results, &so); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
frontendInputs := make(map[string]*pb.Definition)
|
|
||||||
for key, st := range so.FrontendInputs {
|
|
||||||
def, err := st.Marshal(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
frontendInputs[key] = def.ToPB()
|
|
||||||
}
|
|
||||||
|
|
||||||
req := gateway.SolveRequest{
|
|
||||||
Frontend: so.Frontend,
|
|
||||||
FrontendOpt: so.FrontendAttrs,
|
|
||||||
FrontendInputs: frontendInputs,
|
|
||||||
}
|
|
||||||
so.Frontend = ""
|
|
||||||
so.FrontendAttrs = nil
|
|
||||||
so.FrontendInputs = nil
|
|
||||||
|
|
||||||
ch, done := progress.NewChannel(pw)
|
ch, done := progress.NewChannel(pw)
|
||||||
defer func() { <-done }()
|
defer func() { <-done }()
|
||||||
|
rr, err := c.Solve(ctx, nil, so, ch)
|
||||||
cc := c
|
|
||||||
var printRes map[string][]byte
|
|
||||||
rr, err := c.Build(ctx, so, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
|
|
||||||
var isFallback bool
|
|
||||||
var origErr error
|
|
||||||
for {
|
|
||||||
if opt.PrintFunc != nil {
|
|
||||||
if _, ok := req.FrontendOpt["frontend.caps"]; !ok {
|
|
||||||
req.FrontendOpt["frontend.caps"] = "moby.buildkit.frontend.subrequests+forward"
|
|
||||||
} else {
|
|
||||||
req.FrontendOpt["frontend.caps"] += ",moby.buildkit.frontend.subrequests+forward"
|
|
||||||
}
|
|
||||||
req.FrontendOpt["requestid"] = "frontend." + opt.PrintFunc.Name
|
|
||||||
if isFallback {
|
|
||||||
req.FrontendOpt["build-arg:BUILDKIT_SYNTAX"] = printFallbackImage
|
|
||||||
}
|
|
||||||
}
|
|
||||||
res, err := c.Solve(ctx, req)
|
|
||||||
if err != nil {
|
|
||||||
if origErr != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var reqErr *errdefs.UnsupportedSubrequestError
|
|
||||||
if !isFallback {
|
|
||||||
if errors.As(err, &reqErr) {
|
|
||||||
switch reqErr.Name {
|
|
||||||
case "frontend.outline", "frontend.targets":
|
|
||||||
isFallback = true
|
|
||||||
origErr = err
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// buildkit v0.8 vendored in Docker 20.10 does not support typed errors
|
|
||||||
if strings.Contains(err.Error(), "unsupported request frontend.outline") || strings.Contains(err.Error(), "unsupported request frontend.targets") {
|
|
||||||
isFallback = true
|
|
||||||
origErr = err
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if opt.PrintFunc != nil {
|
|
||||||
printRes = res.Metadata
|
|
||||||
}
|
|
||||||
results.Set(resultKey(dp.driverIndex, k), res)
|
|
||||||
if resultHandleFunc != nil {
|
|
||||||
resultHandleFunc(dp.driverIndex, &ResultContext{cc, res})
|
|
||||||
}
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
}, ch)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
res[i] = rr
|
res[i] = rr
|
||||||
|
|
||||||
if rr.ExporterResponse == nil {
|
|
||||||
rr.ExporterResponse = map[string]string{}
|
|
||||||
}
|
|
||||||
for k, v := range printRes {
|
|
||||||
rr.ExporterResponse[k] = string(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
d := drivers[dp.driverIndex].Driver
|
d := drivers[dp.driverIndex].Driver
|
||||||
if d.IsMobyDriver() {
|
if d.IsMobyDriver() {
|
||||||
for _, e := range so.Exports {
|
for _, e := range so.Exports {
|
||||||
@@ -1133,27 +847,13 @@ func BuildWithResultHandler(ctx context.Context, drivers []DriverInfo, opt map[s
|
|||||||
return errors.Errorf("tag is needed when pushing to registry")
|
return errors.Errorf("tag is needed when pushing to registry")
|
||||||
}
|
}
|
||||||
pw := progress.ResetTime(pw)
|
pw := progress.ResetTime(pw)
|
||||||
pushList := strings.Split(pushNames, ",")
|
for _, name := range strings.Split(pushNames, ",") {
|
||||||
for _, name := range pushList {
|
|
||||||
if err := progress.Wrap(fmt.Sprintf("pushing %s with docker", name), pw.Write, func(l progress.SubLogger) error {
|
if err := progress.Wrap(fmt.Sprintf("pushing %s with docker", name), pw.Write, func(l progress.SubLogger) error {
|
||||||
return pushWithMoby(ctx, d, name, l)
|
return pushWithMoby(ctx, d, name, l)
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
remoteDigest, err := remoteDigestWithMoby(ctx, d, pushList[0])
|
|
||||||
if err == nil && remoteDigest != "" {
|
|
||||||
// old daemons might not have containerimage.config.digest set
|
|
||||||
// in response so use containerimage.digest value for it if available
|
|
||||||
if _, ok := rr.ExporterResponse[exptypes.ExporterImageConfigDigestKey]; !ok {
|
|
||||||
if v, ok := rr.ExporterResponse[exptypes.ExporterImageDigestKey]; ok {
|
|
||||||
rr.ExporterResponse[exptypes.ExporterImageConfigDigestKey] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
rr.ExporterResponse[exptypes.ExporterImageDigestKey] = remoteDigest
|
|
||||||
} else if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1258,31 +958,8 @@ func pushWithMoby(ctx context.Context, d driver.Driver, name string, l progress.
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func remoteDigestWithMoby(ctx context.Context, d driver.Driver, name string) (string, error) {
|
|
||||||
api := d.Config().DockerAPI
|
|
||||||
if api == nil {
|
|
||||||
return "", errors.Errorf("invalid empty Docker API reference") // should never happen
|
|
||||||
}
|
|
||||||
creds, err := imagetools.RegistryAuthForRef(name, d.Config().Auth)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
image, _, err := api.ImageInspectWithRaw(ctx, name)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if len(image.RepoDigests) == 0 {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
remoteImage, err := api.DistributionInspect(ctx, name, creds)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return remoteImage.Descriptor.Digest.String(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func createTempDockerfile(r io.Reader) (string, error) {
|
func createTempDockerfile(r io.Reader) (string, error) {
|
||||||
dir, err := os.MkdirTemp("", "dockerfile")
|
dir, err := ioutil.TempDir("", "dockerfile")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
@@ -1341,7 +1018,7 @@ func LoadInputs(ctx context.Context, d driver.Driver, inp Inputs, pw progress.Wr
|
|||||||
}
|
}
|
||||||
// stdin is dockerfile
|
// stdin is dockerfile
|
||||||
dockerfileReader = buf
|
dockerfileReader = buf
|
||||||
inp.ContextPath, _ = os.MkdirTemp("", "empty-dir")
|
inp.ContextPath, _ = ioutil.TempDir("", "empty-dir")
|
||||||
toRemove = append(toRemove, inp.ContextPath)
|
toRemove = append(toRemove, inp.ContextPath)
|
||||||
target.LocalDirs["context"] = inp.ContextPath
|
target.LocalDirs["context"] = inp.ContextPath
|
||||||
}
|
}
|
||||||
@@ -1361,7 +1038,7 @@ func LoadInputs(ctx context.Context, d driver.Driver, inp Inputs, pw progress.Wr
|
|||||||
|
|
||||||
case urlutil.IsGitURL(inp.ContextPath), urlutil.IsURL(inp.ContextPath):
|
case urlutil.IsGitURL(inp.ContextPath), urlutil.IsURL(inp.ContextPath):
|
||||||
if inp.DockerfilePath == "-" {
|
if inp.DockerfilePath == "-" {
|
||||||
dockerfileReader = inp.InStream
|
return nil, errors.Errorf("Dockerfile from stdin is not supported with remote contexts")
|
||||||
}
|
}
|
||||||
target.FrontendAttrs["context"] = inp.ContextPath
|
target.FrontendAttrs["context"] = inp.ContextPath
|
||||||
default:
|
default:
|
||||||
@@ -1403,62 +1080,6 @@ func LoadInputs(ctx context.Context, d driver.Driver, inp Inputs, pw progress.Wr
|
|||||||
|
|
||||||
target.FrontendAttrs["filename"] = dockerfileName
|
target.FrontendAttrs["filename"] = dockerfileName
|
||||||
|
|
||||||
for k, v := range inp.NamedContexts {
|
|
||||||
target.FrontendAttrs["frontend.caps"] = "moby.buildkit.frontend.contexts+forward"
|
|
||||||
if v.State != nil {
|
|
||||||
target.FrontendAttrs["context:"+k] = "input:" + k
|
|
||||||
if target.FrontendInputs == nil {
|
|
||||||
target.FrontendInputs = make(map[string]llb.State)
|
|
||||||
}
|
|
||||||
target.FrontendInputs[k] = *v.State
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if urlutil.IsGitURL(v.Path) || urlutil.IsURL(v.Path) || strings.HasPrefix(v.Path, "docker-image://") || strings.HasPrefix(v.Path, "target:") {
|
|
||||||
target.FrontendAttrs["context:"+k] = v.Path
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// handle OCI layout
|
|
||||||
if strings.HasPrefix(v.Path, "oci-layout://") {
|
|
||||||
pathAlone := strings.TrimPrefix(v.Path, "oci-layout://")
|
|
||||||
parts := strings.SplitN(pathAlone, "@", 2)
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return nil, errors.Errorf("invalid oci-layout context %s, must be oci-layout:///path/to/layout@sha256:hash", v.Path)
|
|
||||||
}
|
|
||||||
localPath := parts[0]
|
|
||||||
dgst, err := digest.Parse(parts[1])
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "invalid oci-layout context %s, does not have proper hash, must be oci-layout:///path/to/layout@sha256:hash", v.Path)
|
|
||||||
}
|
|
||||||
store, err := local.NewStore(localPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "invalid store at %s", localPath)
|
|
||||||
}
|
|
||||||
// now we can add it
|
|
||||||
if target.OCIStores == nil {
|
|
||||||
target.OCIStores = map[string]content.Store{}
|
|
||||||
}
|
|
||||||
target.OCIStores[k] = store
|
|
||||||
|
|
||||||
target.FrontendAttrs["context:"+k] = fmt.Sprintf("oci-layout:%s@%s", k, dgst.String())
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
st, err := os.Stat(v.Path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "failed to get build context %v", k)
|
|
||||||
}
|
|
||||||
if !st.IsDir() {
|
|
||||||
return nil, errors.Wrapf(syscall.ENOTDIR, "failed to get build context path %v", v)
|
|
||||||
}
|
|
||||||
localName := k
|
|
||||||
if k == "context" || k == "dockerfile" {
|
|
||||||
localName = "_" + k // underscore to avoid collisions
|
|
||||||
}
|
|
||||||
target.LocalDirs[localName] = v.Path
|
|
||||||
target.FrontendAttrs["context:"+k] = "local:" + localName
|
|
||||||
}
|
|
||||||
|
|
||||||
release := func() {
|
release := func() {
|
||||||
for _, dir := range toRemove {
|
for _, dir := range toRemove {
|
||||||
os.RemoveAll(dir)
|
os.RemoveAll(dir)
|
||||||
@@ -1467,96 +1088,6 @@ func LoadInputs(ctx context.Context, d driver.Driver, inp Inputs, pw progress.Wr
|
|||||||
return release, nil
|
return release, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func resultKey(index int, name string) string {
|
|
||||||
return fmt.Sprintf("%d-%s", index, name)
|
|
||||||
}
|
|
||||||
|
|
||||||
func waitContextDeps(ctx context.Context, index int, results *waitmap.Map, so *client.SolveOpt) error {
|
|
||||||
m := map[string]string{}
|
|
||||||
for k, v := range so.FrontendAttrs {
|
|
||||||
if strings.HasPrefix(k, "context:") && strings.HasPrefix(v, "target:") {
|
|
||||||
target := resultKey(index, strings.TrimPrefix(v, "target:"))
|
|
||||||
m[target] = k
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(m) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
keys := make([]string, 0, len(m))
|
|
||||||
for k := range m {
|
|
||||||
keys = append(keys, k)
|
|
||||||
}
|
|
||||||
res, err := results.Get(ctx, keys...)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, v := range m {
|
|
||||||
r, ok := res[k]
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
rr, ok := r.(*gateway.Result)
|
|
||||||
if !ok {
|
|
||||||
return errors.Errorf("invalid result type %T", rr)
|
|
||||||
}
|
|
||||||
if so.FrontendAttrs == nil {
|
|
||||||
so.FrontendAttrs = map[string]string{}
|
|
||||||
}
|
|
||||||
if so.FrontendInputs == nil {
|
|
||||||
so.FrontendInputs = map[string]llb.State{}
|
|
||||||
}
|
|
||||||
if len(rr.Refs) > 0 {
|
|
||||||
for platform, r := range rr.Refs {
|
|
||||||
st, err := r.ToState()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
so.FrontendInputs[k+"::"+platform] = st
|
|
||||||
so.FrontendAttrs[v+"::"+platform] = "input:" + k + "::" + platform
|
|
||||||
metadata := make(map[string][]byte)
|
|
||||||
if dt, ok := rr.Metadata[exptypes.ExporterImageConfigKey+"/"+platform]; ok {
|
|
||||||
metadata[exptypes.ExporterImageConfigKey] = dt
|
|
||||||
}
|
|
||||||
if dt, ok := rr.Metadata[exptypes.ExporterBuildInfo+"/"+platform]; ok {
|
|
||||||
metadata[exptypes.ExporterBuildInfo] = dt
|
|
||||||
}
|
|
||||||
if len(metadata) > 0 {
|
|
||||||
dt, err := json.Marshal(metadata)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
so.FrontendAttrs["input-metadata:"+k+"::"+platform] = string(dt)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
delete(so.FrontendAttrs, v)
|
|
||||||
}
|
|
||||||
if rr.Ref != nil {
|
|
||||||
st, err := rr.Ref.ToState()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
so.FrontendInputs[k] = st
|
|
||||||
so.FrontendAttrs[v] = "input:" + k
|
|
||||||
metadata := make(map[string][]byte)
|
|
||||||
if dt, ok := rr.Metadata[exptypes.ExporterImageConfigKey]; ok {
|
|
||||||
metadata[exptypes.ExporterImageConfigKey] = dt
|
|
||||||
}
|
|
||||||
if dt, ok := rr.Metadata[exptypes.ExporterBuildInfo]; ok {
|
|
||||||
metadata[exptypes.ExporterBuildInfo] = dt
|
|
||||||
}
|
|
||||||
if len(metadata) > 0 {
|
|
||||||
dt, err := json.Marshal(metadata)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
so.FrontendAttrs["input-metadata:"+k] = string(dt)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func notSupported(d driver.Driver, f driver.Feature) error {
|
func notSupported(d driver.Driver, f driver.Feature) error {
|
||||||
return errors.Errorf("%s feature is currently not supported for %s driver. Please switch to a different driver (eg. \"docker buildx create --use\")", f, d.Factory().Name())
|
return errors.Errorf("%s feature is currently not supported for %s driver. Please switch to a different driver (eg. \"docker buildx create --use\")", f, d.Factory().Name())
|
||||||
}
|
}
|
||||||
@@ -1686,24 +1217,15 @@ func tryNodeIdentifier(configDir string) (out string) {
|
|||||||
if _, err := rand.Read(b); err != nil {
|
if _, err := rand.Read(b); err != nil {
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
if err := os.WriteFile(sessionFile, []byte(hex.EncodeToString(b)), 0600); err != nil {
|
if err := ioutil.WriteFile(sessionFile, []byte(hex.EncodeToString(b)), 0600); err != nil {
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
dt, err := os.ReadFile(sessionFile)
|
dt, err := ioutil.ReadFile(sessionFile)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return string(dt)
|
return string(dt)
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func noPrintFunc(opt map[string]Options) bool {
|
|
||||||
for _, v := range opt {
|
|
||||||
if v.PrintFunc != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ package build
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"os"
|
"io/ioutil"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
|
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
@@ -53,11 +53,11 @@ func createTempDockerfileFromURL(ctx context.Context, d driver.Driver, url strin
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
dir, err := os.MkdirTemp("", "buildx")
|
dir, err := ioutil.TempDir("", "buildx")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if err := os.WriteFile(filepath.Join(dir, "Dockerfile"), dt, 0600); err != nil {
|
if err := ioutil.WriteFile(filepath.Join(dir, "Dockerfile"), dt, 0600); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
out = dir
|
out = dir
|
||||||
|
|||||||
@@ -15,8 +15,13 @@ import (
|
|||||||
cliflags "github.com/docker/cli/cli/flags"
|
cliflags "github.com/docker/cli/cli/flags"
|
||||||
"github.com/moby/buildkit/solver/errdefs"
|
"github.com/moby/buildkit/solver/errdefs"
|
||||||
"github.com/moby/buildkit/util/stack"
|
"github.com/moby/buildkit/util/stack"
|
||||||
|
"github.com/moby/buildkit/util/tracing/detect"
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
|
||||||
_ "k8s.io/client-go/plugin/pkg/client/auth/azure"
|
_ "github.com/moby/buildkit/util/tracing/detect/delegated"
|
||||||
|
_ "github.com/moby/buildkit/util/tracing/env"
|
||||||
|
|
||||||
|
// FIXME: "k8s.io/client-go/plugin/pkg/client/auth/azure" is excluded because of compilation error
|
||||||
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
|
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
|
||||||
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
|
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
|
||||||
_ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
|
_ "k8s.io/client-go/plugin/pkg/client/auth/openstack"
|
||||||
@@ -24,67 +29,77 @@ import (
|
|||||||
_ "github.com/docker/buildx/driver/docker"
|
_ "github.com/docker/buildx/driver/docker"
|
||||||
_ "github.com/docker/buildx/driver/docker-container"
|
_ "github.com/docker/buildx/driver/docker-container"
|
||||||
_ "github.com/docker/buildx/driver/kubernetes"
|
_ "github.com/docker/buildx/driver/kubernetes"
|
||||||
_ "github.com/docker/buildx/driver/remote"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var experimental string
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
seed.WithTimeAndRand()
|
seed.WithTimeAndRand()
|
||||||
stack.SetVersionInfo(version.Version, version.Revision)
|
stack.SetVersionInfo(version.Version, version.Revision)
|
||||||
}
|
|
||||||
|
|
||||||
func runStandalone(cmd *command.DockerCli) error {
|
detect.ServiceName = "buildx"
|
||||||
if err := cmd.Initialize(cliflags.NewClientOptions()); err != nil {
|
// do not log tracing errors to stdio
|
||||||
return err
|
otel.SetErrorHandler(skipErrors{})
|
||||||
}
|
|
||||||
rootCmd := commands.NewRootCmd(os.Args[0], false, cmd)
|
|
||||||
return rootCmd.Execute()
|
|
||||||
}
|
|
||||||
|
|
||||||
func runPlugin(cmd *command.DockerCli) error {
|
|
||||||
rootCmd := commands.NewRootCmd("buildx", true, cmd)
|
|
||||||
return plugin.RunPlugin(cmd, rootCmd, manager.Metadata{
|
|
||||||
SchemaVersion: "0.1.0",
|
|
||||||
Vendor: "Docker Inc.",
|
|
||||||
Version: version.Version,
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
cmd, err := command.NewDockerCli()
|
if os.Getenv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND") == "" {
|
||||||
|
if len(os.Args) < 2 || os.Args[1] != manager.MetadataSubcommandName {
|
||||||
|
dockerCli, err := command.NewDockerCli()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Fprintln(os.Stderr, err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
opts := cliflags.NewClientOptions()
|
||||||
|
dockerCli.Initialize(opts)
|
||||||
|
rootCmd := commands.NewRootCmd(os.Args[0], false, dockerCli)
|
||||||
|
if err := rootCmd.Execute(); err != nil {
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dockerCli, err := command.NewDockerCli()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Fprintln(os.Stderr, err)
|
fmt.Fprintln(os.Stderr, err)
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
|
|
||||||
if plugin.RunningStandalone() {
|
p := commands.NewRootCmd("buildx", true, dockerCli)
|
||||||
err = runStandalone(cmd)
|
meta := manager.Metadata{
|
||||||
} else {
|
SchemaVersion: "0.1.0",
|
||||||
err = runPlugin(cmd)
|
Vendor: "Docker Inc.",
|
||||||
}
|
Version: version.Version,
|
||||||
if err == nil {
|
Experimental: experimental != "",
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if sterr, ok := err.(cli.StatusError); ok {
|
if err := plugin.RunPlugin(dockerCli, p, meta); err != nil {
|
||||||
if sterr.Status != "" {
|
if sterr, ok := err.(cli.StatusError); ok {
|
||||||
fmt.Fprintln(cmd.Err(), sterr.Status)
|
if sterr.Status != "" {
|
||||||
|
fmt.Fprintln(dockerCli.Err(), sterr.Status)
|
||||||
|
}
|
||||||
|
// StatusError should only be used for errors, and all errors should
|
||||||
|
// have a non-zero exit status, so never exit with 0
|
||||||
|
if sterr.StatusCode == 0 {
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
os.Exit(sterr.StatusCode)
|
||||||
}
|
}
|
||||||
// StatusError should only be used for errors, and all errors should
|
for _, s := range errdefs.Sources(err) {
|
||||||
// have a non-zero exit status, so never exit with 0
|
s.Print(dockerCli.Err())
|
||||||
if sterr.StatusCode == 0 {
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
os.Exit(sterr.StatusCode)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, s := range errdefs.Sources(err) {
|
if debug.IsEnabled() {
|
||||||
s.Print(cmd.Err())
|
fmt.Fprintf(dockerCli.Err(), "error: %+v", stack.Formatter(err))
|
||||||
}
|
} else {
|
||||||
if debug.IsEnabled() {
|
fmt.Fprintf(dockerCli.Err(), "error: %v\n", err)
|
||||||
fmt.Fprintf(cmd.Err(), "ERROR: %+v", stack.Formatter(err))
|
}
|
||||||
} else {
|
|
||||||
fmt.Fprintf(cmd.Err(), "ERROR: %v\n", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type skipErrors struct{}
|
||||||
|
|
||||||
|
func (skipErrors) Handle(err error) {}
|
||||||
|
|||||||
@@ -1,19 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/moby/buildkit/util/tracing/detect"
|
|
||||||
"go.opentelemetry.io/otel"
|
|
||||||
|
|
||||||
_ "github.com/moby/buildkit/util/tracing/detect/delegated"
|
|
||||||
_ "github.com/moby/buildkit/util/tracing/env"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
detect.ServiceName = "buildx"
|
|
||||||
// do not log tracing errors to stdio
|
|
||||||
otel.SetErrorHandler(skipErrors{})
|
|
||||||
}
|
|
||||||
|
|
||||||
type skipErrors struct{}
|
|
||||||
|
|
||||||
func (skipErrors) Handle(err error) {}
|
|
||||||
@@ -13,6 +13,7 @@ import (
|
|||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/buildx/util/tracing"
|
"github.com/docker/buildx/util/tracing"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/docker/docker/pkg/ioutils"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
@@ -75,7 +76,7 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
|
|||||||
|
|
||||||
ctx2, cancel := context.WithCancel(context.TODO())
|
ctx2, cancel := context.WithCancel(context.TODO())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
printer := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, in.progress)
|
printer := progress.NewPrinter(ctx2, os.Stderr, in.progress)
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
if printer != nil {
|
if printer != nil {
|
||||||
@@ -104,8 +105,8 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
|
|||||||
}
|
}
|
||||||
|
|
||||||
tgts, grps, err := bake.ReadTargets(ctx, files, targets, overrides, map[string]string{
|
tgts, grps, err := bake.ReadTargets(ctx, files, targets, overrides, map[string]string{
|
||||||
// don't forget to update documentation if you add a new
|
// Don't forget to update documentation if you add a new
|
||||||
// built-in variable: docs/guides/bake/file-definition.md#built-in-variables
|
// built-in variable: docs/reference/buildx_bake.md#built-in-variables
|
||||||
"BAKE_CMD_CONTEXT": cmdContext,
|
"BAKE_CMD_CONTEXT": cmdContext,
|
||||||
"BAKE_LOCAL_PLATFORM": platforms.DefaultString(),
|
"BAKE_LOCAL_PLATFORM": platforms.DefaultString(),
|
||||||
})
|
})
|
||||||
@@ -147,15 +148,19 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
|
|||||||
|
|
||||||
resp, err := build.Build(ctx, dis, bo, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer)
|
resp, err := build.Build(ctx, dis, bo, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return wrapBuildError(err, true)
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(in.metadataFile) > 0 {
|
if len(in.metadataFile) > 0 && resp != nil {
|
||||||
dt := make(map[string]interface{})
|
mdata := map[string]map[string]string{}
|
||||||
for t, r := range resp {
|
for k, r := range resp {
|
||||||
dt[t] = decodeExporterResponse(r.ExporterResponse)
|
mdata[k] = r.ExporterResponse
|
||||||
}
|
}
|
||||||
if err := writeMetadataFile(in.metadataFile, dt); err != nil {
|
mdatab, err := json.MarshalIndent(mdata, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := ioutils.AtomicWriteFile(in.metadataFile, mdatab, 0644); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -186,10 +191,10 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
|
|
||||||
flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
|
flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
|
||||||
flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--set=*.output=type=docker"`)
|
flags.BoolVar(&options.exportLoad, "load", false, "Shorthand for `--set=*.output=type=docker`")
|
||||||
flags.BoolVar(&options.printOnly, "print", false, "Print the options without building")
|
flags.BoolVar(&options.printOnly, "print", false, "Print the options without building")
|
||||||
flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--set=*.output=type=registry"`)
|
flags.BoolVar(&options.exportPush, "push", false, "Shorthand for `--set=*.output=type=registry`")
|
||||||
flags.StringArrayVar(&options.overrides, "set", nil, `Override target value (e.g., "targetpattern.key=value")`)
|
flags.StringArrayVar(&options.overrides, "set", nil, "Override target value (e.g., `targetpattern.key=value`)")
|
||||||
|
|
||||||
commonBuildFlags(&options.commonOptions, flags)
|
commonBuildFlags(&options.commonOptions, flags)
|
||||||
|
|
||||||
|
|||||||
@@ -1,46 +1,31 @@
|
|||||||
package commands
|
package commands
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"context"
|
"context"
|
||||||
"encoding/base64"
|
|
||||||
"encoding/csv"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strconv"
|
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/containerd/console"
|
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/build"
|
||||||
"github.com/docker/buildx/monitor"
|
|
||||||
"github.com/docker/buildx/util/buildflags"
|
"github.com/docker/buildx/util/buildflags"
|
||||||
"github.com/docker/buildx/util/confutil"
|
"github.com/docker/buildx/util/confutil"
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/buildx/util/tracing"
|
"github.com/docker/buildx/util/tracing"
|
||||||
"github.com/docker/cli-docs-tool/annotation"
|
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/cli/config"
|
|
||||||
dockeropts "github.com/docker/cli/opts"
|
dockeropts "github.com/docker/cli/opts"
|
||||||
"github.com/docker/distribution/reference"
|
|
||||||
"github.com/docker/docker/pkg/ioutils"
|
"github.com/docker/docker/pkg/ioutils"
|
||||||
"github.com/docker/go-units"
|
"github.com/docker/go-units"
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/session/auth/authprovider"
|
"github.com/moby/buildkit/session/auth/authprovider"
|
||||||
"github.com/moby/buildkit/solver/errdefs"
|
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
"github.com/moby/buildkit/util/grpcerrors"
|
|
||||||
"github.com/morikuni/aec"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const defaultTargetName = "default"
|
const defaultTargetName = "default"
|
||||||
@@ -48,29 +33,25 @@ const defaultTargetName = "default"
|
|||||||
type buildOptions struct {
|
type buildOptions struct {
|
||||||
contextPath string
|
contextPath string
|
||||||
dockerfileName string
|
dockerfileName string
|
||||||
printFunc string
|
|
||||||
|
|
||||||
allow []string
|
allow []string
|
||||||
buildArgs []string
|
buildArgs []string
|
||||||
cacheFrom []string
|
cacheFrom []string
|
||||||
cacheTo []string
|
cacheTo []string
|
||||||
cgroupParent string
|
cgroupParent string
|
||||||
contexts []string
|
extraHosts []string
|
||||||
extraHosts []string
|
imageIDFile string
|
||||||
imageIDFile string
|
labels []string
|
||||||
labels []string
|
networkMode string
|
||||||
networkMode string
|
outputs []string
|
||||||
noCacheFilter []string
|
platforms []string
|
||||||
outputs []string
|
quiet bool
|
||||||
platforms []string
|
secrets []string
|
||||||
quiet bool
|
shmSize dockeropts.MemBytes
|
||||||
secrets []string
|
ssh []string
|
||||||
shmSize dockeropts.MemBytes
|
tags []string
|
||||||
ssh []string
|
target string
|
||||||
tags []string
|
ulimits *dockeropts.UlimitOpt
|
||||||
target string
|
|
||||||
ulimits *dockeropts.UlimitOpt
|
|
||||||
invoke string
|
|
||||||
commonOptions
|
commonOptions
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -108,46 +89,29 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
|
|||||||
pull = *in.pull
|
pull = *in.pull
|
||||||
}
|
}
|
||||||
|
|
||||||
if noCache && len(in.noCacheFilter) > 0 {
|
|
||||||
return errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
|
|
||||||
}
|
|
||||||
|
|
||||||
if in.quiet && in.progress != "auto" && in.progress != "quiet" {
|
if in.quiet && in.progress != "auto" && in.progress != "quiet" {
|
||||||
return errors.Errorf("progress=%s and quiet cannot be used together", in.progress)
|
return errors.Errorf("progress=%s and quiet cannot be used together", in.progress)
|
||||||
} else if in.quiet {
|
} else if in.quiet {
|
||||||
in.progress = "quiet"
|
in.progress = "quiet"
|
||||||
}
|
}
|
||||||
|
|
||||||
contexts, err := parseContextNames(in.contexts)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
printFunc, err := parsePrintFunc(in.printFunc)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
opts := build.Options{
|
opts := build.Options{
|
||||||
Inputs: build.Inputs{
|
Inputs: build.Inputs{
|
||||||
ContextPath: in.contextPath,
|
ContextPath: in.contextPath,
|
||||||
DockerfilePath: in.dockerfileName,
|
DockerfilePath: in.dockerfileName,
|
||||||
InStream: os.Stdin,
|
InStream: os.Stdin,
|
||||||
NamedContexts: contexts,
|
|
||||||
},
|
},
|
||||||
BuildArgs: listToMap(in.buildArgs, true),
|
BuildArgs: listToMap(in.buildArgs, true),
|
||||||
ExtraHosts: in.extraHosts,
|
ExtraHosts: in.extraHosts,
|
||||||
ImageIDFile: in.imageIDFile,
|
ImageIDFile: in.imageIDFile,
|
||||||
Labels: listToMap(in.labels, false),
|
Labels: listToMap(in.labels, false),
|
||||||
NetworkMode: in.networkMode,
|
NetworkMode: in.networkMode,
|
||||||
NoCache: noCache,
|
NoCache: noCache,
|
||||||
NoCacheFilter: in.noCacheFilter,
|
Pull: pull,
|
||||||
Pull: pull,
|
ShmSize: in.shmSize,
|
||||||
ShmSize: in.shmSize,
|
Tags: in.tags,
|
||||||
Tags: in.tags,
|
Target: in.target,
|
||||||
Target: in.target,
|
Ulimits: in.ulimits,
|
||||||
Ulimits: in.ulimits,
|
|
||||||
PrintFunc: printFunc,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
platforms, err := platformutil.Parse(in.platforms)
|
platforms, err := platformutil.Parse(in.platforms)
|
||||||
@@ -156,8 +120,7 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
|
|||||||
}
|
}
|
||||||
opts.Platforms = platforms
|
opts.Platforms = platforms
|
||||||
|
|
||||||
dockerConfig := config.LoadDefaultConfigFile(os.Stderr)
|
opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(os.Stderr))
|
||||||
opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(dockerConfig))
|
|
||||||
|
|
||||||
secrets, err := buildflags.ParseSecretSpecs(in.secrets)
|
secrets, err := buildflags.ParseSecretSpecs(in.secrets)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -240,179 +203,48 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
|
|||||||
contextPathHash = in.contextPath
|
contextPathHash = in.contextPath
|
||||||
}
|
}
|
||||||
|
|
||||||
imageID, res, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile, in.invoke != "")
|
imageID, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile)
|
||||||
err = wrapBuildError(err, false)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if in.invoke != "" {
|
|
||||||
cfg, err := parseInvokeConfig(in.invoke)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
cfg.ResultCtx = res
|
|
||||||
con := console.Current()
|
|
||||||
if err := con.SetRaw(); err != nil {
|
|
||||||
return errors.Errorf("failed to configure terminal: %v", err)
|
|
||||||
}
|
|
||||||
err = monitor.RunMonitor(ctx, cfg, func(ctx context.Context) (*build.ResultContext, error) {
|
|
||||||
_, rr, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile, true)
|
|
||||||
return rr, err
|
|
||||||
}, io.NopCloser(os.Stdin), nopCloser{os.Stdout}, nopCloser{os.Stderr})
|
|
||||||
if err != nil {
|
|
||||||
logrus.Warnf("failed to run monitor: %v", err)
|
|
||||||
}
|
|
||||||
con.Reset()
|
|
||||||
}
|
|
||||||
|
|
||||||
if in.quiet {
|
if in.quiet {
|
||||||
fmt.Println(imageID)
|
fmt.Println(imageID)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type nopCloser struct {
|
func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]build.Options, progressMode, contextPathHash, instance string, metadataFile string) (imageID string, err error) {
|
||||||
io.WriteCloser
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c nopCloser) Close() error { return nil }
|
|
||||||
|
|
||||||
func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]build.Options, progressMode, contextPathHash, instance string, metadataFile string, allowNoOutput bool) (imageID string, res *build.ResultContext, err error) {
|
|
||||||
dis, err := getInstanceOrDefault(ctx, dockerCli, instance, contextPathHash)
|
dis, err := getInstanceOrDefault(ctx, dockerCli, instance, contextPathHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx2, cancel := context.WithCancel(context.TODO())
|
ctx2, cancel := context.WithCancel(context.TODO())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
printer := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, progressMode)
|
printer := progress.NewPrinter(ctx2, os.Stderr, progressMode)
|
||||||
|
|
||||||
var mu sync.Mutex
|
resp, err := build.Build(ctx, dis, opts, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer)
|
||||||
var idx int
|
|
||||||
resp, err := build.BuildWithResultHandler(ctx, dis, opts, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer, func(driverIndex int, gotRes *build.ResultContext) {
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
if res == nil || driverIndex < idx {
|
|
||||||
idx, res = driverIndex, gotRes
|
|
||||||
}
|
|
||||||
}, allowNoOutput)
|
|
||||||
err1 := printer.Wait()
|
err1 := printer.Wait()
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err = err1
|
err = err1
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", nil, err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(metadataFile) > 0 && resp != nil {
|
if len(metadataFile) > 0 && resp != nil {
|
||||||
if err := writeMetadataFile(metadataFile, decodeExporterResponse(resp[defaultTargetName].ExporterResponse)); err != nil {
|
mdatab, err := json.MarshalIndent(resp[defaultTargetName].ExporterResponse, "", " ")
|
||||||
return "", nil, err
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if err := ioutils.AtomicWriteFile(metadataFile, mdatab, 0644); err != nil {
|
||||||
|
return "", err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
printWarnings(os.Stderr, printer.Warnings(), progressMode)
|
return resp[defaultTargetName].ExporterResponse["containerimage.digest"], err
|
||||||
|
|
||||||
for k := range resp {
|
|
||||||
if opts[k].PrintFunc != nil {
|
|
||||||
if err := printResult(opts[k].PrintFunc, resp[k].ExporterResponse); err != nil {
|
|
||||||
return "", nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return resp[defaultTargetName].ExporterResponse["containerimage.digest"], res, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseInvokeConfig(invoke string) (cfg build.ContainerConfig, err error) {
|
|
||||||
csvReader := csv.NewReader(strings.NewReader(invoke))
|
|
||||||
fields, err := csvReader.Read()
|
|
||||||
if err != nil {
|
|
||||||
return cfg, err
|
|
||||||
}
|
|
||||||
cfg.Tty = true
|
|
||||||
if len(fields) == 1 && !strings.Contains(fields[0], "=") {
|
|
||||||
cfg.Args = []string{fields[0]}
|
|
||||||
return cfg, nil
|
|
||||||
}
|
|
||||||
var entrypoint string
|
|
||||||
var args []string
|
|
||||||
for _, field := range fields {
|
|
||||||
parts := strings.SplitN(field, "=", 2)
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return cfg, errors.Errorf("invalid value %s", field)
|
|
||||||
}
|
|
||||||
key := strings.ToLower(parts[0])
|
|
||||||
value := parts[1]
|
|
||||||
switch key {
|
|
||||||
case "args":
|
|
||||||
args = append(args, value) // TODO: support JSON
|
|
||||||
case "entrypoint":
|
|
||||||
entrypoint = value // TODO: support JSON
|
|
||||||
case "env":
|
|
||||||
cfg.Env = append(cfg.Env, value)
|
|
||||||
case "user":
|
|
||||||
cfg.User = value
|
|
||||||
case "cwd":
|
|
||||||
cfg.Cwd = value
|
|
||||||
case "tty":
|
|
||||||
cfg.Tty, err = strconv.ParseBool(value)
|
|
||||||
if err != nil {
|
|
||||||
return cfg, errors.Errorf("failed to parse tty: %v", err)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return cfg, errors.Errorf("unknown key %q", key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
cfg.Args = args
|
|
||||||
if entrypoint != "" {
|
|
||||||
cfg.Args = append([]string{entrypoint}, cfg.Args...)
|
|
||||||
}
|
|
||||||
if len(cfg.Args) == 0 {
|
|
||||||
cfg.Args = []string{"sh"}
|
|
||||||
}
|
|
||||||
return cfg, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func printWarnings(w io.Writer, warnings []client.VertexWarning, mode string) {
|
|
||||||
if len(warnings) == 0 || mode == progress.PrinterModeQuiet {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
fmt.Fprintf(w, "\n ")
|
|
||||||
sb := &bytes.Buffer{}
|
|
||||||
if len(warnings) == 1 {
|
|
||||||
fmt.Fprintf(sb, "1 warning found")
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(sb, "%d warnings found", len(warnings))
|
|
||||||
}
|
|
||||||
if logrus.GetLevel() < logrus.DebugLevel {
|
|
||||||
fmt.Fprintf(sb, " (use --debug to expand)")
|
|
||||||
}
|
|
||||||
fmt.Fprintf(sb, ":\n")
|
|
||||||
fmt.Fprint(w, aec.Apply(sb.String(), aec.YellowF))
|
|
||||||
|
|
||||||
for _, warn := range warnings {
|
|
||||||
fmt.Fprintf(w, " - %s\n", warn.Short)
|
|
||||||
if logrus.GetLevel() < logrus.DebugLevel {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for _, d := range warn.Detail {
|
|
||||||
fmt.Fprintf(w, "%s\n", d)
|
|
||||||
}
|
|
||||||
if warn.URL != "" {
|
|
||||||
fmt.Fprintf(w, "More info: %s\n", warn.URL)
|
|
||||||
}
|
|
||||||
if warn.SourceInfo != nil && warn.Range != nil {
|
|
||||||
src := errdefs.Source{
|
|
||||||
Info: warn.SourceInfo,
|
|
||||||
Ranges: warn.Range,
|
|
||||||
}
|
|
||||||
src.Print(w)
|
|
||||||
}
|
|
||||||
fmt.Fprintf(w, "\n")
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func newBuildOptions() buildOptions {
|
func newBuildOptions() buildOptions {
|
||||||
@@ -445,65 +277,54 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
|
|
||||||
flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, `Add a custom host-to-IP mapping (format: "host:ip")`)
|
flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, "Add a custom host-to-IP mapping (format: `host:ip`)")
|
||||||
flags.SetAnnotation("add-host", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host"})
|
flags.SetAnnotation("add-host", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host"})
|
||||||
|
|
||||||
flags.StringSliceVar(&options.allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)
|
flags.StringSliceVar(&options.allow, "allow", []string{}, "Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`)")
|
||||||
|
|
||||||
flags.StringArrayVar(&options.buildArgs, "build-arg", []string{}, "Set build-time variables")
|
flags.StringArrayVar(&options.buildArgs, "build-arg", []string{}, "Set build-time variables")
|
||||||
|
flags.SetAnnotation("build-arg", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg"})
|
||||||
|
|
||||||
flags.StringArrayVar(&options.cacheFrom, "cache-from", []string{}, `External cache sources (e.g., "user/app:cache", "type=local,src=path/to/dir")`)
|
flags.StringArrayVar(&options.cacheFrom, "cache-from", []string{}, "External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`)")
|
||||||
|
|
||||||
flags.StringArrayVar(&options.cacheTo, "cache-to", []string{}, `Cache export destinations (e.g., "user/app:cache", "type=local,dest=path/to/dir")`)
|
flags.StringArrayVar(&options.cacheTo, "cache-to", []string{}, "Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`)")
|
||||||
|
|
||||||
flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container")
|
flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container")
|
||||||
flags.SetAnnotation("cgroup-parent", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent"})
|
flags.SetAnnotation("cgroup-parent", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent"})
|
||||||
|
|
||||||
flags.StringArrayVar(&options.contexts, "build-context", []string{}, "Additional build contexts (e.g., name=path)")
|
flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (default: `PATH/Dockerfile`)")
|
||||||
|
flags.SetAnnotation("file", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f"})
|
||||||
flags.StringVarP(&options.dockerfileName, "file", "f", "", `Name of the Dockerfile (default: "PATH/Dockerfile")`)
|
|
||||||
flags.SetAnnotation("file", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f"})
|
|
||||||
|
|
||||||
flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")
|
flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")
|
||||||
|
|
||||||
flags.StringArrayVar(&options.labels, "label", []string{}, "Set metadata for an image")
|
flags.StringArrayVar(&options.labels, "label", []string{}, "Set metadata for an image")
|
||||||
|
|
||||||
flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--output=type=docker"`)
|
flags.BoolVar(&options.exportLoad, "load", false, "Shorthand for `--output=type=docker`")
|
||||||
|
|
||||||
flags.StringVar(&options.networkMode, "network", "default", `Set the networking mode for the "RUN" instructions during build`)
|
flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")
|
||||||
|
|
||||||
flags.StringArrayVar(&options.noCacheFilter, "no-cache-filter", []string{}, "Do not cache specified stages")
|
flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, "Output destination (format: `type=local,dest=path`)")
|
||||||
|
|
||||||
flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, `Output destination (format: "type=local,dest=path")`)
|
|
||||||
|
|
||||||
flags.StringArrayVar(&options.platforms, "platform", platformsDefault, "Set target platform for build")
|
flags.StringArrayVar(&options.platforms, "platform", platformsDefault, "Set target platform for build")
|
||||||
|
|
||||||
if isExperimental() {
|
flags.BoolVar(&options.exportPush, "push", false, "Shorthand for `--output=type=registry`")
|
||||||
flags.StringVar(&options.printFunc, "print", "", "Print result of information request (outline, targets)")
|
|
||||||
}
|
|
||||||
|
|
||||||
flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--output=type=registry"`)
|
|
||||||
|
|
||||||
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
|
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
|
||||||
|
|
||||||
flags.StringArrayVar(&options.secrets, "secret", []string{}, `Secret to expose to the build (format: "id=mysecret[,src=/local/secret]")`)
|
flags.StringArrayVar(&options.secrets, "secret", []string{}, "Secret file to expose to the build (format: `id=mysecret,src=/local/secret`)")
|
||||||
|
|
||||||
flags.Var(&options.shmSize, "shm-size", `Size of "/dev/shm"`)
|
flags.Var(&options.shmSize, "shm-size", "Size of `/dev/shm`")
|
||||||
|
|
||||||
flags.StringArrayVar(&options.ssh, "ssh", []string{}, `SSH agent socket or keys to expose to the build (format: "default|<id>[=<socket>|<key>[,<key>]]")`)
|
flags.StringArrayVar(&options.ssh, "ssh", []string{}, "SSH agent socket or keys to expose to the build (format: `default|<id>[=<socket>|<key>[,<key>]]`)")
|
||||||
|
|
||||||
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, `Name and optionally a tag (format: "name:tag")`)
|
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Name and optionally a tag (format: `name:tag`)")
|
||||||
flags.SetAnnotation("tag", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t"})
|
flags.SetAnnotation("tag", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t"})
|
||||||
|
|
||||||
flags.StringVar(&options.target, "target", "", "Set the target build stage to build")
|
flags.StringVar(&options.target, "target", "", "Set the target build stage to build.")
|
||||||
flags.SetAnnotation("target", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target"})
|
flags.SetAnnotation("target", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target"})
|
||||||
|
|
||||||
flags.Var(options.ulimits, "ulimit", "Ulimit options")
|
flags.Var(options.ulimits, "ulimit", "Ulimit options")
|
||||||
|
|
||||||
if isExperimental() {
|
|
||||||
flags.StringVar(&options.invoke, "invoke", "", "Invoke a command after the build. BUILDX_EXPERIMENTAL=1 is required.")
|
|
||||||
}
|
|
||||||
|
|
||||||
// hidden flags
|
// hidden flags
|
||||||
var ignore string
|
var ignore string
|
||||||
var ignoreSlice []string
|
var ignoreSlice []string
|
||||||
@@ -528,7 +349,7 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
flags.StringVarP(&ignore, "memory", "m", "", "Memory limit")
|
flags.StringVarP(&ignore, "memory", "m", "", "Memory limit")
|
||||||
flags.MarkHidden("memory")
|
flags.MarkHidden("memory")
|
||||||
|
|
||||||
flags.StringVar(&ignore, "memory-swap", "", `Swap limit equal to memory plus swap: "-1" to enable unlimited swap`)
|
flags.StringVar(&ignore, "memory-swap", "", "Swap limit equal to memory plus swap: `-1` to enable unlimited swap")
|
||||||
flags.MarkHidden("memory-swap")
|
flags.MarkHidden("memory-swap")
|
||||||
|
|
||||||
flags.Int64VarP(&ignoreInt, "cpu-shares", "c", 0, "CPU shares (relative weight)")
|
flags.Int64VarP(&ignoreInt, "cpu-shares", "c", 0, "CPU shares (relative weight)")
|
||||||
@@ -540,10 +361,10 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
flags.Int64Var(&ignoreInt, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
|
flags.Int64Var(&ignoreInt, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
|
||||||
flags.MarkHidden("cpu-quota")
|
flags.MarkHidden("cpu-quota")
|
||||||
|
|
||||||
flags.StringVar(&ignore, "cpuset-cpus", "", `CPUs in which to allow execution ("0-3", "0,1")`)
|
flags.StringVar(&ignore, "cpuset-cpus", "", "CPUs in which to allow execution (`0-3`, `0,1`)")
|
||||||
flags.MarkHidden("cpuset-cpus")
|
flags.MarkHidden("cpuset-cpus")
|
||||||
|
|
||||||
flags.StringVar(&ignore, "cpuset-mems", "", `MEMs in which to allow execution ("0-3", "0,1")`)
|
flags.StringVar(&ignore, "cpuset-mems", "", "MEMs in which to allow execution (`0-3`, `0,1`)")
|
||||||
flags.MarkHidden("cpuset-mems")
|
flags.MarkHidden("cpuset-mems")
|
||||||
|
|
||||||
flags.BoolVar(&ignoreBool, "rm", true, "Remove intermediate containers after a successful build")
|
flags.BoolVar(&ignoreBool, "rm", true, "Remove intermediate containers after a successful build")
|
||||||
@@ -558,8 +379,8 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
|
|
||||||
func commonBuildFlags(options *commonOptions, flags *pflag.FlagSet) {
|
func commonBuildFlags(options *commonOptions, flags *pflag.FlagSet) {
|
||||||
options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
|
options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
|
||||||
flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
|
flags.StringVar(&options.progress, "progress", "auto", "Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output")
|
||||||
options.pull = flags.Bool("pull", false, "Always attempt to pull all referenced images")
|
options.pull = flags.Bool("pull", false, "Always attempt to pull a newer version of the image")
|
||||||
flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to the file")
|
flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to the file")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -571,6 +392,7 @@ func checkWarnedFlags(f *pflag.Flag) {
|
|||||||
switch t {
|
switch t {
|
||||||
case "flag-warn":
|
case "flag-warn":
|
||||||
logrus.Warn(m[0])
|
logrus.Warn(m[0])
|
||||||
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -594,116 +416,3 @@ func listToMap(values []string, defaultEnv bool) map[string]string {
|
|||||||
}
|
}
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseContextNames(values []string) (map[string]build.NamedContext, error) {
|
|
||||||
if len(values) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
result := make(map[string]build.NamedContext, len(values))
|
|
||||||
for _, value := range values {
|
|
||||||
kv := strings.SplitN(value, "=", 2)
|
|
||||||
if len(kv) != 2 {
|
|
||||||
return nil, errors.Errorf("invalid context value: %s, expected key=value", value)
|
|
||||||
}
|
|
||||||
named, err := reference.ParseNormalizedNamed(kv[0])
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "invalid context name %s", kv[0])
|
|
||||||
}
|
|
||||||
name := strings.TrimSuffix(reference.FamiliarString(named), ":latest")
|
|
||||||
result[name] = build.NamedContext{Path: kv[1]}
|
|
||||||
}
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func parsePrintFunc(str string) (*build.PrintFunc, error) {
|
|
||||||
if str == "" {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
csvReader := csv.NewReader(strings.NewReader(str))
|
|
||||||
fields, err := csvReader.Read()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
f := &build.PrintFunc{}
|
|
||||||
for _, field := range fields {
|
|
||||||
parts := strings.SplitN(field, "=", 2)
|
|
||||||
if len(parts) == 2 {
|
|
||||||
if parts[0] == "format" {
|
|
||||||
f.Format = parts[1]
|
|
||||||
} else {
|
|
||||||
return nil, errors.Errorf("invalid print field: %s", field)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if f.Name != "" {
|
|
||||||
return nil, errors.Errorf("invalid print value: %s", str)
|
|
||||||
}
|
|
||||||
f.Name = field
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func writeMetadataFile(filename string, dt interface{}) error {
|
|
||||||
b, err := json.MarshalIndent(dt, "", " ")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return ioutils.AtomicWriteFile(filename, b, 0644)
|
|
||||||
}
|
|
||||||
|
|
||||||
func decodeExporterResponse(exporterResponse map[string]string) map[string]interface{} {
|
|
||||||
out := make(map[string]interface{})
|
|
||||||
for k, v := range exporterResponse {
|
|
||||||
dt, err := base64.StdEncoding.DecodeString(v)
|
|
||||||
if err != nil {
|
|
||||||
out[k] = v
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
var raw map[string]interface{}
|
|
||||||
if err = json.Unmarshal(dt, &raw); err != nil || len(raw) == 0 {
|
|
||||||
out[k] = v
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
out[k] = json.RawMessage(dt)
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func wrapBuildError(err error, bake bool) error {
|
|
||||||
if err == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
st, ok := grpcerrors.AsGRPCStatus(err)
|
|
||||||
if ok {
|
|
||||||
if st.Code() == codes.Unimplemented && strings.Contains(st.Message(), "unsupported frontend capability moby.buildkit.frontend.contexts") {
|
|
||||||
msg := "current frontend does not support --build-context."
|
|
||||||
if bake {
|
|
||||||
msg = "current frontend does not support defining additional contexts for targets."
|
|
||||||
}
|
|
||||||
msg += " Named contexts are supported since Dockerfile v1.4. Use #syntax directive in Dockerfile or update to latest BuildKit."
|
|
||||||
return &wrapped{err, msg}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
type wrapped struct {
|
|
||||||
err error
|
|
||||||
msg string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *wrapped) Error() string {
|
|
||||||
return w.msg
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *wrapped) Unwrap() error {
|
|
||||||
return w.err
|
|
||||||
}
|
|
||||||
|
|
||||||
func isExperimental() bool {
|
|
||||||
if v, ok := os.LookupEnv("BUILDX_EXPERIMENTAL"); ok {
|
|
||||||
vv, _ := strconv.ParseBool(v)
|
|
||||||
return vv
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -14,7 +14,6 @@ import (
|
|||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/buildx/util/cobrautil"
|
"github.com/docker/buildx/util/cobrautil"
|
||||||
"github.com/docker/buildx/util/confutil"
|
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/google/shlex"
|
"github.com/google/shlex"
|
||||||
@@ -61,6 +60,22 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
driverName := in.driver
|
||||||
|
if driverName == "" {
|
||||||
|
f, err := driver.GetDefaultFactory(ctx, dockerCli.Client(), true)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if f == nil {
|
||||||
|
return errors.Errorf("no valid drivers found")
|
||||||
|
}
|
||||||
|
driverName = f.Name()
|
||||||
|
}
|
||||||
|
|
||||||
|
if driver.GetFactory(driverName, true) == nil {
|
||||||
|
return errors.Errorf("failed to find driver %q", in.driver)
|
||||||
|
}
|
||||||
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
txn, release, err := storeutil.GetStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -75,19 +90,6 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if !in.actionLeave && !in.actionAppend {
|
|
||||||
contexts, err := dockerCli.ContextStore().List()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, c := range contexts {
|
|
||||||
if c.Name == name {
|
|
||||||
logrus.Warnf("instance name %q already exists as context builder", name)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ng, err := txn.NodeGroupByName(name)
|
ng, err := txn.NodeGroupByName(name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if os.IsNotExist(errors.Cause(err)) {
|
if os.IsNotExist(errors.Cause(err)) {
|
||||||
@@ -95,62 +97,29 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
logrus.Warnf("failed to find %q for append, creating a new instance instead", in.name)
|
logrus.Warnf("failed to find %q for append, creating a new instance instead", in.name)
|
||||||
}
|
}
|
||||||
if in.actionLeave {
|
if in.actionLeave {
|
||||||
return errors.Errorf("failed to find instance %q for leave", in.name)
|
return errors.Errorf("failed to find instance %q for leave", name)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
buildkitHost := os.Getenv("BUILDKIT_HOST")
|
|
||||||
|
|
||||||
driverName := in.driver
|
|
||||||
if driverName == "" {
|
|
||||||
if ng != nil {
|
|
||||||
driverName = ng.Driver
|
|
||||||
} else if len(args) == 0 && buildkitHost != "" {
|
|
||||||
driverName = "remote"
|
|
||||||
} else {
|
|
||||||
var arg string
|
|
||||||
if len(args) > 0 {
|
|
||||||
arg = args[0]
|
|
||||||
}
|
|
||||||
f, err := driver.GetDefaultFactory(ctx, arg, dockerCli.Client(), true)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if f == nil {
|
|
||||||
return errors.Errorf("no valid drivers found")
|
|
||||||
}
|
|
||||||
driverName = f.Name()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if ng != nil {
|
if ng != nil {
|
||||||
if in.nodeName == "" && !in.actionAppend {
|
if in.nodeName == "" && !in.actionAppend {
|
||||||
return errors.Errorf("existing instance for %q but no append mode, specify --node to make changes for existing instances", name)
|
return errors.Errorf("existing instance for %s but no append mode, specify --node to make changes for existing instances", name)
|
||||||
}
|
}
|
||||||
if driverName != ng.Driver {
|
|
||||||
return errors.Errorf("existing instance for %q but has mismatched driver %q", name, ng.Driver)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := driver.GetFactory(driverName, true); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
ngOriginal := ng
|
|
||||||
if ngOriginal != nil {
|
|
||||||
ngOriginal = ngOriginal.Copy()
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if ng == nil {
|
if ng == nil {
|
||||||
ng = &store.NodeGroup{
|
ng = &store.NodeGroup{
|
||||||
Name: name,
|
Name: name,
|
||||||
Driver: driverName,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if ng.Driver == "" || in.driver != "" {
|
||||||
|
ng.Driver = driverName
|
||||||
|
}
|
||||||
|
|
||||||
var flags []string
|
var flags []string
|
||||||
if in.flags != "" {
|
if in.flags != "" {
|
||||||
flags, err = shlex.Split(in.flags)
|
flags, err = shlex.Split(in.flags)
|
||||||
@@ -160,72 +129,44 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var ep string
|
var ep string
|
||||||
var setEp bool
|
|
||||||
if in.actionLeave {
|
if in.actionLeave {
|
||||||
if err := ng.Leave(in.nodeName); err != nil {
|
if err := ng.Leave(in.nodeName); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
switch {
|
if len(args) > 0 {
|
||||||
case driverName == "kubernetes":
|
ep, err = validateEndpoint(dockerCli, args[0])
|
||||||
if len(args) > 0 {
|
if err != nil {
|
||||||
logrus.Warnf("kubernetes driver does not support endpoint args %q", args[0])
|
return err
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
if dockerCli.CurrentContext() == "default" && dockerCli.DockerEndpoint().TLSData != nil {
|
||||||
|
return errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`")
|
||||||
|
}
|
||||||
|
|
||||||
|
ep, err = storeutil.GetCurrentEndpoint(dockerCli)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if in.driver == "kubernetes" {
|
||||||
// naming endpoint to make --append works
|
// naming endpoint to make --append works
|
||||||
ep = (&url.URL{
|
ep = (&url.URL{
|
||||||
Scheme: driverName,
|
Scheme: in.driver,
|
||||||
Path: "/" + in.name,
|
Path: "/" + in.name,
|
||||||
RawQuery: (&url.Values{
|
RawQuery: (&url.Values{
|
||||||
"deployment": {in.nodeName},
|
"deployment": {in.nodeName},
|
||||||
"kubeconfig": {os.Getenv("KUBECONFIG")},
|
"kubeconfig": {os.Getenv("KUBECONFIG")},
|
||||||
}).Encode(),
|
}).Encode(),
|
||||||
}).String()
|
}).String()
|
||||||
setEp = false
|
|
||||||
case driverName == "remote":
|
|
||||||
if len(args) > 0 {
|
|
||||||
ep = args[0]
|
|
||||||
} else if buildkitHost != "" {
|
|
||||||
ep = buildkitHost
|
|
||||||
} else {
|
|
||||||
return errors.Errorf("no remote endpoint provided")
|
|
||||||
}
|
|
||||||
ep, err = validateBuildkitEndpoint(ep)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
setEp = true
|
|
||||||
case len(args) > 0:
|
|
||||||
ep, err = validateEndpoint(dockerCli, args[0])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
setEp = true
|
|
||||||
default:
|
|
||||||
if dockerCli.CurrentContext() == "default" && dockerCli.DockerEndpoint().TLSData != nil {
|
|
||||||
return errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`")
|
|
||||||
}
|
|
||||||
ep, err = storeutil.GetCurrentEndpoint(dockerCli)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
setEp = false
|
|
||||||
}
|
}
|
||||||
|
|
||||||
m, err := csvToMap(in.driverOpts)
|
m, err := csvToMap(in.driverOpts)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if err := ng.Update(in.nodeName, ep, in.platform, len(args) > 0, in.actionAppend, flags, in.configFile, m); err != nil {
|
||||||
if in.configFile == "" {
|
|
||||||
// if buildkit config is not provided, check if the default one is
|
|
||||||
// available and use it
|
|
||||||
if f, ok := confutil.DefaultConfigFile(dockerCli); ok {
|
|
||||||
logrus.Warnf("Using default BuildKit config in %s", f)
|
|
||||||
in.configFile = f
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := ng.Update(in.nodeName, ep, in.platform, setEp, in.actionAppend, flags, in.configFile, m); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -234,30 +175,6 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
ngi := &nginfo{ng: ng}
|
|
||||||
|
|
||||||
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
if err = loadNodeGroupData(timeoutCtx, dockerCli, ngi); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, info := range ngi.drivers {
|
|
||||||
if err := info.di.Err; err != nil {
|
|
||||||
err := errors.Errorf("failed to initialize builder %s (%s): %s", ng.Name, info.di.Name, err)
|
|
||||||
var err2 error
|
|
||||||
if ngOriginal == nil {
|
|
||||||
err2 = txn.Remove(ng.Name)
|
|
||||||
} else {
|
|
||||||
err2 = txn.Save(ngOriginal)
|
|
||||||
}
|
|
||||||
if err2 != nil {
|
|
||||||
logrus.Warnf("Could not rollback to previous state: %s", err2)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if in.use && ep != "" {
|
if in.use && ep != "" {
|
||||||
current, err := storeutil.GetCurrentEndpoint(dockerCli)
|
current, err := storeutil.GetCurrentEndpoint(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -268,6 +185,15 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ngi := &nginfo{ng: ng}
|
||||||
|
|
||||||
|
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
if err = loadNodeGroupData(timeoutCtx, dockerCli, ngi); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
if in.bootstrap {
|
if in.bootstrap {
|
||||||
if _, err = boot(ctx, ngi); err != nil {
|
if _, err = boot(ctx, ngi); err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -282,11 +208,11 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
var options createOptions
|
var options createOptions
|
||||||
|
|
||||||
var drivers bytes.Buffer
|
var drivers bytes.Buffer
|
||||||
for _, d := range driver.GetFactories(true) {
|
for _, d := range driver.GetFactories() {
|
||||||
if len(drivers.String()) > 0 {
|
if len(drivers.String()) > 0 {
|
||||||
drivers.WriteString(", ")
|
drivers.WriteString(", ")
|
||||||
}
|
}
|
||||||
drivers.WriteString(fmt.Sprintf(`"%s"`, d.Name()))
|
drivers.WriteString(fmt.Sprintf("`%s`", d.Name()))
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
@@ -320,9 +246,6 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func csvToMap(in []string) (map[string]string, error) {
|
func csvToMap(in []string) (map[string]string, error) {
|
||||||
if len(in) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
m := make(map[string]string, len(in))
|
m := make(map[string]string, len(in))
|
||||||
for _, s := range in {
|
for _, s := range in {
|
||||||
csvReader := csv.NewReader(strings.NewReader(s))
|
csvReader := csv.NewReader(strings.NewReader(s))
|
||||||
|
|||||||
@@ -1,26 +0,0 @@
|
|||||||
package commands
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestCsvToMap(t *testing.T) {
|
|
||||||
d := []string{
|
|
||||||
"\"tolerations=key=foo,value=bar;key=foo2,value=bar2\",replicas=1",
|
|
||||||
"namespace=default",
|
|
||||||
}
|
|
||||||
r, err := csvToMap(d)
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
require.Contains(t, r, "tolerations")
|
|
||||||
require.Equal(t, r["tolerations"], "key=foo,value=bar;key=foo2,value=bar2")
|
|
||||||
|
|
||||||
require.Contains(t, r, "replicas")
|
|
||||||
require.Equal(t, r["replicas"], "1")
|
|
||||||
|
|
||||||
require.Contains(t, r, "namespace")
|
|
||||||
require.Equal(t, r["namespace"], "default")
|
|
||||||
}
|
|
||||||
@@ -4,18 +4,16 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
|
||||||
"text/tabwriter"
|
"text/tabwriter"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/build"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/opts"
|
"github.com/docker/cli/opts"
|
||||||
"github.com/docker/go-units"
|
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
"github.com/tonistiigi/units"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -127,20 +125,20 @@ func printKV(w io.Writer, k string, v interface{}) {
|
|||||||
func printVerbose(tw *tabwriter.Writer, du []*client.UsageInfo) {
|
func printVerbose(tw *tabwriter.Writer, du []*client.UsageInfo) {
|
||||||
for _, di := range du {
|
for _, di := range du {
|
||||||
printKV(tw, "ID", di.ID)
|
printKV(tw, "ID", di.ID)
|
||||||
if len(di.Parents) != 0 {
|
if di.Parent != "" {
|
||||||
printKV(tw, "Parent", strings.Join(di.Parents, ","))
|
printKV(tw, "Parent", di.Parent)
|
||||||
}
|
}
|
||||||
printKV(tw, "Created at", di.CreatedAt)
|
printKV(tw, "Created at", di.CreatedAt)
|
||||||
printKV(tw, "Mutable", di.Mutable)
|
printKV(tw, "Mutable", di.Mutable)
|
||||||
printKV(tw, "Reclaimable", !di.InUse)
|
printKV(tw, "Reclaimable", !di.InUse)
|
||||||
printKV(tw, "Shared", di.Shared)
|
printKV(tw, "Shared", di.Shared)
|
||||||
printKV(tw, "Size", units.HumanSize(float64(di.Size)))
|
printKV(tw, "Size", fmt.Sprintf("%.2f", units.Bytes(di.Size)))
|
||||||
if di.Description != "" {
|
if di.Description != "" {
|
||||||
printKV(tw, "Description", di.Description)
|
printKV(tw, "Description", di.Description)
|
||||||
}
|
}
|
||||||
printKV(tw, "Usage count", di.UsageCount)
|
printKV(tw, "Usage count", di.UsageCount)
|
||||||
if di.LastUsedAt != nil {
|
if di.LastUsedAt != nil {
|
||||||
printKV(tw, "Last used", units.HumanDuration(time.Since(*di.LastUsedAt))+" ago")
|
printKV(tw, "Last used", di.LastUsedAt)
|
||||||
}
|
}
|
||||||
if di.RecordType != "" {
|
if di.RecordType != "" {
|
||||||
printKV(tw, "Type", di.RecordType)
|
printKV(tw, "Type", di.RecordType)
|
||||||
@@ -161,15 +159,11 @@ func printTableRow(tw *tabwriter.Writer, di *client.UsageInfo) {
|
|||||||
if di.Mutable {
|
if di.Mutable {
|
||||||
id += "*"
|
id += "*"
|
||||||
}
|
}
|
||||||
size := units.HumanSize(float64(di.Size))
|
size := fmt.Sprintf("%.2f", units.Bytes(di.Size))
|
||||||
if di.Shared {
|
if di.Shared {
|
||||||
size += "*"
|
size += "*"
|
||||||
}
|
}
|
||||||
lastAccessed := ""
|
fmt.Fprintf(tw, "%-71s\t%-11v\t%s\t\n", id, !di.InUse, size)
|
||||||
if di.LastUsedAt != nil {
|
|
||||||
lastAccessed = units.HumanDuration(time.Since(*di.LastUsedAt)) + " ago"
|
|
||||||
}
|
|
||||||
fmt.Fprintf(tw, "%-40s\t%-5v\t%-10s\t%s\n", id, !di.InUse, size, lastAccessed)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func printSummary(tw *tabwriter.Writer, dus [][]*client.UsageInfo) {
|
func printSummary(tw *tabwriter.Writer, dus [][]*client.UsageInfo) {
|
||||||
@@ -192,11 +186,11 @@ func printSummary(tw *tabwriter.Writer, dus [][]*client.UsageInfo) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if shared > 0 {
|
if shared > 0 {
|
||||||
fmt.Fprintf(tw, "Shared:\t%s\n", units.HumanSize(float64(shared)))
|
fmt.Fprintf(tw, "Shared:\t%.2f\n", units.Bytes(shared))
|
||||||
fmt.Fprintf(tw, "Private:\t%s\n", units.HumanSize(float64(total-shared)))
|
fmt.Fprintf(tw, "Private:\t%.2f\n", units.Bytes(total-shared))
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Fprintf(tw, "Reclaimable:\t%s\n", units.HumanSize(float64(reclaimable)))
|
fmt.Fprintf(tw, "Reclaimable:\t%.2f\n", units.Bytes(reclaimable))
|
||||||
fmt.Fprintf(tw, "Total:\t%s\n", units.HumanSize(float64(total)))
|
fmt.Fprintf(tw, "Total:\t%.2f\n", units.Bytes(total))
|
||||||
tw.Flush()
|
tw.Flush()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,16 +1,14 @@
|
|||||||
package commands
|
package commands
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"io/ioutil"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/buildx/util/imagetools"
|
"github.com/docker/buildx/util/imagetools"
|
||||||
"github.com/docker/buildx/util/progress"
|
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/distribution/reference"
|
"github.com/docker/distribution/reference"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
@@ -27,7 +25,6 @@ type createOptions struct {
|
|||||||
tags []string
|
tags []string
|
||||||
dryrun bool
|
dryrun bool
|
||||||
actionAppend bool
|
actionAppend bool
|
||||||
progress string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
||||||
@@ -41,7 +38,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
|
|
||||||
fileArgs := make([]string, len(in.files))
|
fileArgs := make([]string, len(in.files))
|
||||||
for i, f := range in.files {
|
for i, f := range in.files {
|
||||||
dt, err := os.ReadFile(f)
|
dt, err := ioutil.ReadFile(f)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -81,21 +78,18 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
if len(repos) == 0 {
|
if len(repos) == 0 {
|
||||||
return errors.Errorf("no repositories specified, please set a reference in tag or source")
|
return errors.Errorf("no repositories specified, please set a reference in tag or source")
|
||||||
}
|
}
|
||||||
|
if len(repos) > 1 {
|
||||||
|
return errors.Errorf("multiple repositories currently not supported, found %v", repos)
|
||||||
|
}
|
||||||
|
|
||||||
var defaultRepo *string
|
var repo string
|
||||||
if len(repos) == 1 {
|
for r := range repos {
|
||||||
for repo := range repos {
|
repo = r
|
||||||
defaultRepo = &repo
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, s := range srcs {
|
for i, s := range srcs {
|
||||||
if s.Ref == nil && s.Desc.MediaType == "" && s.Desc.Digest != "" {
|
if s.Ref == nil && s.Desc.MediaType == "" && s.Desc.Digest != "" {
|
||||||
if defaultRepo == nil {
|
n, err := reference.ParseNormalizedNamed(repo)
|
||||||
return errors.Errorf("multiple repositories specified, cannot infer repository for %q", args[i])
|
|
||||||
}
|
|
||||||
|
|
||||||
n, err := reference.ParseNormalizedNamed(*defaultRepo)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -149,6 +143,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
srcs[i].Ref = nil
|
||||||
if srcs[i].Desc.Digest == "" {
|
if srcs[i].Desc.Digest == "" {
|
||||||
srcs[i].Desc = desc
|
srcs[i].Desc = desc
|
||||||
} else {
|
} else {
|
||||||
@@ -167,7 +162,12 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
dt, desc, err := r.Combine(ctx, srcs)
|
descs := make([]ocispec.Descriptor, len(srcs))
|
||||||
|
for i := range descs {
|
||||||
|
descs[i] = srcs[i].Desc
|
||||||
|
}
|
||||||
|
|
||||||
|
dt, desc, err := r.Combine(ctx, repo, descs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -180,49 +180,23 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
// new resolver cause need new auth
|
// new resolver cause need new auth
|
||||||
r = imagetools.New(imageopt)
|
r = imagetools.New(imageopt)
|
||||||
|
|
||||||
ctx2, cancel := context.WithCancel(context.TODO())
|
|
||||||
defer cancel()
|
|
||||||
printer := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, in.progress)
|
|
||||||
|
|
||||||
eg, _ := errgroup.WithContext(ctx)
|
|
||||||
pw := progress.WithPrefix(printer, "internal", true)
|
|
||||||
|
|
||||||
for _, t := range tags {
|
for _, t := range tags {
|
||||||
t := t
|
if err := r.Push(ctx, t, desc, dt); err != nil {
|
||||||
eg.Go(func() error {
|
return err
|
||||||
return progress.Wrap(fmt.Sprintf("pushing %s", t.String()), pw.Write, func(sub progress.SubLogger) error {
|
}
|
||||||
eg2, _ := errgroup.WithContext(ctx)
|
fmt.Println(t.String())
|
||||||
for _, s := range srcs {
|
|
||||||
if reference.Domain(s.Ref) == reference.Domain(t) && reference.Path(s.Ref) == reference.Path(t) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
s := s
|
|
||||||
eg2.Go(func() error {
|
|
||||||
sub.Log(1, []byte(fmt.Sprintf("copying %s from %s to %s\n", s.Desc.Digest.String(), s.Ref.String(), t.String())))
|
|
||||||
return r.Copy(ctx, s, t)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := eg2.Wait(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
sub.Log(1, []byte(fmt.Sprintf("pushing %s to %s\n", desc.Digest.String(), t.String())))
|
|
||||||
return r.Push(ctx, t, desc, dt)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
err = eg.Wait()
|
return nil
|
||||||
err1 := printer.Wait()
|
|
||||||
if err == nil {
|
|
||||||
err = err1
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseSources(in []string) ([]*imagetools.Source, error) {
|
type src struct {
|
||||||
out := make([]*imagetools.Source, len(in))
|
Desc ocispec.Descriptor
|
||||||
|
Ref reference.Named
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseSources(in []string) ([]*src, error) {
|
||||||
|
out := make([]*src, len(in))
|
||||||
for i, in := range in {
|
for i, in := range in {
|
||||||
s, err := parseSource(in)
|
s, err := parseSource(in)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -245,11 +219,11 @@ func parseRefs(in []string) ([]reference.Named, error) {
|
|||||||
return refs, nil
|
return refs, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseSource(in string) (*imagetools.Source, error) {
|
func parseSource(in string) (*src, error) {
|
||||||
// source can be a digest, reference or a descriptor JSON
|
// source can be a digest, reference or a descriptor JSON
|
||||||
dgst, err := digest.Parse(in)
|
dgst, err := digest.Parse(in)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return &imagetools.Source{
|
return &src{
|
||||||
Desc: ocispec.Descriptor{
|
Desc: ocispec.Descriptor{
|
||||||
Digest: dgst,
|
Digest: dgst,
|
||||||
},
|
},
|
||||||
@@ -260,14 +234,14 @@ func parseSource(in string) (*imagetools.Source, error) {
|
|||||||
|
|
||||||
ref, err := reference.ParseNormalizedNamed(in)
|
ref, err := reference.ParseNormalizedNamed(in)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return &imagetools.Source{
|
return &src{
|
||||||
Ref: ref,
|
Ref: ref,
|
||||||
}, nil
|
}, nil
|
||||||
} else if !strings.HasPrefix(in, "{") {
|
} else if !strings.HasPrefix(in, "{") {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var s imagetools.Source
|
var s src
|
||||||
if err := json.Unmarshal([]byte(in), &s.Desc); err != nil {
|
if err := json.Unmarshal([]byte(in), &s.Desc); err != nil {
|
||||||
return nil, errors.WithStack(err)
|
return nil, errors.WithStack(err)
|
||||||
}
|
}
|
||||||
@@ -281,7 +255,7 @@ func createCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
|
|||||||
Use: "create [OPTIONS] [SOURCE] [SOURCE...]",
|
Use: "create [OPTIONS] [SOURCE] [SOURCE...]",
|
||||||
Short: "Create a new image based on source images",
|
Short: "Create a new image based on source images",
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
options.builder = *opts.Builder
|
options.builder = opts.Builder
|
||||||
return runCreate(dockerCli, options, args)
|
return runCreate(dockerCli, options, args)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -291,7 +265,6 @@ func createCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
|
|||||||
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Set reference for new image")
|
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Set reference for new image")
|
||||||
flags.BoolVar(&options.dryrun, "dry-run", false, "Show final image instead of pushing")
|
flags.BoolVar(&options.dryrun, "dry-run", false, "Show final image instead of pushing")
|
||||||
flags.BoolVar(&options.actionAppend, "append", false, "Append to existing manifest")
|
flags.BoolVar(&options.actionAppend, "append", false, "Append to existing manifest")
|
||||||
flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
|
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,30 +1,28 @@
|
|||||||
package commands
|
package commands
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/containerd/containerd/images"
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/buildx/util/imagetools"
|
"github.com/docker/buildx/util/imagetools"
|
||||||
"github.com/docker/cli-docs-tool/annotation"
|
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
"github.com/pkg/errors"
|
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
type inspectOptions struct {
|
type inspectOptions struct {
|
||||||
builder string
|
|
||||||
format string
|
|
||||||
raw bool
|
raw bool
|
||||||
|
builder string
|
||||||
}
|
}
|
||||||
|
|
||||||
func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
|
func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
if in.format != "" && in.raw {
|
|
||||||
return errors.Errorf("format and raw cannot be used together")
|
|
||||||
}
|
|
||||||
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
txn, release, err := storeutil.GetStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -49,13 +47,28 @@ func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
r := imagetools.New(imageopt)
|
||||||
|
|
||||||
p, err := imagetools.NewPrinter(ctx, imageopt, name, in.format)
|
dt, desc, err := r.Get(ctx, name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return p.Print(in.raw, dockerCli.Out())
|
if in.raw {
|
||||||
|
fmt.Printf("%s", dt) // avoid newline to keep digest
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch desc.MediaType {
|
||||||
|
// case images.MediaTypeDockerSchema2Manifest, specs.MediaTypeImageManifest:
|
||||||
|
// TODO: handle distribution manifest and schema1
|
||||||
|
case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
|
||||||
|
return imagetools.PrintManifestList(dt, desc, name, os.Stdout)
|
||||||
|
default:
|
||||||
|
fmt.Printf("%s\n", dt)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
||||||
@@ -63,20 +76,16 @@ func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
|||||||
|
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Use: "inspect [OPTIONS] NAME",
|
Use: "inspect [OPTIONS] NAME",
|
||||||
Short: "Show details of an image in the registry",
|
Short: "Show details of image in the registry",
|
||||||
Args: cli.ExactArgs(1),
|
Args: cli.ExactArgs(1),
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
options.builder = *rootOpts.Builder
|
options.builder = rootOpts.Builder
|
||||||
return runInspect(dockerCli, options, args[0])
|
return runInspect(dockerCli, options, args[0])
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
|
flags.BoolVar(&options.raw, "raw", false, "Show original JSON manifest")
|
||||||
flags.StringVar(&options.format, "format", "", "Format the output using the given Go template")
|
|
||||||
flags.SetAnnotation("format", annotation.DefaultValue, []string{`"{{.Manifest}}"`})
|
|
||||||
|
|
||||||
flags.BoolVar(&options.raw, "raw", false, "Show original, unformatted JSON manifest")
|
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type RootOptions struct {
|
type RootOptions struct {
|
||||||
Builder *string
|
Builder string
|
||||||
}
|
}
|
||||||
|
|
||||||
func RootCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
|
func RootCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
|
||||||
@@ -16,8 +16,8 @@ func RootCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
|
|||||||
}
|
}
|
||||||
|
|
||||||
cmd.AddCommand(
|
cmd.AddCommand(
|
||||||
createCmd(dockerCli, opts),
|
|
||||||
inspectCmd(dockerCli, opts),
|
inspectCmd(dockerCli, opts),
|
||||||
|
createCmd(dockerCli, opts),
|
||||||
)
|
)
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
|
|||||||
@@ -79,7 +79,6 @@ func runInspect(dockerCli command.Cli, in inspectOptions) error {
|
|||||||
w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
|
w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
|
||||||
fmt.Fprintf(w, "Name:\t%s\n", ngi.ng.Name)
|
fmt.Fprintf(w, "Name:\t%s\n", ngi.ng.Name)
|
||||||
fmt.Fprintf(w, "Driver:\t%s\n", ngi.ng.Driver)
|
fmt.Fprintf(w, "Driver:\t%s\n", ngi.ng.Driver)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Fprintf(w, "Error:\t%s\n", err.Error())
|
fmt.Fprintf(w, "Error:\t%s\n", err.Error())
|
||||||
} else if ngi.err != nil {
|
} else if ngi.err != nil {
|
||||||
@@ -95,15 +94,6 @@ func runInspect(dockerCli command.Cli, in inspectOptions) error {
|
|||||||
}
|
}
|
||||||
fmt.Fprintf(w, "Name:\t%s\n", n.Name)
|
fmt.Fprintf(w, "Name:\t%s\n", n.Name)
|
||||||
fmt.Fprintf(w, "Endpoint:\t%s\n", n.Endpoint)
|
fmt.Fprintf(w, "Endpoint:\t%s\n", n.Endpoint)
|
||||||
|
|
||||||
var driverOpts []string
|
|
||||||
for k, v := range n.DriverOpts {
|
|
||||||
driverOpts = append(driverOpts, fmt.Sprintf("%s=%q", k, v))
|
|
||||||
}
|
|
||||||
if len(driverOpts) > 0 {
|
|
||||||
fmt.Fprintf(w, "Driver Options:\t%s\n", strings.Join(driverOpts, " "))
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := ngi.drivers[i].di.Err; err != nil {
|
if err := ngi.drivers[i].di.Err; err != nil {
|
||||||
fmt.Fprintf(w, "Error:\t%s\n", err.Error())
|
fmt.Fprintf(w, "Error:\t%s\n", err.Error())
|
||||||
} else if err := ngi.drivers[i].err; err != nil {
|
} else if err := ngi.drivers[i].err; err != nil {
|
||||||
|
|||||||
@@ -4,7 +4,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"sort"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"text/tabwriter"
|
"text/tabwriter"
|
||||||
"time"
|
"time"
|
||||||
@@ -45,30 +45,23 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
|
|||||||
builders[i] = &nginfo{ng: ng}
|
builders[i] = &nginfo{ng: ng}
|
||||||
}
|
}
|
||||||
|
|
||||||
contexts, err := dockerCli.ContextStore().List()
|
list, err := dockerCli.ContextStore().List()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
sort.Slice(contexts, func(i, j int) bool {
|
ctxbuilders := make([]*nginfo, len(list))
|
||||||
return contexts[i].Name < contexts[j].Name
|
for i, l := range list {
|
||||||
})
|
ctxbuilders[i] = &nginfo{ng: &store.NodeGroup{
|
||||||
for _, c := range contexts {
|
Name: l.Name,
|
||||||
ngi := &nginfo{ng: &store.NodeGroup{
|
|
||||||
Name: c.Name,
|
|
||||||
Nodes: []store.Node{{
|
Nodes: []store.Node{{
|
||||||
Name: c.Name,
|
Name: l.Name,
|
||||||
Endpoint: c.Name,
|
Endpoint: l.Name,
|
||||||
}},
|
}},
|
||||||
}}
|
}}
|
||||||
// if a context has the same name as an instance from the store, do not
|
|
||||||
// add it to the builders list. An instance from the store takes
|
|
||||||
// precedence over context builders.
|
|
||||||
if hasNodeGroup(builders, ngi) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
builders = append(builders, ngi)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
builders = append(builders, ctxbuilders...)
|
||||||
|
|
||||||
eg, _ := errgroup.WithContext(ctx)
|
eg, _ := errgroup.WithContext(ctx)
|
||||||
|
|
||||||
for _, b := range builders {
|
for _, b := range builders {
|
||||||
@@ -99,72 +92,49 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
w := tabwriter.NewWriter(dockerCli.Out(), 0, 0, 1, ' ', 0)
|
w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
|
||||||
fmt.Fprintf(w, "NAME/NODE\tDRIVER/ENDPOINT\tSTATUS\tBUILDKIT\tPLATFORMS\n")
|
fmt.Fprintf(w, "NAME/NODE\tDRIVER/ENDPOINT\tSTATUS\tPLATFORMS\n")
|
||||||
|
|
||||||
currentSet := false
|
currentSet := false
|
||||||
printErr := false
|
|
||||||
for _, b := range builders {
|
for _, b := range builders {
|
||||||
if !currentSet && b.ng.Name == currentName {
|
if !currentSet && b.ng.Name == currentName {
|
||||||
b.ng.Name += " *"
|
b.ng.Name += " *"
|
||||||
currentSet = true
|
currentSet = true
|
||||||
}
|
}
|
||||||
if ok := printngi(w, b); !ok {
|
printngi(w, b)
|
||||||
printErr = true
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
w.Flush()
|
w.Flush()
|
||||||
|
|
||||||
if printErr {
|
|
||||||
_, _ = fmt.Fprintf(dockerCli.Err(), "\n")
|
|
||||||
for _, b := range builders {
|
|
||||||
if b.err != nil {
|
|
||||||
_, _ = fmt.Fprintf(dockerCli.Err(), "Cannot load builder %s: %s\n", b.ng.Name, strings.TrimSpace(b.err.Error()))
|
|
||||||
} else {
|
|
||||||
for idx, n := range b.ng.Nodes {
|
|
||||||
d := b.drivers[idx]
|
|
||||||
var nodeErr string
|
|
||||||
if d.err != nil {
|
|
||||||
nodeErr = d.err.Error()
|
|
||||||
} else if d.di.Err != nil {
|
|
||||||
nodeErr = d.di.Err.Error()
|
|
||||||
}
|
|
||||||
if nodeErr != "" {
|
|
||||||
_, _ = fmt.Fprintf(dockerCli.Err(), "Failed to get status for %s (%s): %s\n", b.ng.Name, n.Name, strings.TrimSpace(nodeErr))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func printngi(w io.Writer, ngi *nginfo) (ok bool) {
|
func printngi(w io.Writer, ngi *nginfo) {
|
||||||
ok = true
|
|
||||||
var err string
|
var err string
|
||||||
if ngi.err != nil {
|
if ngi.err != nil {
|
||||||
ok = false
|
err = ngi.err.Error()
|
||||||
err = "error"
|
|
||||||
}
|
}
|
||||||
fmt.Fprintf(w, "%s\t%s\t%s\t\t\n", ngi.ng.Name, ngi.ng.Driver, err)
|
fmt.Fprintf(w, "%s\t%s\t%s\t\n", ngi.ng.Name, ngi.ng.Driver, err)
|
||||||
if ngi.err == nil {
|
if ngi.err == nil {
|
||||||
for idx, n := range ngi.ng.Nodes {
|
for idx, n := range ngi.ng.Nodes {
|
||||||
d := ngi.drivers[idx]
|
d := ngi.drivers[idx]
|
||||||
|
var err string
|
||||||
|
if d.err != nil {
|
||||||
|
err = d.err.Error()
|
||||||
|
} else if d.di.Err != nil {
|
||||||
|
err = d.di.Err.Error()
|
||||||
|
}
|
||||||
var status string
|
var status string
|
||||||
if d.info != nil {
|
if d.info != nil {
|
||||||
status = d.info.Status.String()
|
status = d.info.Status.String()
|
||||||
}
|
}
|
||||||
if d.err != nil || d.di.Err != nil {
|
if err != "" {
|
||||||
ok = false
|
fmt.Fprintf(w, " %s\t%s\t%s\n", n.Name, n.Endpoint, err)
|
||||||
fmt.Fprintf(w, " %s\t%s\t%s\t\t\n", n.Name, n.Endpoint, "error")
|
|
||||||
} else {
|
} else {
|
||||||
fmt.Fprintf(w, " %s\t%s\t%s\t%s\t%s\n", n.Name, n.Endpoint, status, d.version, strings.Join(platformutil.FormatInGroups(n.Platforms, d.platforms), ", "))
|
fmt.Fprintf(w, " %s\t%s\t%s\t%s\n", n.Name, n.Endpoint, status, strings.Join(platformutil.FormatInGroups(n.Platforms, d.platforms), ", "))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func lsCmd(dockerCli command.Cli) *cobra.Command {
|
func lsCmd(dockerCli command.Cli) *cobra.Command {
|
||||||
|
|||||||
@@ -1,48 +0,0 @@
|
|||||||
package commands
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/docker/buildx/build"
|
|
||||||
"github.com/docker/docker/api/types/versions"
|
|
||||||
"github.com/moby/buildkit/frontend/subrequests"
|
|
||||||
"github.com/moby/buildkit/frontend/subrequests/outline"
|
|
||||||
"github.com/moby/buildkit/frontend/subrequests/targets"
|
|
||||||
)
|
|
||||||
|
|
||||||
func printResult(f *build.PrintFunc, res map[string]string) error {
|
|
||||||
switch f.Name {
|
|
||||||
case "outline":
|
|
||||||
return printValue(outline.PrintOutline, outline.SubrequestsOutlineDefinition.Version, f.Format, res)
|
|
||||||
case "targets":
|
|
||||||
return printValue(targets.PrintTargets, targets.SubrequestsTargetsDefinition.Version, f.Format, res)
|
|
||||||
case "subrequests.describe":
|
|
||||||
return printValue(subrequests.PrintDescribe, subrequests.SubrequestsDescribeDefinition.Version, f.Format, res)
|
|
||||||
default:
|
|
||||||
if dt, ok := res["result.txt"]; ok {
|
|
||||||
fmt.Print(dt)
|
|
||||||
} else {
|
|
||||||
log.Printf("%s %+v", f, res)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type printFunc func([]byte, io.Writer) error
|
|
||||||
|
|
||||||
func printValue(printer printFunc, version string, format string, res map[string]string) error {
|
|
||||||
if format == "json" {
|
|
||||||
fmt.Fprintln(os.Stdout, res["result.json"])
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if res["version"] != "" && versions.LessThan(version, res["version"]) && res["result.txt"] != "" {
|
|
||||||
// structure is too new and we don't know how to print it
|
|
||||||
fmt.Fprint(os.Stdout, res["result.txt"])
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return printer([]byte(res["result.json"]), os.Stdout)
|
|
||||||
}
|
|
||||||
@@ -12,11 +12,11 @@ import (
|
|||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/opts"
|
"github.com/docker/cli/opts"
|
||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
"github.com/docker/go-units"
|
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
"github.com/tonistiigi/units"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -119,7 +119,7 @@ func runPrune(dockerCli command.Cli, opts pruneOptions) error {
|
|||||||
<-printed
|
<-printed
|
||||||
|
|
||||||
tw = tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
|
tw = tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
|
||||||
fmt.Fprintf(tw, "Total:\t%s\n", units.HumanSize(float64(total)))
|
fmt.Fprintf(tw, "Total:\t%.2f\n", units.Bytes(total))
|
||||||
tw.Flush()
|
tw.Flush()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -138,8 +138,8 @@ func pruneCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
}
|
}
|
||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
flags.BoolVarP(&options.all, "all", "a", false, "Include internal/frontend images")
|
flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones")
|
||||||
flags.Var(&options.filter, "filter", `Provide filter values (e.g., "until=24h")`)
|
flags.Var(&options.filter, "filter", "Provide filter values (e.g., `until=24h`)")
|
||||||
flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache")
|
flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache")
|
||||||
flags.BoolVar(&options.verbose, "verbose", false, "Provide a more verbose output")
|
flags.BoolVar(&options.verbose, "verbose", false, "Provide a more verbose output")
|
||||||
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
|
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
|
||||||
@@ -155,9 +155,9 @@ func toBuildkitPruneInfo(f filters.Args) (*client.PruneInfo, error) {
|
|||||||
if len(untilValues) > 0 && len(unusedForValues) > 0 {
|
if len(untilValues) > 0 && len(unusedForValues) > 0 {
|
||||||
return nil, errors.Errorf("conflicting filters %q and %q", "until", "unused-for")
|
return nil, errors.Errorf("conflicting filters %q and %q", "until", "unused-for")
|
||||||
}
|
}
|
||||||
untilKey := "until"
|
filterKey := "until"
|
||||||
if len(unusedForValues) > 0 {
|
if len(unusedForValues) > 0 {
|
||||||
untilKey = "unused-for"
|
filterKey = "unused-for"
|
||||||
}
|
}
|
||||||
untilValues = append(untilValues, unusedForValues...)
|
untilValues = append(untilValues, unusedForValues...)
|
||||||
|
|
||||||
@@ -168,27 +168,23 @@ func toBuildkitPruneInfo(f filters.Args) (*client.PruneInfo, error) {
|
|||||||
var err error
|
var err error
|
||||||
until, err = time.ParseDuration(untilValues[0])
|
until, err = time.ParseDuration(untilValues[0])
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrapf(err, "%q filter expects a duration (e.g., '24h')", untilKey)
|
return nil, errors.Wrapf(err, "%q filter expects a duration (e.g., '24h')", filterKey)
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
return nil, errors.Errorf("filters expect only one value")
|
return nil, errors.Errorf("filters expect only one value")
|
||||||
}
|
}
|
||||||
|
|
||||||
filters := make([]string, 0, f.Len())
|
bkFilter := make([]string, 0, f.Len())
|
||||||
for _, filterKey := range f.Keys() {
|
for _, field := range f.Keys() {
|
||||||
if filterKey == untilKey {
|
values := f.Get(field)
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
values := f.Get(filterKey)
|
|
||||||
switch len(values) {
|
switch len(values) {
|
||||||
case 0:
|
case 0:
|
||||||
filters = append(filters, filterKey)
|
bkFilter = append(bkFilter, field)
|
||||||
case 1:
|
case 1:
|
||||||
if filterKey == "id" {
|
if field == "id" {
|
||||||
filters = append(filters, filterKey+"~="+values[0])
|
bkFilter = append(bkFilter, field+"~="+values[0])
|
||||||
} else {
|
} else {
|
||||||
filters = append(filters, filterKey+"=="+values[0])
|
bkFilter = append(bkFilter, field+"=="+values[0])
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
return nil, errors.Errorf("filters expect only one value")
|
return nil, errors.Errorf("filters expect only one value")
|
||||||
@@ -196,6 +192,6 @@ func toBuildkitPruneInfo(f filters.Args) (*client.PruneInfo, error) {
|
|||||||
}
|
}
|
||||||
return &client.PruneInfo{
|
return &client.PruneInfo{
|
||||||
KeepDuration: until,
|
KeepDuration: until,
|
||||||
Filter: []string{strings.Join(filters, ",")},
|
Filter: []string{strings.Join(bkFilter, ",")},
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|||||||
124
commands/rm.go
124
commands/rm.go
@@ -2,83 +2,53 @@ package commands
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type rmOptions struct {
|
type rmOptions struct {
|
||||||
builder string
|
builder string
|
||||||
keepState bool
|
keepState bool
|
||||||
keepDaemon bool
|
|
||||||
allInactive bool
|
|
||||||
force bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
|
||||||
rmInactiveWarning = `WARNING! This will remove all builders that are not in running state. Are you sure you want to continue?`
|
|
||||||
)
|
|
||||||
|
|
||||||
func runRm(dockerCli command.Cli, in rmOptions) error {
|
func runRm(dockerCli command.Cli, in rmOptions) error {
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
if in.allInactive && !in.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), rmInactiveWarning) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
txn, release, err := storeutil.GetStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer release()
|
defer release()
|
||||||
|
|
||||||
if in.allInactive {
|
|
||||||
return rmAllInactive(ctx, txn, dockerCli, in)
|
|
||||||
}
|
|
||||||
|
|
||||||
var ng *store.NodeGroup
|
|
||||||
if in.builder != "" {
|
if in.builder != "" {
|
||||||
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
|
ng, err := storeutil.GetNodeGroup(txn, dockerCli, in.builder)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else {
|
err1 := rm(ctx, dockerCli, ng, in.keepState)
|
||||||
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
|
if err := txn.Remove(ng.Name); err != nil {
|
||||||
if err != nil {
|
return err
|
||||||
|
}
|
||||||
|
return err1
|
||||||
|
}
|
||||||
|
|
||||||
|
ng, err := storeutil.GetCurrentInstance(txn, dockerCli)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if ng != nil {
|
||||||
|
err1 := rm(ctx, dockerCli, ng, in.keepState)
|
||||||
|
if err := txn.Remove(ng.Name); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
|
||||||
if ng == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
ctxbuilders, err := dockerCli.ContextStore().List()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, cb := range ctxbuilders {
|
|
||||||
if ng.Driver == "docker" && len(ng.Nodes) == 1 && ng.Nodes[0].Endpoint == cb.Name {
|
|
||||||
return errors.Errorf("context builder cannot be removed, run `docker context rm %s` to remove this context", cb.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
err1 := rm(ctx, dockerCli, in, ng)
|
|
||||||
if err := txn.Remove(ng.Name); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err1 != nil {
|
|
||||||
return err1
|
return err1
|
||||||
}
|
}
|
||||||
|
|
||||||
_, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", ng.Name)
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -92,9 +62,6 @@ func rmCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
options.builder = rootOpts.builder
|
options.builder = rootOpts.builder
|
||||||
if len(args) > 0 {
|
if len(args) > 0 {
|
||||||
if options.allInactive {
|
|
||||||
return errors.New("cannot specify builder name when --all-inactive is set")
|
|
||||||
}
|
|
||||||
options.builder = args[0]
|
options.builder = args[0]
|
||||||
}
|
}
|
||||||
return runRm(dockerCli, options)
|
return runRm(dockerCli, options)
|
||||||
@@ -103,30 +70,23 @@ func rmCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
flags.BoolVar(&options.keepState, "keep-state", false, "Keep BuildKit state")
|
flags.BoolVar(&options.keepState, "keep-state", false, "Keep BuildKit state")
|
||||||
flags.BoolVar(&options.keepDaemon, "keep-daemon", false, "Keep the buildkitd daemon running")
|
|
||||||
flags.BoolVar(&options.allInactive, "all-inactive", false, "Remove all inactive builders")
|
|
||||||
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
|
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
func rm(ctx context.Context, dockerCli command.Cli, in rmOptions, ng *store.NodeGroup) error {
|
func rm(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, keepState bool) error {
|
||||||
dis, err := driversForNodeGroup(ctx, dockerCli, ng, "")
|
dis, err := driversForNodeGroup(ctx, dockerCli, ng, "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
for _, di := range dis {
|
for _, di := range dis {
|
||||||
if di.Driver == nil {
|
if di.Driver != nil {
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Do not stop the buildkitd daemon when --keep-daemon is provided
|
|
||||||
if !in.keepDaemon {
|
|
||||||
if err := di.Driver.Stop(ctx, true); err != nil {
|
if err := di.Driver.Stop(ctx, true); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
if err := di.Driver.Rm(ctx, true, !keepState); err != nil {
|
||||||
if err := di.Driver.Rm(ctx, true, !in.keepState, !in.keepDaemon); err != nil {
|
return err
|
||||||
return err
|
}
|
||||||
}
|
}
|
||||||
if di.Err != nil {
|
if di.Err != nil {
|
||||||
err = di.Err
|
err = di.Err
|
||||||
@@ -134,43 +94,3 @@ func rm(ctx context.Context, dockerCli command.Cli, in rmOptions, ng *store.Node
|
|||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func rmAllInactive(ctx context.Context, txn *store.Txn, dockerCli command.Cli, in rmOptions) error {
|
|
||||||
ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
ll, err := txn.List()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
builders := make([]*nginfo, len(ll))
|
|
||||||
for i, ng := range ll {
|
|
||||||
builders[i] = &nginfo{ng: ng}
|
|
||||||
}
|
|
||||||
|
|
||||||
eg, _ := errgroup.WithContext(ctx)
|
|
||||||
for _, b := range builders {
|
|
||||||
func(b *nginfo) {
|
|
||||||
eg.Go(func() error {
|
|
||||||
if err := loadNodeGroupData(ctx, dockerCli, b); err != nil {
|
|
||||||
return errors.Wrapf(err, "cannot load %s", b.ng.Name)
|
|
||||||
}
|
|
||||||
if b.ng.Dynamic {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if b.inactive() {
|
|
||||||
rmerr := rm(ctx, dockerCli, in, b.ng)
|
|
||||||
if err := txn.Remove(b.ng.Name); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
_, _ = fmt.Fprintf(dockerCli.Err(), "%s removed\n", b.ng.Name)
|
|
||||||
return rmerr
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
return eg.Wait()
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -4,12 +4,8 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
|
|
||||||
imagetoolscmd "github.com/docker/buildx/commands/imagetools"
|
imagetoolscmd "github.com/docker/buildx/commands/imagetools"
|
||||||
"github.com/docker/buildx/util/logutil"
|
|
||||||
"github.com/docker/cli-docs-tool/annotation"
|
|
||||||
"github.com/docker/cli/cli"
|
|
||||||
"github.com/docker/cli/cli-plugins/plugin"
|
"github.com/docker/cli/cli-plugins/plugin"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
)
|
)
|
||||||
@@ -19,45 +15,13 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
|
|||||||
Short: "Docker Buildx",
|
Short: "Docker Buildx",
|
||||||
Long: `Extended build capabilities with BuildKit`,
|
Long: `Extended build capabilities with BuildKit`,
|
||||||
Use: name,
|
Use: name,
|
||||||
Annotations: map[string]string{
|
|
||||||
annotation.CodeDelimiter: `"`,
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
if isPlugin {
|
if isPlugin {
|
||||||
cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
|
cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
|
||||||
return plugin.PersistentPreRunE(cmd, args)
|
return plugin.PersistentPreRunE(cmd, args)
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
// match plugin behavior for standalone mode
|
|
||||||
// https://github.com/docker/cli/blob/6c9eb708fa6d17765d71965f90e1c59cea686ee9/cli-plugins/plugin/plugin.go#L117-L127
|
|
||||||
cmd.SilenceUsage = true
|
|
||||||
cmd.SilenceErrors = true
|
|
||||||
cmd.TraverseChildren = true
|
|
||||||
cmd.DisableFlagsInUseLine = true
|
|
||||||
cli.DisableFlagsInUseLine(cmd)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
logrus.SetFormatter(&logutil.Formatter{})
|
|
||||||
|
|
||||||
logrus.AddHook(logutil.NewFilter([]logrus.Level{
|
|
||||||
logrus.DebugLevel,
|
|
||||||
},
|
|
||||||
"serving grpc connection",
|
|
||||||
"stopping session",
|
|
||||||
"using default config store",
|
|
||||||
))
|
|
||||||
|
|
||||||
// filter out useless commandConn.CloseWrite warning message that can occur
|
|
||||||
// when listing builder instances with "buildx ls" for those that are
|
|
||||||
// unreachable: "commandConn.CloseWrite: commandconn: failed to wait: signal: killed"
|
|
||||||
// https://github.com/docker/cli/blob/3fb4fb83dfb5db0c0753a8316f21aea54dab32c5/cli/connhelper/commandconn/commandconn.go#L203-L214
|
|
||||||
logrus.AddHook(logutil.NewFilter([]logrus.Level{
|
|
||||||
logrus.WarnLevel,
|
|
||||||
},
|
|
||||||
"commandConn.CloseWrite:",
|
|
||||||
"commandConn.CloseRead:",
|
|
||||||
))
|
|
||||||
|
|
||||||
addCommands(cmd, dockerCli)
|
addCommands(cmd, dockerCli)
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
@@ -84,7 +48,7 @@ func addCommands(cmd *cobra.Command, dockerCli command.Cli) {
|
|||||||
versionCmd(dockerCli),
|
versionCmd(dockerCli),
|
||||||
pruneCmd(dockerCli, opts),
|
pruneCmd(dockerCli, opts),
|
||||||
duCmd(dockerCli, opts),
|
duCmd(dockerCli, opts),
|
||||||
imagetoolscmd.RootCmd(dockerCli, imagetoolscmd.RootOptions{Builder: &opts.builder}),
|
imagetoolscmd.RootCmd(dockerCli, imagetoolscmd.RootOptions{Builder: opts.builder}),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -8,23 +8,20 @@ import (
|
|||||||
|
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/build"
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
ctxkube "github.com/docker/buildx/driver/kubernetes/context"
|
|
||||||
remoteutil "github.com/docker/buildx/driver/remote/util"
|
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/cli/context/docker"
|
"github.com/docker/cli/cli/context/docker"
|
||||||
|
"github.com/docker/cli/cli/context/kubernetes"
|
||||||
ctxstore "github.com/docker/cli/cli/context/store"
|
ctxstore "github.com/docker/cli/cli/context/store"
|
||||||
dopts "github.com/docker/cli/opts"
|
dopts "github.com/docker/cli/opts"
|
||||||
dockerclient "github.com/docker/docker/client"
|
dockerclient "github.com/docker/docker/client"
|
||||||
"github.com/moby/buildkit/util/grpcerrors"
|
|
||||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"k8s.io/client-go/tools/clientcmd"
|
"k8s.io/client-go/tools/clientcmd"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -44,14 +41,6 @@ func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
|
|||||||
return h, nil
|
return h, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// validateBuildkitEndpoint validates that endpoint is a valid buildkit host
|
|
||||||
func validateBuildkitEndpoint(ep string) (string, error) {
|
|
||||||
if err := remoteutil.IsValidEndpoint(ep); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return ep, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// driversForNodeGroup returns drivers for a nodegroup instance
|
// driversForNodeGroup returns drivers for a nodegroup instance
|
||||||
func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, contextPathHash string) ([]build.DriverInfo, error) {
|
func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, contextPathHash string) ([]build.DriverInfo, error) {
|
||||||
eg, _ := errgroup.WithContext(ctx)
|
eg, _ := errgroup.WithContext(ctx)
|
||||||
@@ -60,27 +49,16 @@ func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.N
|
|||||||
|
|
||||||
var f driver.Factory
|
var f driver.Factory
|
||||||
if ng.Driver != "" {
|
if ng.Driver != "" {
|
||||||
var err error
|
f = driver.GetFactory(ng.Driver, true)
|
||||||
f, err = driver.GetFactory(ng.Driver, true)
|
if f == nil {
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Errorf("failed to find driver %q", f)
|
return nil, errors.Errorf("failed to find driver %q", f)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// empty driver means nodegroup was implicitly created as a default
|
dockerapi, err := clientForEndpoint(dockerCli, ng.Nodes[0].Endpoint)
|
||||||
// driver for a docker context and allows falling back to a
|
|
||||||
// docker-container driver for older daemon that doesn't support
|
|
||||||
// buildkit (< 18.06).
|
|
||||||
ep := ng.Nodes[0].Endpoint
|
|
||||||
dockerapi, err := clientForEndpoint(dockerCli, ep)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
// check if endpoint is healthy is needed to determine the driver type.
|
f, err = driver.GetDefaultFactory(ctx, dockerapi, false)
|
||||||
// if this fails then can't continue with driver selection.
|
|
||||||
if _, err = dockerapi.Ping(ctx); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
f, err = driver.GetDefaultFactory(ctx, ep, dockerapi, false)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -95,14 +73,12 @@ func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.N
|
|||||||
func(i int, n store.Node) {
|
func(i int, n store.Node) {
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
di := build.DriverInfo{
|
di := build.DriverInfo{
|
||||||
Name: n.Name,
|
Name: n.Name,
|
||||||
Platform: n.Platforms,
|
Platform: n.Platforms,
|
||||||
ProxyConfig: storeutil.GetProxyConfig(dockerCli),
|
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
dis[i] = di
|
dis[i] = di
|
||||||
}()
|
}()
|
||||||
|
|
||||||
dockerapi, err := clientForEndpoint(dockerCli, n.Endpoint)
|
dockerapi, err := clientForEndpoint(dockerCli, n.Endpoint)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
di.Err = err
|
di.Err = err
|
||||||
@@ -141,7 +117,7 @@ func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.N
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, f, n.Endpoint, dockerapi, imageopt.Auth, kcc, n.Flags, n.Files, n.DriverOpts, n.Platforms, contextPathHash)
|
d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, f, dockerapi, imageopt.Auth, kcc, n.Flags, n.Files, n.DriverOpts, n.Platforms, contextPathHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
di.Err = err
|
di.Err = err
|
||||||
return nil
|
return nil
|
||||||
@@ -173,7 +149,7 @@ func configFromContext(endpointName string, s ctxstore.Reader) (clientcmd.Client
|
|||||||
}
|
}
|
||||||
return clientcmd.NewDefaultClientConfig(*apiConfig, &clientcmd.ConfigOverrides{}), nil
|
return clientcmd.NewDefaultClientConfig(*apiConfig, &clientcmd.ConfigOverrides{}), nil
|
||||||
}
|
}
|
||||||
return ctxkube.ConfigFromContext(endpointName, s)
|
return kubernetes.ConfigFromContext(endpointName, s)
|
||||||
}
|
}
|
||||||
|
|
||||||
// clientForEndpoint returns a docker client for an endpoint
|
// clientForEndpoint returns a docker client for an endpoint
|
||||||
@@ -282,16 +258,15 @@ func getDefaultDrivers(ctx context.Context, dockerCli command.Cli, defaultOnly b
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
d, err := driver.GetDriver(ctx, "buildx_buildkit_default", nil, "", dockerCli.Client(), imageopt.Auth, nil, nil, nil, nil, nil, contextPathHash)
|
d, err := driver.GetDriver(ctx, "buildx_buildkit_default", nil, dockerCli.Client(), imageopt.Auth, nil, nil, nil, nil, nil, contextPathHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return []build.DriverInfo{
|
return []build.DriverInfo{
|
||||||
{
|
{
|
||||||
Name: "default",
|
Name: "default",
|
||||||
Driver: d,
|
Driver: d,
|
||||||
ImageOpt: imageopt,
|
ImageOpt: imageopt,
|
||||||
ProxyConfig: storeutil.GetProxyConfig(dockerCli),
|
|
||||||
},
|
},
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
@@ -315,20 +290,11 @@ func loadInfoData(ctx context.Context, d *dinfo) error {
|
|||||||
return errors.Wrap(err, "listing workers")
|
return errors.Wrap(err, "listing workers")
|
||||||
}
|
}
|
||||||
for _, w := range workers {
|
for _, w := range workers {
|
||||||
d.platforms = append(d.platforms, w.Platforms...)
|
for _, p := range w.Platforms {
|
||||||
|
d.platforms = append(d.platforms, p)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
d.platforms = platformutil.Dedupe(d.platforms)
|
d.platforms = platformutil.Dedupe(d.platforms)
|
||||||
inf, err := c.Info(ctx)
|
|
||||||
if err != nil {
|
|
||||||
if st, ok := grpcerrors.AsGRPCStatus(err); ok && st.Code() == codes.Unimplemented {
|
|
||||||
d.version, err = d.di.Driver.Version(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "getting version")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
d.version = inf.BuildkitVersion.Version
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -397,15 +363,6 @@ func loadNodeGroupData(ctx context.Context, dockerCli command.Cli, ngi *nginfo)
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func hasNodeGroup(list []*nginfo, ngi *nginfo) bool {
|
|
||||||
for _, l := range list {
|
|
||||||
if ngi.ng.Name == l.ng.Name {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func dockerAPI(dockerCli command.Cli) *api {
|
func dockerAPI(dockerCli command.Cli) *api {
|
||||||
return &api{dockerCli: dockerCli}
|
return &api{dockerCli: dockerCli}
|
||||||
}
|
}
|
||||||
@@ -425,7 +382,6 @@ type dinfo struct {
|
|||||||
di *build.DriverInfo
|
di *build.DriverInfo
|
||||||
info *driver.Info
|
info *driver.Info
|
||||||
platforms []specs.Platform
|
platforms []specs.Platform
|
||||||
version string
|
|
||||||
err error
|
err error
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -435,17 +391,6 @@ type nginfo struct {
|
|||||||
err error
|
err error
|
||||||
}
|
}
|
||||||
|
|
||||||
// inactive checks if all nodes are inactive for this builder
|
|
||||||
func (n *nginfo) inactive() bool {
|
|
||||||
for idx := range n.ng.Nodes {
|
|
||||||
d := n.drivers[idx]
|
|
||||||
if d.info != nil && d.info.Status == driver.Running {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func boot(ctx context.Context, ngi *nginfo) (bool, error) {
|
func boot(ctx context.Context, ngi *nginfo) (bool, error) {
|
||||||
toBoot := make([]int, 0, len(ngi.drivers))
|
toBoot := make([]int, 0, len(ngi.drivers))
|
||||||
for i, d := range ngi.drivers {
|
for i, d := range ngi.drivers {
|
||||||
@@ -460,7 +405,7 @@ func boot(ctx context.Context, ngi *nginfo) (bool, error) {
|
|||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
printer := progress.NewPrinter(context.TODO(), os.Stderr, os.Stderr, "auto")
|
printer := progress.NewPrinter(context.TODO(), os.Stderr, "auto")
|
||||||
|
|
||||||
baseCtx := ctx
|
baseCtx := ctx
|
||||||
eg, _ := errgroup.WithContext(ctx)
|
eg, _ := errgroup.WithContext(ctx)
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
variable "GO_VERSION" {
|
variable "GO_VERSION" {
|
||||||
default = "1.18"
|
default = "1.17"
|
||||||
}
|
}
|
||||||
variable "BIN_OUT" {
|
variable "BIN_OUT" {
|
||||||
default = "./bin"
|
default = "./bin"
|
||||||
@@ -89,7 +89,6 @@ target "mod-outdated" {
|
|||||||
inherits = ["_common"]
|
inherits = ["_common"]
|
||||||
dockerfile = "./hack/dockerfiles/vendor.Dockerfile"
|
dockerfile = "./hack/dockerfiles/vendor.Dockerfile"
|
||||||
target = "outdated"
|
target = "outdated"
|
||||||
no-cache-filter = ["outdated"]
|
|
||||||
output = ["type=cacheonly"]
|
output = ["type=cacheonly"]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -16,7 +16,6 @@ import (
|
|||||||
_ "github.com/docker/buildx/driver/docker"
|
_ "github.com/docker/buildx/driver/docker"
|
||||||
_ "github.com/docker/buildx/driver/docker-container"
|
_ "github.com/docker/buildx/driver/docker-container"
|
||||||
_ "github.com/docker/buildx/driver/kubernetes"
|
_ "github.com/docker/buildx/driver/kubernetes"
|
||||||
_ "github.com/docker/buildx/driver/remote"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const defaultSourcePath = "docs/reference/"
|
const defaultSourcePath = "docs/reference/"
|
||||||
|
|||||||
@@ -1,74 +0,0 @@
|
|||||||
# Defining additional build contexts and linking targets
|
|
||||||
|
|
||||||
In addition to the main `context` key that defines the build context each target
|
|
||||||
can also define additional named contexts with a map defined with key `contexts`.
|
|
||||||
These values map to the `--build-context` flag in the [build command](https://docs.docker.com/engine/reference/commandline/buildx_build/#build-context).
|
|
||||||
|
|
||||||
Inside the Dockerfile these contexts can be used with the `FROM` instruction or `--from` flag.
|
|
||||||
|
|
||||||
The value can be a local source directory, container image (with `docker-image://` prefix),
|
|
||||||
Git URL, HTTP URL or a name of another target in the Bake file (with `target:` prefix).
|
|
||||||
|
|
||||||
## Pinning alpine image
|
|
||||||
|
|
||||||
```dockerfile
|
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
FROM alpine
|
|
||||||
RUN echo "Hello world"
|
|
||||||
```
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
target "app" {
|
|
||||||
contexts = {
|
|
||||||
alpine = "docker-image://alpine:3.13"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Using a secondary source directory
|
|
||||||
|
|
||||||
```dockerfile
|
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
FROM scratch AS src
|
|
||||||
|
|
||||||
FROM golang
|
|
||||||
COPY --from=src . .
|
|
||||||
```
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
target "app" {
|
|
||||||
contexts = {
|
|
||||||
src = "../path/to/source"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Using a result of one target as a base image in another target
|
|
||||||
|
|
||||||
To use a result of one target as a build context of another, specity the target
|
|
||||||
name with `target:` prefix.
|
|
||||||
|
|
||||||
```dockerfile
|
|
||||||
# syntax=docker/dockerfile:1
|
|
||||||
FROM baseapp
|
|
||||||
RUN echo "Hello world"
|
|
||||||
```
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
target "base" {
|
|
||||||
dockerfile = "baseapp.Dockerfile"
|
|
||||||
}
|
|
||||||
|
|
||||||
target "app" {
|
|
||||||
contexts = {
|
|
||||||
baseapp = "target:base"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Please note that in most cases you should just use a single multi-stage
|
|
||||||
Dockerfile with multiple targets for similar behavior. This case is recommended
|
|
||||||
when you have multiple Dockerfiles that can't be easily merged into one.
|
|
||||||
@@ -1,270 +0,0 @@
|
|||||||
# Building from Compose file
|
|
||||||
|
|
||||||
## Specification
|
|
||||||
|
|
||||||
Bake uses the [compose-spec](https://docs.docker.com/compose/compose-file/) to
|
|
||||||
parse a compose file and translate each service to a [target](file-definition.md#target).
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# docker-compose.yml
|
|
||||||
services:
|
|
||||||
webapp-dev:
|
|
||||||
build: &build-dev
|
|
||||||
dockerfile: Dockerfile.webapp
|
|
||||||
tags:
|
|
||||||
- docker.io/username/webapp:latest
|
|
||||||
cache_from:
|
|
||||||
- docker.io/username/webapp:cache
|
|
||||||
cache_to:
|
|
||||||
- docker.io/username/webapp:cache
|
|
||||||
|
|
||||||
webapp-release:
|
|
||||||
build:
|
|
||||||
<<: *build-dev
|
|
||||||
x-bake:
|
|
||||||
platforms:
|
|
||||||
- linux/amd64
|
|
||||||
- linux/arm64
|
|
||||||
|
|
||||||
db:
|
|
||||||
image: docker.io/username/db
|
|
||||||
build:
|
|
||||||
dockerfile: Dockerfile.db
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --print
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"db",
|
|
||||||
"webapp-dev",
|
|
||||||
"webapp-release"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"db": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile.db",
|
|
||||||
"tags": [
|
|
||||||
"docker.io/username/db"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"webapp-dev": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile.webapp",
|
|
||||||
"tags": [
|
|
||||||
"docker.io/username/webapp:latest"
|
|
||||||
],
|
|
||||||
"cache-from": [
|
|
||||||
"docker.io/username/webapp:cache"
|
|
||||||
],
|
|
||||||
"cache-to": [
|
|
||||||
"docker.io/username/webapp:cache"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"webapp-release": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile.webapp",
|
|
||||||
"tags": [
|
|
||||||
"docker.io/username/webapp:latest"
|
|
||||||
],
|
|
||||||
"cache-from": [
|
|
||||||
"docker.io/username/webapp:cache"
|
|
||||||
],
|
|
||||||
"cache-to": [
|
|
||||||
"docker.io/username/webapp:cache"
|
|
||||||
],
|
|
||||||
"platforms": [
|
|
||||||
"linux/amd64",
|
|
||||||
"linux/arm64"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Unlike the [HCL format](file-definition.md#hcl-definition), there are some
|
|
||||||
limitations with the compose format:
|
|
||||||
|
|
||||||
* Specifying variables or global scope attributes is not yet supported
|
|
||||||
* `inherits` service field is not supported, but you can use [YAML anchors](https://docs.docker.com/compose/compose-file/#fragments) to reference other services like the example above
|
|
||||||
|
|
||||||
## `.env` file
|
|
||||||
|
|
||||||
You can declare default environment variables in an environment file named
|
|
||||||
`.env`. This file will be loaded from the current working directory,
|
|
||||||
where the command is executed and applied to compose definitions passed
|
|
||||||
with `-f`.
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# docker-compose.yml
|
|
||||||
services:
|
|
||||||
webapp:
|
|
||||||
image: docker.io/username/webapp:${TAG:-v1.0.0}
|
|
||||||
build:
|
|
||||||
dockerfile: Dockerfile
|
|
||||||
```
|
|
||||||
|
|
||||||
```
|
|
||||||
# .env
|
|
||||||
TAG=v1.1.0
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --print
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"webapp"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"webapp": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"tags": [
|
|
||||||
"docker.io/username/webapp:v1.1.0"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
> **Note**
|
|
||||||
>
|
|
||||||
> System environment variables take precedence over environment variables
|
|
||||||
> in `.env` file.
|
|
||||||
|
|
||||||
## Extension field with `x-bake`
|
|
||||||
|
|
||||||
Even if some fields are not (yet) available in the compose specification, you
|
|
||||||
can use the [special extension](https://docs.docker.com/compose/compose-file/#extension)
|
|
||||||
field `x-bake` in your compose file to evaluate extra fields:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# docker-compose.yml
|
|
||||||
services:
|
|
||||||
addon:
|
|
||||||
image: ct-addon:bar
|
|
||||||
build:
|
|
||||||
context: .
|
|
||||||
dockerfile: ./Dockerfile
|
|
||||||
args:
|
|
||||||
CT_ECR: foo
|
|
||||||
CT_TAG: bar
|
|
||||||
x-bake:
|
|
||||||
tags:
|
|
||||||
- ct-addon:foo
|
|
||||||
- ct-addon:alp
|
|
||||||
platforms:
|
|
||||||
- linux/amd64
|
|
||||||
- linux/arm64
|
|
||||||
cache-from:
|
|
||||||
- user/app:cache
|
|
||||||
- type=local,src=path/to/cache
|
|
||||||
cache-to:
|
|
||||||
- type=local,dest=path/to/cache
|
|
||||||
pull: true
|
|
||||||
|
|
||||||
aws:
|
|
||||||
image: ct-fake-aws:bar
|
|
||||||
build:
|
|
||||||
dockerfile: ./aws.Dockerfile
|
|
||||||
args:
|
|
||||||
CT_ECR: foo
|
|
||||||
CT_TAG: bar
|
|
||||||
x-bake:
|
|
||||||
secret:
|
|
||||||
- id=mysecret,src=./secret
|
|
||||||
- id=mysecret2,src=./secret2
|
|
||||||
platforms: linux/arm64
|
|
||||||
output: type=docker
|
|
||||||
no-cache: true
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --print
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"aws",
|
|
||||||
"addon"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"addon": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "./Dockerfile",
|
|
||||||
"args": {
|
|
||||||
"CT_ECR": "foo",
|
|
||||||
"CT_TAG": "bar"
|
|
||||||
},
|
|
||||||
"tags": [
|
|
||||||
"ct-addon:foo",
|
|
||||||
"ct-addon:alp"
|
|
||||||
],
|
|
||||||
"cache-from": [
|
|
||||||
"user/app:cache",
|
|
||||||
"type=local,src=path/to/cache"
|
|
||||||
],
|
|
||||||
"cache-to": [
|
|
||||||
"type=local,dest=path/to/cache"
|
|
||||||
],
|
|
||||||
"platforms": [
|
|
||||||
"linux/amd64",
|
|
||||||
"linux/arm64"
|
|
||||||
],
|
|
||||||
"pull": true
|
|
||||||
},
|
|
||||||
"aws": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "./aws.Dockerfile",
|
|
||||||
"args": {
|
|
||||||
"CT_ECR": "foo",
|
|
||||||
"CT_TAG": "bar"
|
|
||||||
},
|
|
||||||
"tags": [
|
|
||||||
"ct-fake-aws:bar"
|
|
||||||
],
|
|
||||||
"secret": [
|
|
||||||
"id=mysecret,src=./secret",
|
|
||||||
"id=mysecret2,src=./secret2"
|
|
||||||
],
|
|
||||||
"platforms": [
|
|
||||||
"linux/arm64"
|
|
||||||
],
|
|
||||||
"output": [
|
|
||||||
"type=docker"
|
|
||||||
],
|
|
||||||
"no-cache": true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Complete list of valid fields for `x-bake`:
|
|
||||||
|
|
||||||
* `cache-from`
|
|
||||||
* `cache-to`
|
|
||||||
* `contexts`
|
|
||||||
* `no-cache`
|
|
||||||
* `no-cache-filter`
|
|
||||||
* `output`
|
|
||||||
* `platforms`
|
|
||||||
* `pull`
|
|
||||||
* `secret`
|
|
||||||
* `ssh`
|
|
||||||
* `tags`
|
|
||||||
@@ -1,216 +0,0 @@
|
|||||||
# Configuring builds
|
|
||||||
|
|
||||||
Bake supports loading build definition from files, but sometimes you need even
|
|
||||||
more flexibility to configure this definition.
|
|
||||||
|
|
||||||
For this use case, you can define variables inside the bake files that can be
|
|
||||||
set by the user with environment variables or by [attribute definitions](#global-scope-attributes)
|
|
||||||
in other bake files. If you wish to change a specific value for a single
|
|
||||||
invocation you can use the `--set` flag [from the command line](#from-command-line).
|
|
||||||
|
|
||||||
## Global scope attributes
|
|
||||||
|
|
||||||
You can define global scope attributes in HCL/JSON and use them for code reuse
|
|
||||||
and setting values for variables. This means you can do a "data-only" HCL file
|
|
||||||
with the values you want to set/override and use it in the list of regular
|
|
||||||
output files.
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
variable "FOO" {
|
|
||||||
default = "abc"
|
|
||||||
}
|
|
||||||
|
|
||||||
target "app" {
|
|
||||||
args = {
|
|
||||||
v1 = "pre-${FOO}"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
You can use this file directly:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --print app
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"app"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"app": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"args": {
|
|
||||||
"v1": "pre-abc"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Or create an override configuration file:
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# env.hcl
|
|
||||||
WHOAMI="myuser"
|
|
||||||
FOO="def-${WHOAMI}"
|
|
||||||
```
|
|
||||||
|
|
||||||
And invoke bake together with both of the files:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake -f docker-bake.hcl -f env.hcl --print app
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"app"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"app": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"args": {
|
|
||||||
"v1": "pre-def-myuser"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## From command line
|
|
||||||
|
|
||||||
You can also override target configurations from the command line with the
|
|
||||||
[`--set` flag](https://docs.docker.com/engine/reference/commandline/buildx_bake/#set):
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
target "app" {
|
|
||||||
args = {
|
|
||||||
mybuildarg = "foo"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --set app.args.mybuildarg=bar --set app.platform=linux/arm64 app --print
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"app"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"app": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"args": {
|
|
||||||
"mybuildarg": "bar"
|
|
||||||
},
|
|
||||||
"platforms": [
|
|
||||||
"linux/arm64"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Pattern matching syntax defined in [https://golang.org/pkg/path/#Match](https://golang.org/pkg/path/#Match)
|
|
||||||
is also supported:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --set foo*.args.mybuildarg=value # overrides build arg for all targets starting with "foo"
|
|
||||||
$ docker buildx bake --set *.platform=linux/arm64 # overrides platform for all targets
|
|
||||||
$ docker buildx bake --set foo*.no-cache # bypass caching only for targets starting with "foo"
|
|
||||||
```
|
|
||||||
|
|
||||||
Complete list of overridable fields:
|
|
||||||
|
|
||||||
* `args`
|
|
||||||
* `cache-from`
|
|
||||||
* `cache-to`
|
|
||||||
* `context`
|
|
||||||
* `dockerfile`
|
|
||||||
* `labels`
|
|
||||||
* `no-cache`
|
|
||||||
* `output`
|
|
||||||
* `platform`
|
|
||||||
* `pull`
|
|
||||||
* `secrets`
|
|
||||||
* `ssh`
|
|
||||||
* `tags`
|
|
||||||
* `target`
|
|
||||||
|
|
||||||
## Using variables in variables across files
|
|
||||||
|
|
||||||
When multiple files are specified, one file can use variables defined in
|
|
||||||
another file.
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake1.hcl
|
|
||||||
variable "FOO" {
|
|
||||||
default = upper("${BASE}def")
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "BAR" {
|
|
||||||
default = "-${FOO}-"
|
|
||||||
}
|
|
||||||
|
|
||||||
target "app" {
|
|
||||||
args = {
|
|
||||||
v1 = "pre-${BAR}"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake2.hcl
|
|
||||||
variable "BASE" {
|
|
||||||
default = "abc"
|
|
||||||
}
|
|
||||||
|
|
||||||
target "app" {
|
|
||||||
args = {
|
|
||||||
v2 = "${FOO}-post"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake -f docker-bake1.hcl -f docker-bake2.hcl --print app
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"app"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"app": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"args": {
|
|
||||||
"v1": "pre--ABCDEF-",
|
|
||||||
"v2": "ABCDEF-post"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
@@ -1,440 +0,0 @@
|
|||||||
# Bake file definition
|
|
||||||
|
|
||||||
`buildx bake` supports HCL, JSON and Compose file format for defining build
|
|
||||||
[groups](#group), [targets](#target) as well as [variables](#variable) and
|
|
||||||
[functions](#functions). It looks for build definition files in the current
|
|
||||||
directory in the following order:
|
|
||||||
|
|
||||||
* `docker-compose.yml`
|
|
||||||
* `docker-compose.yaml`
|
|
||||||
* `docker-bake.json`
|
|
||||||
* `docker-bake.override.json`
|
|
||||||
* `docker-bake.hcl`
|
|
||||||
* `docker-bake.override.hcl`
|
|
||||||
|
|
||||||
## Specification
|
|
||||||
|
|
||||||
Inside a bake file you can declare group, target and variable blocks to define
|
|
||||||
project specific reusable build flows.
|
|
||||||
|
|
||||||
### Target
|
|
||||||
|
|
||||||
A target reflects a single docker build invocation with the same options that
|
|
||||||
you would specify for `docker build`:
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
target "webapp-dev" {
|
|
||||||
dockerfile = "Dockerfile.webapp"
|
|
||||||
tags = ["docker.io/username/webapp:latest"]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
```console
|
|
||||||
$ docker buildx bake webapp-dev
|
|
||||||
```
|
|
||||||
|
|
||||||
> **Note**
|
|
||||||
>
|
|
||||||
> In the case of compose files, each service corresponds to a target.
|
|
||||||
> If compose service name contains a dot it will be replaced with an underscore.
|
|
||||||
|
|
||||||
Complete list of valid target fields available for [HCL](#hcl-definition) and
|
|
||||||
[JSON](#json-definition) definitions:
|
|
||||||
|
|
||||||
| Name | Type | Description |
|
|
||||||
|---------------------|--------|-------------------------------------------------------------------------------------------------------------------------------------------------|
|
|
||||||
| `inherits` | List | [Inherit build options](#merging-and-inheritance) from other targets |
|
|
||||||
| `args` | Map | Set build-time variables (same as [`--build-arg` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
|
|
||||||
| `cache-from` | List | External cache sources (same as [`--cache-from` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
|
|
||||||
| `cache-to` | List | Cache export destinations (same as [`--cache-to` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
|
|
||||||
| `context` | String | Set of files located in the specified path or URL |
|
|
||||||
| `contexts` | Map | Additional build contexts (same as [`--build-context` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
|
|
||||||
| `dockerfile` | String | Name of the Dockerfile (same as [`--file` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
|
|
||||||
| `dockerfile-inline` | String | Inline Dockerfile content |
|
|
||||||
| `labels` | Map | Set metadata for an image (same as [`--label` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
|
|
||||||
| `no-cache` | Bool | Do not use cache when building the image (same as [`--no-cache` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
|
|
||||||
| `no-cache-filter` | List | Do not cache specified stages (same as [`--no-cache-filter` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
|
|
||||||
| `output` | List | Output destination (same as [`--output` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
|
|
||||||
| `platforms` | List | Set target platforms for build (same as [`--platform` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
|
|
||||||
| `pull` | Bool | Always attempt to pull all referenced images (same as [`--pull` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
|
|
||||||
| `secret` | List | Secret to expose to the build (same as [`--secret` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
|
|
||||||
| `ssh` | List | SSH agent socket or keys to expose to the build (same as [`--ssh` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
|
|
||||||
| `tags` | List | Name and optionally a tag in the format `name:tag` (same as [`--tag` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
|
|
||||||
| `target` | String | Set the target build stage to build (same as [`--target` flag](https://docs.docker.com/engine/reference/commandline/buildx_build/)) |
|
|
||||||
|
|
||||||
### Group
|
|
||||||
|
|
||||||
A group is a grouping of targets:
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
group "build" {
|
|
||||||
targets = ["db", "webapp-dev"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "webapp-dev" {
|
|
||||||
dockerfile = "Dockerfile.webapp"
|
|
||||||
tags = ["docker.io/username/webapp:latest"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "db" {
|
|
||||||
dockerfile = "Dockerfile.db"
|
|
||||||
tags = ["docker.io/username/db"]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
```console
|
|
||||||
$ docker buildx bake build
|
|
||||||
```
|
|
||||||
|
|
||||||
### Variable
|
|
||||||
|
|
||||||
Similar to how Terraform provides a way to [define variables](https://www.terraform.io/docs/configuration/variables.html#declaring-an-input-variable),
|
|
||||||
the HCL file format also supports variable block definitions. These can be used
|
|
||||||
to define variables with values provided by the current environment, or a
|
|
||||||
default value when unset:
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
variable "TAG" {
|
|
||||||
default = "latest"
|
|
||||||
}
|
|
||||||
|
|
||||||
target "webapp-dev" {
|
|
||||||
dockerfile = "Dockerfile.webapp"
|
|
||||||
tags = ["docker.io/username/webapp:${TAG}"]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
```console
|
|
||||||
$ docker buildx bake webapp-dev # will use the default value "latest"
|
|
||||||
$ TAG=dev docker buildx bake webapp-dev # will use the TAG environment variable value
|
|
||||||
```
|
|
||||||
|
|
||||||
> **Tip**
|
|
||||||
>
|
|
||||||
> See also the [Configuring builds](configuring-build.md) page for advanced usage.
|
|
||||||
|
|
||||||
### Functions
|
|
||||||
|
|
||||||
A [set of generally useful functions](https://github.com/docker/buildx/blob/master/bake/hclparser/stdlib.go)
|
|
||||||
provided by [go-cty](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib)
|
|
||||||
are available for use in HCL files:
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
target "webapp-dev" {
|
|
||||||
dockerfile = "Dockerfile.webapp"
|
|
||||||
tags = ["docker.io/username/webapp:latest"]
|
|
||||||
args = {
|
|
||||||
buildno = "${add(123, 1)}"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
In addition, [user defined functions](https://github.com/hashicorp/hcl/tree/main/ext/userfunc)
|
|
||||||
are also supported:
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
function "increment" {
|
|
||||||
params = [number]
|
|
||||||
result = number + 1
|
|
||||||
}
|
|
||||||
|
|
||||||
target "webapp-dev" {
|
|
||||||
dockerfile = "Dockerfile.webapp"
|
|
||||||
tags = ["docker.io/username/webapp:latest"]
|
|
||||||
args = {
|
|
||||||
buildno = "${increment(123)}"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
> **Note**
|
|
||||||
>
|
|
||||||
> See [User defined HCL functions](hcl-funcs.md) page for more details.
|
|
||||||
|
|
||||||
## Built-in variables
|
|
||||||
|
|
||||||
* `BAKE_CMD_CONTEXT` can be used to access the main `context` for bake command
|
|
||||||
from a bake file that has been [imported remotely](file-definition.md#remote-definition).
|
|
||||||
* `BAKE_LOCAL_PLATFORM` returns the current platform's default platform
|
|
||||||
specification (e.g. `linux/amd64`).
|
|
||||||
|
|
||||||
## Merging and inheritance
|
|
||||||
|
|
||||||
Multiple files can include the same target and final build options will be
|
|
||||||
determined by merging them together:
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
target "webapp-dev" {
|
|
||||||
dockerfile = "Dockerfile.webapp"
|
|
||||||
tags = ["docker.io/username/webapp:latest"]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
```hcl
|
|
||||||
# docker-bake2.hcl
|
|
||||||
target "webapp-dev" {
|
|
||||||
tags = ["docker.io/username/webapp:dev"]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
```console
|
|
||||||
$ docker buildx bake -f docker-bake.hcl -f docker-bake2.hcl webapp-dev
|
|
||||||
```
|
|
||||||
|
|
||||||
A group can specify its list of targets with the `targets` option. A target can
|
|
||||||
inherit build options by setting the `inherits` option to the list of targets or
|
|
||||||
groups to inherit from:
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
target "webapp-dev" {
|
|
||||||
dockerfile = "Dockerfile.webapp"
|
|
||||||
tags = ["docker.io/username/webapp:${TAG}"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "webapp-release" {
|
|
||||||
inherits = ["webapp-dev"]
|
|
||||||
platforms = ["linux/amd64", "linux/arm64"]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## `default` target/group
|
|
||||||
|
|
||||||
When you invoke `bake` you specify what targets/groups you want to build. If no
|
|
||||||
arguments is specified, the group/target named `default` will be built:
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
target "default" {
|
|
||||||
dockerfile = "Dockerfile.webapp"
|
|
||||||
tags = ["docker.io/username/webapp:latest"]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
```console
|
|
||||||
$ docker buildx bake
|
|
||||||
```
|
|
||||||
|
|
||||||
## Definitions
|
|
||||||
|
|
||||||
### HCL definition
|
|
||||||
|
|
||||||
HCL definition file is recommended as its experience is more aligned with buildx UX
|
|
||||||
and also allows better code reuse, different target groups and extended features.
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
variable "TAG" {
|
|
||||||
default = "latest"
|
|
||||||
}
|
|
||||||
|
|
||||||
group "default" {
|
|
||||||
targets = ["db", "webapp-dev"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "webapp-dev" {
|
|
||||||
dockerfile = "Dockerfile.webapp"
|
|
||||||
tags = ["docker.io/username/webapp:${TAG}"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "webapp-release" {
|
|
||||||
inherits = ["webapp-dev"]
|
|
||||||
platforms = ["linux/amd64", "linux/arm64"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "db" {
|
|
||||||
dockerfile = "Dockerfile.db"
|
|
||||||
tags = ["docker.io/username/db"]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### JSON definition
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"variable": {
|
|
||||||
"TAG": {
|
|
||||||
"default": "latest"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"db",
|
|
||||||
"webapp-dev"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"webapp-dev": {
|
|
||||||
"dockerfile": "Dockerfile.webapp",
|
|
||||||
"tags": [
|
|
||||||
"docker.io/username/webapp:${TAG}"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"webapp-release": {
|
|
||||||
"inherits": [
|
|
||||||
"webapp-dev"
|
|
||||||
],
|
|
||||||
"platforms": [
|
|
||||||
"linux/amd64",
|
|
||||||
"linux/arm64"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"db": {
|
|
||||||
"dockerfile": "Dockerfile.db",
|
|
||||||
"tags": [
|
|
||||||
"docker.io/username/db"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Compose file
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# docker-compose.yml
|
|
||||||
services:
|
|
||||||
webapp:
|
|
||||||
image: docker.io/username/webapp:latest
|
|
||||||
build:
|
|
||||||
dockerfile: Dockerfile.webapp
|
|
||||||
|
|
||||||
db:
|
|
||||||
image: docker.io/username/db
|
|
||||||
build:
|
|
||||||
dockerfile: Dockerfile.db
|
|
||||||
```
|
|
||||||
|
|
||||||
> **Note**
|
|
||||||
>
|
|
||||||
> See [Building from Compose file](compose-file.md) page for more details.
|
|
||||||
|
|
||||||
## Remote definition
|
|
||||||
|
|
||||||
You can also build bake files directly from a remote Git repository or HTTPS URL:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake "https://github.com/docker/cli.git#v20.10.11" --print
|
|
||||||
#1 [internal] load git source https://github.com/docker/cli.git#v20.10.11
|
|
||||||
#1 0.745 e8f1871b077b64bcb4a13334b7146492773769f7 refs/tags/v20.10.11
|
|
||||||
#1 2.022 From https://github.com/docker/cli
|
|
||||||
#1 2.022 * [new tag] v20.10.11 -> v20.10.11
|
|
||||||
#1 DONE 2.9s
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"binary"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"binary": {
|
|
||||||
"context": "https://github.com/docker/cli.git#v20.10.11",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"args": {
|
|
||||||
"BASE_VARIANT": "alpine",
|
|
||||||
"GO_STRIP": "",
|
|
||||||
"VERSION": ""
|
|
||||||
},
|
|
||||||
"target": "binary",
|
|
||||||
"platforms": [
|
|
||||||
"local"
|
|
||||||
],
|
|
||||||
"output": [
|
|
||||||
"build"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
As you can see the context is fixed to `https://github.com/docker/cli.git` even if
|
|
||||||
[no context is actually defined](https://github.com/docker/cli/blob/2776a6d694f988c0c1df61cad4bfac0f54e481c8/docker-bake.hcl#L17-L26)
|
|
||||||
in the definition.
|
|
||||||
|
|
||||||
If you want to access the main context for bake command from a bake file
|
|
||||||
that has been imported remotely, you can use the [`BAKE_CMD_CONTEXT` built-in var](#built-in-variables).
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ cat https://raw.githubusercontent.com/tonistiigi/buildx/remote-test/docker-bake.hcl
|
|
||||||
```
|
|
||||||
```hcl
|
|
||||||
target "default" {
|
|
||||||
context = BAKE_CMD_CONTEXT
|
|
||||||
dockerfile-inline = <<EOT
|
|
||||||
FROM alpine
|
|
||||||
WORKDIR /src
|
|
||||||
COPY . .
|
|
||||||
RUN ls -l && stop
|
|
||||||
EOT
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test" --print
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"target": {
|
|
||||||
"default": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"dockerfile-inline": "FROM alpine\nWORKDIR /src\nCOPY . .\nRUN ls -l \u0026\u0026 stop\n"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ touch foo bar
|
|
||||||
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test"
|
|
||||||
```
|
|
||||||
```text
|
|
||||||
...
|
|
||||||
> [4/4] RUN ls -l && stop:
|
|
||||||
#8 0.101 total 0
|
|
||||||
#8 0.102 -rw-r--r-- 1 root root 0 Jul 27 18:47 bar
|
|
||||||
#8 0.102 -rw-r--r-- 1 root root 0 Jul 27 18:47 foo
|
|
||||||
#8 0.102 /bin/sh: stop: not found
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test" "https://github.com/docker/cli.git#v20.10.11" --print
|
|
||||||
#1 [internal] load git source https://github.com/tonistiigi/buildx.git#remote-test
|
|
||||||
#1 0.429 577303add004dd7efeb13434d69ea030d35f7888 refs/heads/remote-test
|
|
||||||
#1 CACHED
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"target": {
|
|
||||||
"default": {
|
|
||||||
"context": "https://github.com/docker/cli.git#v20.10.11",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"dockerfile-inline": "FROM alpine\nWORKDIR /src\nCOPY . .\nRUN ls -l \u0026\u0026 stop\n"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test" "https://github.com/docker/cli.git#v20.10.11"
|
|
||||||
```
|
|
||||||
```text
|
|
||||||
...
|
|
||||||
> [4/4] RUN ls -l && stop:
|
|
||||||
#8 0.136 drwxrwxrwx 5 root root 4096 Jul 27 18:31 kubernetes
|
|
||||||
#8 0.136 drwxrwxrwx 3 root root 4096 Jul 27 18:31 man
|
|
||||||
#8 0.136 drwxrwxrwx 2 root root 4096 Jul 27 18:31 opts
|
|
||||||
#8 0.136 -rw-rw-rw- 1 root root 1893 Jul 27 18:31 poule.yml
|
|
||||||
#8 0.136 drwxrwxrwx 7 root root 4096 Jul 27 18:31 scripts
|
|
||||||
#8 0.136 drwxrwxrwx 3 root root 4096 Jul 27 18:31 service
|
|
||||||
#8 0.136 drwxrwxrwx 2 root root 4096 Jul 27 18:31 templates
|
|
||||||
#8 0.136 drwxrwxrwx 10 root root 4096 Jul 27 18:31 vendor
|
|
||||||
#8 0.136 -rwxrwxrwx 1 root root 9620 Jul 27 18:31 vendor.conf
|
|
||||||
#8 0.136 /bin/sh: stop: not found
|
|
||||||
```
|
|
||||||
@@ -1,327 +0,0 @@
|
|||||||
# User defined HCL functions
|
|
||||||
|
|
||||||
## Using interpolation to tag an image with the git sha
|
|
||||||
|
|
||||||
As shown in the [File definition](file-definition.md#variable) page, `bake`
|
|
||||||
supports variable blocks which are assigned to matching environment variables
|
|
||||||
or default values:
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
variable "TAG" {
|
|
||||||
default = "latest"
|
|
||||||
}
|
|
||||||
|
|
||||||
group "default" {
|
|
||||||
targets = ["webapp"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "webapp" {
|
|
||||||
tags = ["docker.io/username/webapp:${TAG}"]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
alternatively, in json format:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"variable": {
|
|
||||||
"TAG": {
|
|
||||||
"default": "latest"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": ["webapp"]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"webapp": {
|
|
||||||
"tags": ["docker.io/username/webapp:${TAG}"]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --print webapp
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"webapp"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"webapp": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"tags": [
|
|
||||||
"docker.io/username/webapp:latest"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ TAG=$(git rev-parse --short HEAD) docker buildx bake --print webapp
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"webapp"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"webapp": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"tags": [
|
|
||||||
"docker.io/username/webapp:985e9e9"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Using the `add` function
|
|
||||||
|
|
||||||
You can use [`go-cty` stdlib functions](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib).
|
|
||||||
Here we are using the `add` function.
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
variable "TAG" {
|
|
||||||
default = "latest"
|
|
||||||
}
|
|
||||||
|
|
||||||
group "default" {
|
|
||||||
targets = ["webapp"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "webapp" {
|
|
||||||
args = {
|
|
||||||
buildno = "${add(123, 1)}"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --print webapp
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"webapp"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"webapp": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"args": {
|
|
||||||
"buildno": "124"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Defining an `increment` function
|
|
||||||
|
|
||||||
It also supports [user defined functions](https://github.com/hashicorp/hcl/tree/main/ext/userfunc).
|
|
||||||
The following example defines a simple `increment` function.
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
function "increment" {
|
|
||||||
params = [number]
|
|
||||||
result = number + 1
|
|
||||||
}
|
|
||||||
|
|
||||||
group "default" {
|
|
||||||
targets = ["webapp"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "webapp" {
|
|
||||||
args = {
|
|
||||||
buildno = "${increment(123)}"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --print webapp
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"webapp"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"webapp": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"args": {
|
|
||||||
"buildno": "124"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Only adding tags if a variable is not empty using `notequal`
|
|
||||||
|
|
||||||
Here we are using the conditional `notequal` function which is just for
|
|
||||||
symmetry with the `equal` one.
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
variable "TAG" {default="" }
|
|
||||||
|
|
||||||
group "default" {
|
|
||||||
targets = [
|
|
||||||
"webapp",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "webapp" {
|
|
||||||
context="."
|
|
||||||
dockerfile="Dockerfile"
|
|
||||||
tags = [
|
|
||||||
"my-image:latest",
|
|
||||||
notequal("",TAG) ? "my-image:${TAG}": "",
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --print webapp
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"webapp"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"webapp": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"tags": [
|
|
||||||
"my-image:latest"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Using variables in functions
|
|
||||||
|
|
||||||
Variables can reference other variables, just like target blocks can. Stdlib
|
|
||||||
functions can also be called but user functions can't at the moment.
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
variable "REPO" {
|
|
||||||
default = "user/repo"
|
|
||||||
}
|
|
||||||
|
|
||||||
function "tag" {
|
|
||||||
params = [tag]
|
|
||||||
result = ["${REPO}:${tag}"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "webapp" {
|
|
||||||
tags = tag("v1")
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --print webapp
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"webapp"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"webapp": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"tags": [
|
|
||||||
"user/repo:v1"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Using typed variables
|
|
||||||
|
|
||||||
Non-string variables are also accepted. The value passed with env is parsed
|
|
||||||
into a suitable type first.
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
variable "FOO" {
|
|
||||||
default = 3
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "IS_FOO" {
|
|
||||||
default = true
|
|
||||||
}
|
|
||||||
|
|
||||||
target "app" {
|
|
||||||
args = {
|
|
||||||
v1 = FOO > 5 ? "higher" : "lower"
|
|
||||||
v2 = IS_FOO ? "yes" : "no"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --print app
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"app"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"app": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"args": {
|
|
||||||
"v1": "lower",
|
|
||||||
"v2": "yes"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
@@ -1,36 +0,0 @@
|
|||||||
# High-level build options with Bake
|
|
||||||
|
|
||||||
> This command is experimental.
|
|
||||||
>
|
|
||||||
> The design of bake is in early stages, and we are looking for [feedback from users](https://github.com/docker/buildx/issues).
|
|
||||||
{: .experimental }
|
|
||||||
|
|
||||||
Buildx also aims to provide support for high-level build concepts that go beyond
|
|
||||||
invoking a single build command. We want to support building all the images in
|
|
||||||
your application together and let the users define project specific reusable
|
|
||||||
build flows that can then be easily invoked by anyone.
|
|
||||||
|
|
||||||
[BuildKit](https://github.com/moby/buildkit) efficiently handles multiple
|
|
||||||
concurrent build requests and de-duplicating work. The build commands can be
|
|
||||||
combined with general-purpose command runners (for example, `make`). However,
|
|
||||||
these tools generally invoke builds in sequence and therefore cannot leverage
|
|
||||||
the full potential of BuildKit parallelization, or combine BuildKit's output
|
|
||||||
for the user. For this use case, we have added a command called
|
|
||||||
[`docker buildx bake`](https://docs.docker.com/engine/reference/commandline/buildx_bake/).
|
|
||||||
|
|
||||||
The `bake` command supports building images from HCL, JSON and Compose files.
|
|
||||||
This is similar to [`docker compose build`](https://docs.docker.com/compose/reference/build/),
|
|
||||||
but allowing all the services to be built concurrently as part of a single
|
|
||||||
request. If multiple files are specified they are all read and configurations are
|
|
||||||
combined.
|
|
||||||
|
|
||||||
We recommend using HCL files as its experience is more aligned with buildx UX
|
|
||||||
and also allows better code reuse, different target groups and extended features.
|
|
||||||
|
|
||||||
## Next steps
|
|
||||||
|
|
||||||
* [File definition](file-definition.md)
|
|
||||||
* [Configuring builds](configuring-build.md)
|
|
||||||
* [User defined HCL functions](hcl-funcs.md)
|
|
||||||
* [Defining additional build contexts and linking targets](build-contexts.md)
|
|
||||||
* [Building from Compose file](compose-file.md)
|
|
||||||
@@ -1,48 +0,0 @@
|
|||||||
# CI/CD
|
|
||||||
|
|
||||||
## GitHub Actions
|
|
||||||
|
|
||||||
Docker provides a [GitHub Action that will build and push your image](https://github.com/docker/build-push-action/#about)
|
|
||||||
using Buildx. Here is a simple workflow:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
name: ci
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- 'main'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
docker:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v2
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v2
|
|
||||||
-
|
|
||||||
name: Login to DockerHub
|
|
||||||
uses: docker/login-action@v2
|
|
||||||
with:
|
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
|
||||||
-
|
|
||||||
name: Build and push
|
|
||||||
uses: docker/build-push-action@v2
|
|
||||||
with:
|
|
||||||
push: true
|
|
||||||
tags: user/app:latest
|
|
||||||
```
|
|
||||||
|
|
||||||
In this example we are also using 3 other actions:
|
|
||||||
|
|
||||||
* [`setup-buildx`](https://github.com/docker/setup-buildx-action) action will create and boot a builder using by
|
|
||||||
default the `docker-container` [builder driver](../reference/buildx_create.md#driver).
|
|
||||||
This is **not required but recommended** using it to be able to build multi-platform images, export cache, etc.
|
|
||||||
* [`setup-qemu`](https://github.com/docker/setup-qemu-action) action can be useful if you want
|
|
||||||
to add emulation support with QEMU to be able to build against more platforms.
|
|
||||||
* [`login`](https://github.com/docker/login-action) action will take care to log
|
|
||||||
in against a Docker registry.
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
# CNI networking
|
|
||||||
|
|
||||||
It can be useful to use a bridge network for your builder if for example you
|
|
||||||
encounter a network port contention during multiple builds. If you're using
|
|
||||||
the BuildKit image, CNI is not yet available in it, but you can create
|
|
||||||
[a custom BuildKit image with CNI support](https://github.com/moby/buildkit/blob/master/docs/cni-networking.md).
|
|
||||||
|
|
||||||
Now build this image:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --tag buildkit-cni:local --load .
|
|
||||||
```
|
|
||||||
|
|
||||||
Then [create a `docker-container` builder](https://docs.docker.com/engine/reference/commandline/buildx_create/) that
|
|
||||||
will use this image:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create --use \
|
|
||||||
--name mybuilder \
|
|
||||||
--driver docker-container \
|
|
||||||
--driver-opt "image=buildkit-cni:local" \
|
|
||||||
--buildkitd-flags "--oci-worker-net=cni"
|
|
||||||
```
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
# Color output controls
|
|
||||||
|
|
||||||
Buildx has support for modifying the colors that are used to output information
|
|
||||||
to the terminal. You can set the environment variable `BUILDKIT_COLORS` to
|
|
||||||
something like `run=123,20,245:error=yellow:cancel=blue:warning=white` to set
|
|
||||||
the colors that you would like to use:
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
Setting `NO_COLOR` to anything will disable any colorized output as recommended
|
|
||||||
by [no-color.org](https://no-color.org/):
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
> **Note**
|
|
||||||
>
|
|
||||||
> Parsing errors will be reported but ignored. This will result in default
|
|
||||||
> color values being used where needed.
|
|
||||||
|
|
||||||
See also [the list of pre-defined colors](https://github.com/moby/buildkit/blob/master/util/progress/progressui/colors.go).
|
|
||||||
@@ -1,34 +0,0 @@
|
|||||||
# Using a custom network
|
|
||||||
|
|
||||||
[Create a network](https://docs.docker.com/engine/reference/commandline/network_create/)
|
|
||||||
named `foonet`:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker network create foonet
|
|
||||||
```
|
|
||||||
|
|
||||||
[Create a `docker-container` builder](https://docs.docker.com/engine/reference/commandline/buildx_create/)
|
|
||||||
named `mybuilder` that will use this network:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create --use \
|
|
||||||
--name mybuilder \
|
|
||||||
--driver docker-container \
|
|
||||||
--driver-opt "network=foonet"
|
|
||||||
```
|
|
||||||
|
|
||||||
Boot and [inspect `mybuilder`](https://docs.docker.com/engine/reference/commandline/buildx_inspect/):
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx inspect --bootstrap
|
|
||||||
```
|
|
||||||
|
|
||||||
[Inspect the builder container](https://docs.docker.com/engine/reference/commandline/inspect/)
|
|
||||||
and see what network is being used:
|
|
||||||
|
|
||||||
{% raw %}
|
|
||||||
```console
|
|
||||||
$ docker inspect buildx_buildkit_mybuilder0 --format={{.NetworkSettings.Networks}}
|
|
||||||
map[foonet:0xc00018c0c0]
|
|
||||||
```
|
|
||||||
{% endraw %}
|
|
||||||
@@ -1,63 +0,0 @@
|
|||||||
# Using a custom registry configuration
|
|
||||||
|
|
||||||
If you [create a `docker-container` or `kubernetes` builder](https://docs.docker.com/engine/reference/commandline/buildx_create/) and
|
|
||||||
have specified certificates for registries in the [BuildKit daemon configuration](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md),
|
|
||||||
the files will be copied into the container under `/etc/buildkit/certs` and
|
|
||||||
configuration will be updated to reflect that.
|
|
||||||
|
|
||||||
Take the following `buildkitd.toml` configuration that will be used for
|
|
||||||
pushing an image to this registry using self-signed certificates:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
# /etc/buildkitd.toml
|
|
||||||
debug = true
|
|
||||||
[registry."myregistry.com"]
|
|
||||||
ca=["/etc/certs/myregistry.pem"]
|
|
||||||
[[registry."myregistry.com".keypair]]
|
|
||||||
key="/etc/certs/myregistry_key.pem"
|
|
||||||
cert="/etc/certs/myregistry_cert.pem"
|
|
||||||
```
|
|
||||||
|
|
||||||
Here we have configured a self-signed certificate for `myregistry.com` registry.
|
|
||||||
|
|
||||||
Now [create a `docker-container` builder](https://docs.docker.com/engine/reference/commandline/buildx_create/)
|
|
||||||
that will use this BuildKit configuration:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create --use \
|
|
||||||
--name mybuilder \
|
|
||||||
--driver docker-container \
|
|
||||||
--config /etc/buildkitd.toml
|
|
||||||
```
|
|
||||||
|
|
||||||
Inspecting the builder container, you can see that buildkitd configuration
|
|
||||||
has changed:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker exec -it buildx_buildkit_mybuilder0 cat /etc/buildkit/buildkitd.toml
|
|
||||||
```
|
|
||||||
```toml
|
|
||||||
debug = true
|
|
||||||
|
|
||||||
[registry]
|
|
||||||
|
|
||||||
[registry."myregistry.com"]
|
|
||||||
ca = ["/etc/buildkit/certs/myregistry.com/myregistry.pem"]
|
|
||||||
|
|
||||||
[[registry."myregistry.com".keypair]]
|
|
||||||
cert = "/etc/buildkit/certs/myregistry.com/myregistry_cert.pem"
|
|
||||||
key = "/etc/buildkit/certs/myregistry.com/myregistry_key.pem"
|
|
||||||
```
|
|
||||||
|
|
||||||
And certificates copied inside the container:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker exec -it buildx_buildkit_mybuilder0 ls /etc/buildkit/certs/myregistry.com/
|
|
||||||
myregistry.pem myregistry_cert.pem myregistry_key.pem
|
|
||||||
```
|
|
||||||
|
|
||||||
Now you should be able to push to the registry with this builder:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --push --tag myregistry.com/myimage:latest .
|
|
||||||
```
|
|
||||||
@@ -1,75 +0,0 @@
|
|||||||
# Docker container driver
|
|
||||||
|
|
||||||
The buildx docker-container driver allows creation of a managed and
|
|
||||||
customizable BuildKit environment inside a dedicated Docker container.
|
|
||||||
|
|
||||||
Using the docker-container driver has a couple of advantages over the basic
|
|
||||||
docker driver. Firstly, we can manually override the version of buildkit to
|
|
||||||
use, meaning that we can access the latest and greatest features as soon as
|
|
||||||
they're released, instead of waiting to upgrade to a newer version of Docker.
|
|
||||||
Additionally, we can access more complex features like multi-architecture
|
|
||||||
builds and the more advanced cache exporters, which are currently unsupported
|
|
||||||
in the default docker driver.
|
|
||||||
|
|
||||||
We can easily create a new builder that uses the docker-container driver:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create --name container --driver docker-container
|
|
||||||
container
|
|
||||||
```
|
|
||||||
|
|
||||||
We should then be able to see it on our list of available builders:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx ls
|
|
||||||
NAME/NODE DRIVER/ENDPOINT STATUS BUILDKIT PLATFORMS
|
|
||||||
container docker-container
|
|
||||||
container0 desktop-linux inactive
|
|
||||||
default docker
|
|
||||||
default default running 20.10.17 linux/amd64, linux/386
|
|
||||||
```
|
|
||||||
|
|
||||||
If we trigger a build, the appropriate `moby/buildkit` image will be pulled
|
|
||||||
from [Docker Hub](https://hub.docker.com/u/moby/buildkit), the image started,
|
|
||||||
and our build submitted to our containerized build server.
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build -t <image> --builder=container .
|
|
||||||
WARNING: No output specified with docker-container driver. Build result will only remain in the build cache. To push result image into registry use --push or to load image into docker use --load
|
|
||||||
#1 [internal] booting buildkit
|
|
||||||
#1 pulling image moby/buildkit:buildx-stable-1
|
|
||||||
#1 pulling image moby/buildkit:buildx-stable-1 1.9s done
|
|
||||||
#1 creating container buildx_buildkit_container0
|
|
||||||
#1 creating container buildx_buildkit_container0 0.5s done
|
|
||||||
#1 DONE 2.4s
|
|
||||||
...
|
|
||||||
```
|
|
||||||
|
|
||||||
Note the warning "Build result will only remain in the build cache" - unlike
|
|
||||||
the `docker` driver, the built image must be explicitly loaded into the local
|
|
||||||
image store. We can use the `--load` flag for this:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --load -t <image> --builder=container .
|
|
||||||
...
|
|
||||||
=> exporting to oci image format 7.7s
|
|
||||||
=> => exporting layers 4.9s
|
|
||||||
=> => exporting manifest sha256:4e4ca161fa338be2c303445411900ebbc5fc086153a0b846ac12996960b479d3 0.0s
|
|
||||||
=> => exporting config sha256:adf3eec768a14b6e183a1010cb96d91155a82fd722a1091440c88f3747f1f53f 0.0s
|
|
||||||
=> => sending tarball 2.8s
|
|
||||||
=> importing to docker
|
|
||||||
```
|
|
||||||
|
|
||||||
The image should then be available in the image store:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker image ls
|
|
||||||
REPOSITORY TAG IMAGE ID CREATED SIZE
|
|
||||||
<image> latest adf3eec768a1 2 minutes ago 197MB
|
|
||||||
```
|
|
||||||
|
|
||||||
## Further reading
|
|
||||||
|
|
||||||
For more information on the docker-container driver, see the [buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver).
|
|
||||||
|
|
||||||
<!--- FIXME: for 0.9, make reference link relative --->
|
|
||||||
@@ -1,50 +0,0 @@
|
|||||||
# Docker driver
|
|
||||||
|
|
||||||
The buildx docker driver is the default builtin driver, that uses the BuildKit
|
|
||||||
server components built directly into the docker engine.
|
|
||||||
|
|
||||||
No setup should be required for the docker driver - it should already be
|
|
||||||
configured for you:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx ls
|
|
||||||
NAME/NODE DRIVER/ENDPOINT STATUS BUILDKIT PLATFORMS
|
|
||||||
default docker
|
|
||||||
default default running 20.10.17 linux/amd64, linux/386
|
|
||||||
```
|
|
||||||
|
|
||||||
This builder is ready to build out-of-the-box, requiring no extra setup,
|
|
||||||
so you can get going with a `docker buildx build` as soon as you like.
|
|
||||||
|
|
||||||
Depending on your personal setup, you may find multiple builders in your list
|
|
||||||
that use the docker driver. For example, on a system that runs both a package
|
|
||||||
managed version of dockerd, as well as Docker Desktop, you might have the
|
|
||||||
following:
|
|
||||||
|
|
||||||
```console
|
|
||||||
NAME/NODE DRIVER/ENDPOINT STATUS BUILDKIT PLATFORMS
|
|
||||||
default docker
|
|
||||||
default default running 20.10.17 linux/amd64, linux/386
|
|
||||||
desktop-linux * docker
|
|
||||||
desktop-linux desktop-linux running 20.10.17 linux/amd64, linux/arm64, linux/riscv64, linux/ppc64le, linux/s390x, linux/386, linux/arm/v7, linux/arm/v6
|
|
||||||
```
|
|
||||||
|
|
||||||
This is because the docker driver builders are automatically pulled from
|
|
||||||
the available [Docker Contexts](https://docs.docker.com/engine/context/working-with-contexts/).
|
|
||||||
When you add new contexts using `docker context create`, these will appear in
|
|
||||||
your list of buildx builders.
|
|
||||||
|
|
||||||
Unlike the [other drivers](../index.md), builders using the docker driver
|
|
||||||
cannot be manually created, and can only be automatically created from the
|
|
||||||
docker context. Additionally, they cannot be configured to a specific BuildKit
|
|
||||||
version, and cannot take any extra parameters, as these are both preset by the
|
|
||||||
Docker engine internally.
|
|
||||||
|
|
||||||
If you want the extra configuration and flexibility without too much more
|
|
||||||
overhead, then see the help page for the [docker-container driver](./docker-container.md).
|
|
||||||
|
|
||||||
## Further reading
|
|
||||||
|
|
||||||
For more information on the docker driver, see the [buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver).
|
|
||||||
|
|
||||||
<!--- FIXME: for 0.9, make reference link relative --->
|
|
||||||
@@ -1,41 +0,0 @@
|
|||||||
# Buildx drivers overview
|
|
||||||
|
|
||||||
The buildx client connects out to the BuildKit backend to execute builds -
|
|
||||||
Buildx drivers allow fine-grained control over management of the backend, and
|
|
||||||
supports several different options for where and how BuildKit should run.
|
|
||||||
|
|
||||||
Currently, we support the following drivers:
|
|
||||||
|
|
||||||
- The `docker` driver, that uses the BuildKit library bundled into the Docker
|
|
||||||
daemon.
|
|
||||||
([guide](./docker.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
|
|
||||||
- The `docker-container` driver, that launches a dedicated BuildKit container
|
|
||||||
using Docker, for access to advanced features.
|
|
||||||
([guide](./docker-container.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
|
|
||||||
- The `kubernetes` driver, that launches dedicated BuildKit pods in a
|
|
||||||
remote Kubernetes cluster, for scalable builds.
|
|
||||||
([guide](./kubernetes.md), [reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver))
|
|
||||||
- The `remote` driver, that allows directly connecting to a manually managed
|
|
||||||
BuildKit daemon, for more custom setups.
|
|
||||||
([guide](./remote.md))
|
|
||||||
|
|
||||||
<!--- FIXME: for 0.9, make links relative, and add reference link for remote --->
|
|
||||||
|
|
||||||
To create a new builder that uses one of the above drivers, you can use the
|
|
||||||
[`docker buildx create`](https://docs.docker.com/engine/reference/commandline/buildx_create/) command:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create --name=<builder-name> --driver=<driver> --driver-opt=<driver-options>
|
|
||||||
```
|
|
||||||
|
|
||||||
The build experience is very similar across drivers, however, there are some
|
|
||||||
features that are not evenly supported across the board, notably, the `docker`
|
|
||||||
driver does not include support for certain output/caching types.
|
|
||||||
|
|
||||||
| Feature | `docker` | `docker-container` | `kubernetes` | `remote` |
|
|
||||||
| :---------------------------- | :-------------: | :----------------: | :----------: | :--------------------: |
|
|
||||||
| **Automatic `--load`** | ✅ | ❌ | ❌ | ❌ |
|
|
||||||
| **Cache export** | ❔ (inline only) | ✅ | ✅ | ✅ |
|
|
||||||
| **Docker/OCI tarball output** | ❌ | ✅ | ✅ | ✅ |
|
|
||||||
| **Multi-arch images** | ❌ | ✅ | ✅ | ✅ |
|
|
||||||
| **BuildKit configuration** | ❌ | ✅ | ✅ | ❔ (managed externally) |
|
|
||||||
@@ -1,238 +0,0 @@
|
|||||||
# Kubernetes driver
|
|
||||||
|
|
||||||
The buildx kubernetes driver allows connecting your local development or ci
|
|
||||||
environments to your kubernetes cluster to allow access to more powerful
|
|
||||||
and varied compute resources.
|
|
||||||
|
|
||||||
This guide assumes you already have an existing kubernetes cluster - if you don't already
|
|
||||||
have one, you can easily follow along by installing
|
|
||||||
[minikube](https://minikube.sigs.k8s.io/docs/).
|
|
||||||
|
|
||||||
Before connecting buildx to your cluster, you may want to create a dedicated
|
|
||||||
namespace using `kubectl` to keep your buildx-managed resources separate. You
|
|
||||||
can call your namespace anything you want, or use the existing `default`
|
|
||||||
namespace, but we'll create a `buildkit` namespace for now:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ kubectl create namespace buildkit
|
|
||||||
```
|
|
||||||
|
|
||||||
Then create a new buildx builder:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create \
|
|
||||||
--bootstrap \
|
|
||||||
--name=kube \
|
|
||||||
--driver=kubernetes \
|
|
||||||
--driver-opt=namespace=buildkit
|
|
||||||
```
|
|
||||||
|
|
||||||
This assumes that the kubernetes cluster you want to connect to is currently
|
|
||||||
accessible via the kubectl command, with the `KUBECONFIG` environment variable
|
|
||||||
[set appropriately](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/#set-the-kubeconfig-environment-variable)
|
|
||||||
if necessary.
|
|
||||||
|
|
||||||
You should now be able to see the builder in the list of buildx builders:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx ls
|
|
||||||
NAME/NODE DRIVER/ENDPOINT STATUS PLATFORMS
|
|
||||||
kube kubernetes
|
|
||||||
kube0-6977cdcb75-k9h9m running linux/amd64, linux/amd64/v2, linux/amd64/v3, linux/386
|
|
||||||
default * docker
|
|
||||||
default default running linux/amd64, linux/386
|
|
||||||
```
|
|
||||||
|
|
||||||
The buildx driver creates the necessary resources on your cluster in the
|
|
||||||
specified namespace (in this case, `buildkit`), while keeping your
|
|
||||||
driver configuration locally. You can see the running pods with:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ kubectl -n buildkit get deployments
|
|
||||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
|
||||||
kube0 1/1 1 1 32s
|
|
||||||
|
|
||||||
$ kubectl -n buildkit get pods
|
|
||||||
NAME READY STATUS RESTARTS AGE
|
|
||||||
kube0-6977cdcb75-k9h9m 1/1 Running 0 32s
|
|
||||||
```
|
|
||||||
|
|
||||||
You can use your new builder by including the `--builder` flag when running
|
|
||||||
buildx commands. For example (replacing `<user>` and `<image>` with your Docker
|
|
||||||
Hub username and desired image output respectively):
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build . \
|
|
||||||
--builder=kube \
|
|
||||||
-t <user>/<image> \
|
|
||||||
--push
|
|
||||||
```
|
|
||||||
|
|
||||||
## Scaling Buildkit
|
|
||||||
|
|
||||||
One of the main advantages of the kubernetes builder is that you can easily
|
|
||||||
scale your builder up and down to handle increased build load. These controls
|
|
||||||
are exposed via the following options:
|
|
||||||
|
|
||||||
- `replicas=N`
|
|
||||||
- This scales the number of buildkit pods to the desired size. By default,
|
|
||||||
  only a single pod will be created, but increasing this allows taking
|
|
||||||
advantage of multiple nodes in your cluster.
|
|
||||||
- `requests.cpu`, `requests.memory`, `limits.cpu`, `limits.memory`
|
|
||||||
- These options allow requesting and limiting the resources available to each
|
|
||||||
buildkit pod according to the official kubernetes documentation
|
|
||||||
[here](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
|
|
||||||
|
|
||||||
For example, to create 4 replica buildkit pods:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create \
|
|
||||||
--bootstrap \
|
|
||||||
--name=kube \
|
|
||||||
--driver=kubernetes \
|
|
||||||
--driver-opt=namespace=buildkit,replicas=4
|
|
||||||
```
|
|
||||||
|
|
||||||
Listing the pods, we get:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ kubectl -n buildkit get deployments
|
|
||||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
|
||||||
kube0 4/4 4 4 8s
|
|
||||||
|
|
||||||
$ kubectl -n buildkit get pods
|
|
||||||
NAME READY STATUS RESTARTS AGE
|
|
||||||
kube0-6977cdcb75-48ld2 1/1 Running 0 8s
|
|
||||||
kube0-6977cdcb75-rkc6b 1/1 Running 0 8s
|
|
||||||
kube0-6977cdcb75-vb4ks 1/1 Running 0 8s
|
|
||||||
kube0-6977cdcb75-z4fzs 1/1 Running 0 8s
|
|
||||||
```
|
|
||||||
|
|
||||||
Additionally, you can use the `loadbalance=(sticky|random)` option to control
|
|
||||||
the load-balancing behavior when there are multiple replicas. While `random`
|
|
||||||
selects random nodes from the available pool, which should provide
|
|
||||||
better balancing across all replicas, `sticky` (the default) attempts to
|
|
||||||
connect the same build performed multiple times to the same node each time,
|
|
||||||
ensuring better local cache utilization.
|
|
||||||
|
|
||||||
For more information on scalability, see the options for [buildx create](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver-opt).
|
|
||||||
|
|
||||||
## Multi-platform builds
|
|
||||||
|
|
||||||
The kubernetes buildx driver has support for creating [multi-platform images](https://docs.docker.com/build/buildx/multiplatform-images/),
|
|
||||||
for easily building for multiple platforms at once.
|
|
||||||
|
|
||||||
### QEMU
|
|
||||||
|
|
||||||
Like the other containerized driver `docker-container`, the kubernetes driver
|
|
||||||
also supports using [QEMU](https://www.qemu.org/) (user mode) to build
|
|
||||||
non-native platforms. If using a default setup like above, no extra setup
|
|
||||||
should be needed, you should just be able to start building for other
|
|
||||||
architectures, by including the `--platform` flag.
|
|
||||||
|
|
||||||
For example, to build a Linux image for `amd64` and `arm64`:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build . \
|
|
||||||
--builder=kube \
|
|
||||||
--platform=linux/amd64,linux/arm64 \
|
|
||||||
-t <user>/<image> \
|
|
||||||
--push
|
|
||||||
```
|
|
||||||
|
|
||||||
> **Warning**
|
|
||||||
> QEMU performs full-system emulation of non-native platforms, which is *much*
|
|
||||||
> slower than native builds. Compute-heavy tasks like compilation and
|
|
||||||
> compression/decompression will likely take a large performance hit.
|
|
||||||
|
|
||||||
Note, if you're using a custom buildkit image using the `image=<image>` driver
|
|
||||||
option, or invoking non-native binaries from within your build, you may need to
|
|
||||||
explicitly enable QEMU using the `qemu.install` option during driver creation:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create \
|
|
||||||
--bootstrap \
|
|
||||||
--name=kube \
|
|
||||||
--driver=kubernetes \
|
|
||||||
--driver-opt=namespace=buildkit,qemu.install=true
|
|
||||||
```
|
|
||||||
|
|
||||||
### Native
|
|
||||||
|
|
||||||
If you have access to cluster nodes of different architectures, we can
|
|
||||||
configure the kubernetes driver to take advantage of these for native builds.
|
|
||||||
To do this, we need to use the `--append` feature of `docker buildx create`.
|
|
||||||
|
|
||||||
To start, we can create our builder with explicit support for a single
|
|
||||||
architecture, `amd64`:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create \
|
|
||||||
--bootstrap \
|
|
||||||
--name=kube \
|
|
||||||
--driver=kubernetes \
|
|
||||||
--platform=linux/amd64 \
|
|
||||||
--node=builder-amd64 \
|
|
||||||
--driver-opt=namespace=buildkit,nodeselector="kubernetes.io/arch=amd64"
|
|
||||||
```
|
|
||||||
|
|
||||||
This creates a buildx builder `kube` containing a single builder node `builder-amd64`.
|
|
||||||
Note that the buildx concept of a node is not the same as the kubernetes
|
|
||||||
concept of a node - the buildx node in this case could connect multiple
|
|
||||||
kubernetes nodes of the same architecture together.
|
|
||||||
|
|
||||||
With our `kube` driver created, we can now introduce another architecture into
|
|
||||||
the mix, for example, like before we can use `arm64`:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create \
|
|
||||||
--append \
|
|
||||||
--bootstrap \
|
|
||||||
--name=kube \
|
|
||||||
--driver=kubernetes \
|
|
||||||
--platform=linux/arm64 \
|
|
||||||
--node=builder-arm64 \
|
|
||||||
--driver-opt=namespace=buildkit,nodeselector="kubernetes.io/arch=arm64"
|
|
||||||
```
|
|
||||||
|
|
||||||
If you list builders now, you should be able to see both nodes present:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx ls
|
|
||||||
NAME/NODE DRIVER/ENDPOINT STATUS PLATFORMS
|
|
||||||
kube kubernetes
|
|
||||||
builder-amd64 kubernetes:///kube?deployment=builder-amd64&kubeconfig= running linux/amd64*, linux/amd64/v2, linux/amd64/v3, linux/386
|
|
||||||
builder-arm64 kubernetes:///kube?deployment=builder-arm64&kubeconfig= running linux/arm64*
|
|
||||||
```
|
|
||||||
|
|
||||||
You should now be able to build multi-arch images with `amd64` and `arm64`
|
|
||||||
combined, by specifying those platforms together in your buildx command:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --builder=kube --platform=linux/amd64,linux/arm64 -t <user>/<image> --push .
|
|
||||||
```
|
|
||||||
|
|
||||||
You can repeat the `buildx create --append` command for as many different
|
|
||||||
architectures that you want to support.
|
|
||||||
|
|
||||||
## Rootless mode
|
|
||||||
|
|
||||||
The kubernetes driver supports rootless mode. For more information on how
|
|
||||||
rootless mode works, and its requirements, see [here](https://github.com/moby/buildkit/blob/master/docs/rootless.md).
|
|
||||||
|
|
||||||
To enable it in your cluster, you can use the `rootless=true` driver option:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create \
|
|
||||||
--name=kube \
|
|
||||||
--driver=kubernetes \
|
|
||||||
--driver-opt=namespace=buildkit,rootless=true
|
|
||||||
```
|
|
||||||
|
|
||||||
This will create your pods without `securityContext.privileged`.
|
|
||||||
|
|
||||||
## Further reading
|
|
||||||
|
|
||||||
For more information on the kubernetes driver, see the [buildx reference](https://docs.docker.com/engine/reference/commandline/buildx_create/#driver).
|
|
||||||
|
|
||||||
<!--- FIXME: for 0.9, make reference link relative --->
|
|
||||||
@@ -1,178 +0,0 @@
|
|||||||
# Remote driver
|
|
||||||
|
|
||||||
The buildx remote driver allows for more complex custom build workloads that
|
|
||||||
allow users to connect to external buildkit instances. This is useful for
|
|
||||||
scenarios that require manual management of the buildkit daemon, or where a
|
|
||||||
buildkit daemon is exposed from another source.
|
|
||||||
|
|
||||||
To connect to a running buildkitd instance:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create \
|
|
||||||
--name remote \
|
|
||||||
--driver remote \
|
|
||||||
tcp://localhost:1234
|
|
||||||
```
|
|
||||||
|
|
||||||
## Remote Buildkit over Unix sockets
|
|
||||||
|
|
||||||
In this scenario, we'll create a setup with buildkitd listening on a unix
|
|
||||||
socket, and have buildx connect through it.
|
|
||||||
|
|
||||||
Firstly, ensure that [buildkit](https://github.com/moby/buildkit) is installed.
|
|
||||||
For example, you can launch an instance of buildkitd with:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ sudo ./buildkitd --group $(id -gn) --addr unix://$HOME/buildkitd.sock
|
|
||||||
```
|
|
||||||
|
|
||||||
Alternatively, [see here](https://github.com/moby/buildkit/blob/master/docs/rootless.md)
|
|
||||||
for running buildkitd in rootless mode or [here](https://github.com/moby/buildkit/tree/master/examples/systemd)
|
|
||||||
for examples of running it as a systemd service.
|
|
||||||
|
|
||||||
You should now have a unix socket accessible to your user, that is available to
|
|
||||||
connect to:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ ls -lh /home/user/buildkitd.sock
|
|
||||||
srw-rw---- 1 root user 0 May 5 11:04 /home/user/buildkitd.sock
|
|
||||||
```
|
|
||||||
|
|
||||||
You can then connect buildx to it with the remote driver:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create \
|
|
||||||
--name remote-unix \
|
|
||||||
--driver remote \
|
|
||||||
unix://$HOME/buildkitd.sock
|
|
||||||
```
|
|
||||||
|
|
||||||
If you list available builders, you should then see `remote-unix` among them:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx ls
|
|
||||||
NAME/NODE DRIVER/ENDPOINT STATUS PLATFORMS
|
|
||||||
remote-unix remote
|
|
||||||
remote-unix0 unix:///home/.../buildkitd.sock running linux/amd64, linux/amd64/v2, linux/amd64/v3, linux/386
|
|
||||||
default * docker
|
|
||||||
default default running linux/amd64, linux/386
|
|
||||||
```
|
|
||||||
|
|
||||||
We can switch to this new builder as the default using `docker buildx use remote-unix`,
|
|
||||||
or specify it per build:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --builder=remote-unix -t test --load .
|
|
||||||
```
|
|
||||||
|
|
||||||
(remember that `--load` is necessary when not using the default `docker`
|
|
||||||
driver, to load the build result into the docker daemon)
|
|
||||||
|
|
||||||
## Remote Buildkit in Docker container
|
|
||||||
|
|
||||||
In this scenario, we'll create a similar setup to the `docker-container`
|
|
||||||
driver, by manually booting a buildkit docker container and connecting to it
|
|
||||||
using the buildx remote driver. In most cases you'd probably just use the
|
|
||||||
`docker-container` driver that connects to buildkit through the Docker daemon,
|
|
||||||
but in this case we manually create a container and access it via its exposed
|
|
||||||
port.
|
|
||||||
|
|
||||||
First, we need to generate certificates for buildkit - you can use the
|
|
||||||
[create-certs.sh](https://github.com/moby/buildkit/blob/v0.10.3/examples/kubernetes/create-certs.sh)
|
|
||||||
script as a starting point. Note, that while it is *possible* to expose
|
|
||||||
buildkit over TCP without using TLS, it is **not recommended**, since this will
|
|
||||||
allow arbitrary access to buildkit without credentials.
|
|
||||||
|
|
||||||
With our certificates generated in `.certs/`, we startup the container:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker run -d --rm \
|
|
||||||
--name=remote-buildkitd \
|
|
||||||
--privileged \
|
|
||||||
-p 1234:1234 \
|
|
||||||
-v $PWD/.certs:/etc/buildkit/certs \
|
|
||||||
moby/buildkit:latest \
|
|
||||||
--addr tcp://0.0.0.0:1234 \
|
|
||||||
--tlscacert /etc/buildkit/certs/ca.pem \
|
|
||||||
--tlscert /etc/buildkit/certs/daemon-cert.pem \
|
|
||||||
--tlskey /etc/buildkit/certs/daemon-key.pem
|
|
||||||
```
|
|
||||||
|
|
||||||
The above command starts a buildkit container and exposes the daemon's port
|
|
||||||
1234 to localhost.
|
|
||||||
|
|
||||||
We can now connect to this running container using buildx:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create \
|
|
||||||
--name remote-container \
|
|
||||||
--driver remote \
|
|
||||||
--driver-opt cacert=.certs/ca.pem,cert=.certs/client-cert.pem,key=.certs/client-key.pem,servername=... \
|
|
||||||
tcp://localhost:1234
|
|
||||||
```
|
|
||||||
|
|
||||||
Alternatively, we could use the `docker-container://` URL scheme to connect
|
|
||||||
to the buildkit container without specifying a port:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create \
|
|
||||||
--name remote-container \
|
|
||||||
--driver remote \
|
|
||||||
docker-container://remote-container
|
|
||||||
```
|
|
||||||
|
|
||||||
## Remote Buildkit in Kubernetes
|
|
||||||
|
|
||||||
In this scenario, we'll create a similar setup to the `kubernetes` driver by
|
|
||||||
manually creating a buildkit `Deployment`. While the `kubernetes` driver will
|
|
||||||
do this under-the-hood, it might sometimes be desirable to scale buildkit
|
|
||||||
manually. Additionally, when executing builds from inside Kubernetes pods,
|
|
||||||
the buildx builder will need to be recreated from within each pod or copied
|
|
||||||
between them.
|
|
||||||
|
|
||||||
Firstly, we can create a kubernetes deployment of buildkitd, as per the
|
|
||||||
instructions [here](https://github.com/moby/buildkit/tree/master/examples/kubernetes).
|
|
||||||
Following the guide, we setup certificates for the buildkit daemon and client
|
|
||||||
(as above using [create-certs.sh](https://github.com/moby/buildkit/blob/v0.10.3/examples/kubernetes/create-certs.sh))
|
|
||||||
and create a `Deployment` of buildkit pods with a service that connects to
|
|
||||||
them.
|
|
||||||
|
|
||||||
Assuming that the service is called `buildkitd`, we can create a remote builder
|
|
||||||
in buildx, ensuring that the listed certificate files are present:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create \
|
|
||||||
--name remote-kubernetes \
|
|
||||||
--driver remote \
|
|
||||||
--driver-opt cacert=.certs/ca.pem,cert=.certs/client-cert.pem,key=.certs/client-key.pem \
|
|
||||||
tcp://buildkitd.default.svc:1234
|
|
||||||
```
|
|
||||||
|
|
||||||
Note that the above will only work in-cluster (since the buildkit setup guide
|
|
||||||
only creates a ClusterIP service). To configure the builder to be accessible
|
|
||||||
remotely, you can use an appropriately configured Ingress, which is outside the
|
|
||||||
scope of this guide.
|
|
||||||
|
|
||||||
To access the service remotely, we can use the port forwarding mechanism in
|
|
||||||
kubectl:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ kubectl port-forward svc/buildkitd 1234:1234
|
|
||||||
```
|
|
||||||
|
|
||||||
Then you can simply point the remote driver at `tcp://localhost:1234`.
|
|
||||||
|
|
||||||
Alternatively, we could use the `kube-pod://` URL scheme to connect
|
|
||||||
directly to a buildkit pod through the kubernetes api (note that this method
|
|
||||||
will only connect to a single pod in the deployment):
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ kubectl get pods --selector=app=buildkitd -o json | jq -r '.items[].metadata.name'
|
|
||||||
buildkitd-XXXXXXXXXX-xxxxx
|
|
||||||
$ docker buildx create \
|
|
||||||
--name remote-container \
|
|
||||||
--driver remote \
|
|
||||||
kube-pod://buildkitd-XXXXXXXXXX-xxxxx
|
|
||||||
```
|
|
||||||
|
|
||||||
<!--- FIXME: for 0.9, add further reading section with link to reference --->
|
|
||||||
@@ -1,31 +0,0 @@
|
|||||||
# OpenTelemetry support
|
|
||||||
|
|
||||||
To capture the trace to [Jaeger](https://github.com/jaegertracing/jaeger), set
|
|
||||||
`JAEGER_TRACE` environment variable to the collection address using a `driver-opt`.
|
|
||||||
|
|
||||||
First create a Jaeger container:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker run -d --name jaeger -p "6831:6831/udp" -p "16686:16686" jaegertracing/all-in-one
|
|
||||||
```
|
|
||||||
|
|
||||||
Then [create a `docker-container` builder](https://docs.docker.com/engine/reference/commandline/buildx_create/)
|
|
||||||
that will use the Jaeger instance via the `JAEGER_TRACE` env var:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create --use \
|
|
||||||
--name mybuilder \
|
|
||||||
--driver docker-container \
|
|
||||||
--driver-opt "network=host" \
|
|
||||||
--driver-opt "env.JAEGER_TRACE=localhost:6831"
|
|
||||||
```
|
|
||||||
|
|
||||||
Boot and [inspect `mybuilder`](https://docs.docker.com/engine/reference/commandline/buildx_inspect/):
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx inspect --bootstrap
|
|
||||||
```
|
|
||||||
|
|
||||||
Buildx commands should be traced at `http://127.0.0.1:16686/`:
|
|
||||||
|
|
||||||

|
|
||||||
@@ -1,62 +0,0 @@
|
|||||||
# Registry mirror
|
|
||||||
|
|
||||||
You can define a registry mirror to use for your builds by providing a [BuildKit daemon configuration](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md)
|
|
||||||
while creating a builder with the [`--config` flags](https://docs.docker.com/engine/reference/commandline/buildx_create/#config).
|
|
||||||
|
|
||||||
```toml
|
|
||||||
# /etc/buildkitd.toml
|
|
||||||
debug = true
|
|
||||||
[registry."docker.io"]
|
|
||||||
mirrors = ["mirror.gcr.io"]
|
|
||||||
```
|
|
||||||
|
|
||||||
> **Note**
|
|
||||||
>
|
|
||||||
> `debug = true` has been added to be able to debug requests
|
|
||||||
> in the BuildKit daemon and see if the mirror is effectively used.
|
|
||||||
|
|
||||||
Then [create a `docker-container` builder](https://docs.docker.com/engine/reference/commandline/buildx_create/)
|
|
||||||
that will use this BuildKit configuration:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create --use \
|
|
||||||
--name mybuilder \
|
|
||||||
--driver docker-container \
|
|
||||||
--config /etc/buildkitd.toml
|
|
||||||
```
|
|
||||||
|
|
||||||
Boot and [inspect `mybuilder`](https://docs.docker.com/engine/reference/commandline/buildx_inspect/):
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx inspect --bootstrap
|
|
||||||
```
|
|
||||||
|
|
||||||
Build an image:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --load . -f-<<EOF
|
|
||||||
FROM alpine
|
|
||||||
RUN echo "hello world"
|
|
||||||
EOF
|
|
||||||
```
|
|
||||||
|
|
||||||
Now let's check the BuildKit logs in the builder container:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker logs buildx_buildkit_mybuilder0
|
|
||||||
```
|
|
||||||
```text
|
|
||||||
...
|
|
||||||
time="2022-02-06T17:47:48Z" level=debug msg="do request" request.header.accept="application/vnd.docker.container.image.v1+json, */*" request.header.user-agent=containerd/1.5.8+unknown request.method=GET spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
|
||||||
time="2022-02-06T17:47:48Z" level=debug msg="fetch response received" response.header.accept-ranges=bytes response.header.age=1356 response.header.alt-svc="h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"" response.header.cache-control="public, max-age=3600" response.header.content-length=1469 response.header.content-type=application/octet-stream response.header.date="Sun, 06 Feb 2022 17:25:17 GMT" response.header.etag="\"774380abda8f4eae9a149e5d5d3efc83\"" response.header.expires="Sun, 06 Feb 2022 18:25:17 GMT" response.header.last-modified="Wed, 24 Nov 2021 21:07:57 GMT" response.header.server=UploadServer response.header.x-goog-generation=1637788077652182 response.header.x-goog-hash="crc32c=V3DSrg==" response.header.x-goog-hash.1="md5=d0OAq9qPTq6aFJ5dXT78gw==" response.header.x-goog-metageneration=1 response.header.x-goog-storage-class=STANDARD response.header.x-goog-stored-content-encoding=identity response.header.x-goog-stored-content-length=1469 response.header.x-guploader-uploadid=ADPycduqQipVAXc3tzXmTzKQ2gTT6CV736B2J628smtD1iDytEyiYCgvvdD8zz9BT1J1sASUq9pW_ctUyC4B-v2jvhIxnZTlKg response.status="200 OK" spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
|
||||||
time="2022-02-06T17:47:48Z" level=debug msg="fetch response received" response.header.accept-ranges=bytes response.header.age=760 response.header.alt-svc="h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"" response.header.cache-control="public, max-age=3600" response.header.content-length=1471 response.header.content-type=application/octet-stream response.header.date="Sun, 06 Feb 2022 17:35:13 GMT" response.header.etag="\"35d688bd15327daafcdb4d4395e616a8\"" response.header.expires="Sun, 06 Feb 2022 18:35:13 GMT" response.header.last-modified="Wed, 24 Nov 2021 21:07:12 GMT" response.header.server=UploadServer response.header.x-goog-generation=1637788032100793 response.header.x-goog-hash="crc32c=aWgRjA==" response.header.x-goog-hash.1="md5=NdaIvRUyfar8201DleYWqA==" response.header.x-goog-metageneration=1 response.header.x-goog-storage-class=STANDARD response.header.x-goog-stored-content-encoding=identity response.header.x-goog-stored-content-length=1471 response.header.x-guploader-uploadid=ADPycdtR-gJYwC7yHquIkJWFFG8FovDySvtmRnZBqlO3yVDanBXh_VqKYt400yhuf0XbQ3ZMB9IZV2vlcyHezn_Pu3a1SMMtiw response.status="200 OK" spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
|
||||||
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
|
||||||
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
|
||||||
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
|
||||||
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
|
||||||
time="2022-02-06T17:47:48Z" level=debug msg="do request" request.header.accept="application/vnd.docker.image.rootfs.diff.tar.gzip, */*" request.header.user-agent=containerd/1.5.8+unknown request.method=GET spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
|
||||||
time="2022-02-06T17:47:48Z" level=debug msg="fetch response received" response.header.accept-ranges=bytes response.header.age=1356 response.header.alt-svc="h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"" response.header.cache-control="public, max-age=3600" response.header.content-length=2818413 response.header.content-type=application/octet-stream response.header.date="Sun, 06 Feb 2022 17:25:17 GMT" response.header.etag="\"1d55e7be5a77c4a908ad11bc33ebea1c\"" response.header.expires="Sun, 06 Feb 2022 18:25:17 GMT" response.header.last-modified="Wed, 24 Nov 2021 21:07:06 GMT" response.header.server=UploadServer response.header.x-goog-generation=1637788026431708 response.header.x-goog-hash="crc32c=ZojF+g==" response.header.x-goog-hash.1="md5=HVXnvlp3xKkIrRG8M+vqHA==" response.header.x-goog-metageneration=1 response.header.x-goog-storage-class=STANDARD response.header.x-goog-stored-content-encoding=identity response.header.x-goog-stored-content-length=2818413 response.header.x-guploader-uploadid=ADPycdsebqxiTBJqZ0bv9zBigjFxgQydD2ESZSkKchpE0ILlN9Ibko3C5r4fJTJ4UR9ddp-UBd-2v_4eRpZ8Yo2llW_j4k8WhQ response.status="200 OK" spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
|
||||||
...
|
|
||||||
```
|
|
||||||
|
|
||||||
As you can see, requests come from the GCR registry mirror (`response.header.x-goog*`).
|
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
# Resource limiting
|
|
||||||
|
|
||||||
## Max parallelism
|
|
||||||
|
|
||||||
You can limit the parallelism of the BuildKit solver, which is particularly useful
|
|
||||||
for low-powered machines, using a [BuildKit daemon configuration](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md)
|
|
||||||
while creating a builder with the [`--config` flags](https://docs.docker.com/engine/reference/commandline/buildx_create/#config).
|
|
||||||
|
|
||||||
```toml
|
|
||||||
# /etc/buildkitd.toml
|
|
||||||
[worker.oci]
|
|
||||||
max-parallelism = 4
|
|
||||||
```
|
|
||||||
|
|
||||||
Now you can [create a `docker-container` builder](https://docs.docker.com/engine/reference/commandline/buildx_create/)
|
|
||||||
that will use this BuildKit configuration to limit parallelism.
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create --use \
|
|
||||||
--name mybuilder \
|
|
||||||
--driver docker-container \
|
|
||||||
--config /etc/buildkitd.toml
|
|
||||||
```
|
|
||||||
|
|
||||||
## Limit on TCP connections
|
|
||||||
|
|
||||||
We are also now limiting TCP connections to **4 per registry** with an additional
|
|
||||||
connection not used for layer pulls and pushes. This limitation will be able to
|
|
||||||
manage TCP connection per host to avoid your build being stuck while pulling
|
|
||||||
images. The additional connection is used for metadata requests
|
|
||||||
(image config retrieval) to enhance the overall build time.
|
|
||||||
|
|
||||||
More info: [moby/buildkit#2259](https://github.com/moby/buildkit/pull/2259)
|
|
||||||
@@ -29,9 +29,9 @@ Extended build capabilities with BuildKit
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
|
|||||||
@@ -9,22 +9,22 @@ Build from a file
|
|||||||
|
|
||||||
### Aliases
|
### Aliases
|
||||||
|
|
||||||
`docker buildx bake`, `docker buildx f`
|
`bake`, `f`
|
||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Build definition file |
|
| [`-f`](#file), [`--file stringArray`](#file) | Build definition file |
|
||||||
| `--load` | | | Shorthand for `--set=*.output=type=docker` |
|
| `--load` | Shorthand for `--set=*.output=type=docker` |
|
||||||
| `--metadata-file` | `string` | | Write build result metadata to the file |
|
| `--metadata-file string` | Write build result metadata to the file |
|
||||||
| [`--no-cache`](#no-cache) | | | Do not use cache when building the image |
|
| [`--no-cache`](#no-cache) | Do not use cache when building the image |
|
||||||
| [`--print`](#print) | | | Print the options without building |
|
| [`--print`](#print) | Print the options without building |
|
||||||
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
|
| [`--progress string`](#progress) | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
|
||||||
| [`--pull`](#pull) | | | Always attempt to pull all referenced images |
|
| [`--pull`](#pull) | Always attempt to pull a newer version of the image |
|
||||||
| `--push` | | | Shorthand for `--set=*.output=type=registry` |
|
| `--push` | Shorthand for `--set=*.output=type=registry` |
|
||||||
| [`--set`](#set) | `stringArray` | | Override target value (e.g., `targetpattern.key=value`) |
|
| [`--set stringArray`](#set) | Override target value (e.g., `targetpattern.key=value`) |
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
@@ -34,14 +34,12 @@ Build from a file
|
|||||||
Bake is a high-level build command. Each specified target will run in parallel
|
Bake is a high-level build command. Each specified target will run in parallel
|
||||||
as part of the build.
|
as part of the build.
|
||||||
|
|
||||||
Read [High-level build options with Bake](https://docs.docker.com/build/bake/)
|
Read [High-level build options](https://github.com/docker/buildx#high-level-build-options)
|
||||||
guide for introduction to writing bake files.
|
for introduction.
|
||||||
|
|
||||||
> **Note**
|
Please note that `buildx bake` command may receive backwards incompatible
|
||||||
>
|
features in the future if needed. We are looking for feedback on improving the
|
||||||
> `buildx bake` command may receive backwards incompatible features in the future
|
command and extending the functionality further.
|
||||||
> if needed. We are looking for feedback on improving the command and extending
|
|
||||||
> the functionality further.
|
|
||||||
|
|
||||||
## Examples
|
## Examples
|
||||||
|
|
||||||
@@ -51,42 +49,166 @@ Same as [`buildx --builder`](buildx.md#builder).
|
|||||||
|
|
||||||
### <a name="file"></a> Specify a build definition file (-f, --file)
|
### <a name="file"></a> Specify a build definition file (-f, --file)
|
||||||
|
|
||||||
Use the `-f` / `--file` option to specify the build definition file to use.
|
By default, `buildx bake` looks for build definition files in the current
|
||||||
The file can be an HCL, JSON or Compose file. If multiple files are specified
|
directory, the following are parsed:
|
||||||
|
|
||||||
|
- `docker-compose.yml`
|
||||||
|
- `docker-compose.yaml`
|
||||||
|
- `docker-bake.json`
|
||||||
|
- `docker-bake.override.json`
|
||||||
|
- `docker-bake.hcl`
|
||||||
|
- `docker-bake.override.hcl`
|
||||||
|
|
||||||
|
Use the `-f` / `--file` option to specify the build definition file to use. The
|
||||||
|
file can be a Docker Compose, JSON or HCL file. If multiple files are specified
|
||||||
they are all read and configurations are combined.
|
they are all read and configurations are combined.
|
||||||
|
|
||||||
You can pass the names of the targets to build, to build only specific target(s).
|
The following example uses a Docker Compose file named `docker-compose.dev.yaml`
|
||||||
The following example builds the `db` and `webapp-release` targets that are
|
as build definition file, and builds all targets in the file:
|
||||||
defined in the `docker-bake.dev.hcl` file:
|
|
||||||
|
|
||||||
```hcl
|
```console
|
||||||
# docker-bake.dev.hcl
|
$ docker buildx bake -f docker-compose.dev.yaml
|
||||||
group "default" {
|
|
||||||
targets = ["db", "webapp-dev"]
|
[+] Building 66.3s (30/30) FINISHED
|
||||||
|
=> [frontend internal] load build definition from Dockerfile 0.1s
|
||||||
|
=> => transferring dockerfile: 36B 0.0s
|
||||||
|
=> [backend internal] load build definition from Dockerfile 0.2s
|
||||||
|
=> => transferring dockerfile: 3.73kB 0.0s
|
||||||
|
=> [database internal] load build definition from Dockerfile 0.1s
|
||||||
|
=> => transferring dockerfile: 5.77kB 0.0s
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
Pass the names of the targets to build, to build only specific target(s). The
|
||||||
|
following example builds the `backend` and `database` targets that are defined
|
||||||
|
in the `docker-compose.dev.yaml` file, skipping the build for the `frontend`
|
||||||
|
target:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake -f docker-compose.dev.yaml backend database
|
||||||
|
|
||||||
|
[+] Building 2.4s (13/13) FINISHED
|
||||||
|
=> [backend internal] load build definition from Dockerfile 0.1s
|
||||||
|
=> => transferring dockerfile: 81B 0.0s
|
||||||
|
=> [database internal] load build definition from Dockerfile 0.2s
|
||||||
|
=> => transferring dockerfile: 36B 0.0s
|
||||||
|
=> [backend internal] load .dockerignore 0.3s
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also use a remote `git` bake definition:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake "git://github.com/docker/cli#v20.10.11" --print
|
||||||
|
#1 [internal] load git source git://github.com/docker/cli#v20.10.11
|
||||||
|
#1 0.745 e8f1871b077b64bcb4a13334b7146492773769f7 refs/tags/v20.10.11
|
||||||
|
#1 2.022 From git://github.com/docker/cli
|
||||||
|
#1 2.022 * [new tag] v20.10.11 -> v20.10.11
|
||||||
|
#1 DONE 2.9s
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": {
|
||||||
|
"targets": [
|
||||||
|
"binary"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"binary": {
|
||||||
|
"context": "git://github.com/docker/cli#v20.10.11",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"args": {
|
||||||
|
"BASE_VARIANT": "alpine",
|
||||||
|
"GO_STRIP": "",
|
||||||
|
"VERSION": ""
|
||||||
|
},
|
||||||
|
"target": "binary",
|
||||||
|
"platforms": [
|
||||||
|
"local"
|
||||||
|
],
|
||||||
|
"output": [
|
||||||
|
"build"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
```
|
||||||
|
|
||||||
target "webapp-dev" {
|
As you can see the context is fixed to `git://github.com/docker/cli` even if
|
||||||
dockerfile = "Dockerfile.webapp"
|
[no context is actually defined](https://github.com/docker/cli/blob/2776a6d694f988c0c1df61cad4bfac0f54e481c8/docker-bake.hcl#L17-L26)
|
||||||
tags = ["docker.io/username/webapp"]
|
in the definition.
|
||||||
}
|
|
||||||
|
|
||||||
target "webapp-release" {
|
If you want to access the main context for bake command from a bake file
|
||||||
inherits = ["webapp-dev"]
|
that has been imported remotely, you can use the `BAKE_CMD_CONTEXT` builtin var:
|
||||||
platforms = ["linux/amd64", "linux/arm64"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "db" {
|
```console
|
||||||
dockerfile = "Dockerfile.db"
|
$ cat https://raw.githubusercontent.com/tonistiigi/buildx/remote-test/docker-bake.hcl
|
||||||
tags = ["docker.io/username/db"]
|
target "default" {
|
||||||
|
context = BAKE_CMD_CONTEXT
|
||||||
|
dockerfile-inline = <<EOT
|
||||||
|
FROM alpine
|
||||||
|
WORKDIR /src
|
||||||
|
COPY . .
|
||||||
|
RUN ls -l && stop
|
||||||
|
EOT
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx bake -f docker-bake.dev.hcl db webapp-release
|
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" --print
|
||||||
|
{
|
||||||
|
"target": {
|
||||||
|
"default": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"dockerfile-inline": "FROM alpine\nWORKDIR /src\nCOPY . .\nRUN ls -l \u0026\u0026 stop\n"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
See our [file definition](https://docs.docker.com/build/bake/file-definition/)
|
```console
|
||||||
guide for more details.
|
$ touch foo bar
|
||||||
|
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test"
|
||||||
|
...
|
||||||
|
> [4/4] RUN ls -l && stop:
|
||||||
|
#8 0.101 total 0
|
||||||
|
#8 0.102 -rw-r--r-- 1 root root 0 Jul 27 18:47 bar
|
||||||
|
#8 0.102 -rw-r--r-- 1 root root 0 Jul 27 18:47 foo
|
||||||
|
#8 0.102 /bin/sh: stop: not found
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" "git://github.com/docker/cli#v20.10.11" --print
|
||||||
|
#1 [internal] load git source git://github.com/tonistiigi/buildx#remote-test
|
||||||
|
#1 0.429 577303add004dd7efeb13434d69ea030d35f7888 refs/heads/remote-test
|
||||||
|
#1 CACHED
|
||||||
|
{
|
||||||
|
"target": {
|
||||||
|
"default": {
|
||||||
|
"context": "git://github.com/docker/cli#v20.10.11",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"dockerfile-inline": "FROM alpine\nWORKDIR /src\nCOPY . .\nRUN ls -l \u0026\u0026 stop\n"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" "git://github.com/docker/cli#v20.10.11"
|
||||||
|
...
|
||||||
|
> [4/4] RUN ls -l && stop:
|
||||||
|
#8 0.136 drwxrwxrwx 5 root root 4096 Jul 27 18:31 kubernetes
|
||||||
|
#8 0.136 drwxrwxrwx 3 root root 4096 Jul 27 18:31 man
|
||||||
|
#8 0.136 drwxrwxrwx 2 root root 4096 Jul 27 18:31 opts
|
||||||
|
#8 0.136 -rw-rw-rw- 1 root root 1893 Jul 27 18:31 poule.yml
|
||||||
|
#8 0.136 drwxrwxrwx 7 root root 4096 Jul 27 18:31 scripts
|
||||||
|
#8 0.136 drwxrwxrwx 3 root root 4096 Jul 27 18:31 service
|
||||||
|
#8 0.136 drwxrwxrwx 2 root root 4096 Jul 27 18:31 templates
|
||||||
|
#8 0.136 drwxrwxrwx 10 root root 4096 Jul 27 18:31 vendor
|
||||||
|
#8 0.136 -rwxrwxrwx 1 root root 9620 Jul 27 18:31 vendor.conf
|
||||||
|
#8 0.136 /bin/sh: stop: not found
|
||||||
|
```
|
||||||
|
|
||||||
### <a name="no-cache"></a> Do not use cache when building the image (--no-cache)
|
### <a name="no-cache"></a> Do not use cache when building the image (--no-cache)
|
||||||
|
|
||||||
@@ -121,7 +243,27 @@ $ docker buildx bake -f docker-bake.hcl --print db
|
|||||||
|
|
||||||
### <a name="progress"></a> Set type of progress output (--progress)
|
### <a name="progress"></a> Set type of progress output (--progress)
|
||||||
|
|
||||||
Same as [`build --progress`](buildx_build.md#progress).
|
Same as [`build --progress`](buildx_build.md#progress). Set type of progress
|
||||||
|
output (auto, plain, tty). Use plain to show container output (default "auto").
|
||||||
|
|
||||||
|
> You can also use the `BUILDKIT_PROGRESS` environment variable to set its value.
|
||||||
|
|
||||||
|
The following example uses `plain` output during the build:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --progress=plain
|
||||||
|
|
||||||
|
#2 [backend internal] load build definition from Dockerfile.test
|
||||||
|
#2 sha256:de70cb0bb6ed8044f7b9b1b53b67f624e2ccfb93d96bb48b70c1fba562489618
|
||||||
|
#2 ...
|
||||||
|
|
||||||
|
#1 [database internal] load build definition from Dockerfile.test
|
||||||
|
#1 sha256:453cb50abd941762900a1212657a35fc4aad107f5d180b0ee9d93d6b74481bce
|
||||||
|
#1 transferring dockerfile: 36B done
|
||||||
|
#1 DONE 0.1s
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
### <a name="pull"></a> Always attempt to pull a newer version of the image (--pull)
|
### <a name="pull"></a> Always attempt to pull a newer version of the image (--pull)
|
||||||
|
|
||||||
@@ -136,6 +278,9 @@ Same as `build --pull`.
|
|||||||
Override target configurations from command line. The pattern matching syntax
|
Override target configurations from command line. The pattern matching syntax
|
||||||
is defined in https://golang.org/pkg/path/#Match.
|
is defined in https://golang.org/pkg/path/#Match.
|
||||||
|
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx bake --set target.args.mybuildarg=value
|
$ docker buildx bake --set target.args.mybuildarg=value
|
||||||
$ docker buildx bake --set target.platform=linux/arm64
|
$ docker buildx bake --set target.platform=linux/arm64
|
||||||
@@ -145,20 +290,609 @@ $ docker buildx bake --set foo*.no-cache # bypass caching only for
|
|||||||
```
|
```
|
||||||
|
|
||||||
Complete list of overridable fields:
|
Complete list of overridable fields:
|
||||||
|
`args`, `cache-from`, `cache-to`, `context`, `dockerfile`, `labels`, `no-cache`,
|
||||||
|
`output`, `platform`, `pull`, `secrets`, `ssh`, `tags`, `target`
|
||||||
|
|
||||||
* `args`
|
### File definition
|
||||||
* `cache-from`
|
|
||||||
* `cache-to`
|
In addition to compose files, bake supports a JSON and an equivalent HCL file
|
||||||
* `context`
|
format for defining build groups and targets.
|
||||||
* `dockerfile`
|
|
||||||
* `labels`
|
A target reflects a single docker build invocation with the same options that
|
||||||
* `no-cache`
|
you would specify for `docker build`. A group is a grouping of targets.
|
||||||
* `no-cache-filter`
|
|
||||||
* `output`
|
Multiple files can include the same target and final build options will be
|
||||||
* `platform`
|
determined by merging them together.
|
||||||
* `pull`
|
|
||||||
* `push`
|
In the case of compose files, each service corresponds to a target.
|
||||||
* `secrets`
|
|
||||||
* `ssh`
|
A group can specify its list of targets with the `targets` option. A target can
|
||||||
* `tags`
|
inherit build options by setting the `inherits` option to the list of targets or
|
||||||
* `target`
|
groups to inherit from.
|
||||||
|
|
||||||
|
Note: Design of bake command is work in progress, the user experience may change
|
||||||
|
based on feedback.
|
||||||
|
|
||||||
|
|
||||||
|
**Example HCL definition**
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
group "default" {
|
||||||
|
targets = ["db", "webapp-dev"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "webapp-dev" {
|
||||||
|
dockerfile = "Dockerfile.webapp"
|
||||||
|
tags = ["docker.io/username/webapp"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "webapp-release" {
|
||||||
|
inherits = ["webapp-dev"]
|
||||||
|
platforms = ["linux/amd64", "linux/arm64"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "db" {
|
||||||
|
dockerfile = "Dockerfile.db"
|
||||||
|
tags = ["docker.io/username/db"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Complete list of valid target fields:
|
||||||
|
|
||||||
|
`args`, `cache-from`, `cache-to`, `context`, `dockerfile`, `inherits`, `labels`,
|
||||||
|
`no-cache`, `output`, `platform`, `pull`, `secrets`, `ssh`, `tags`, `target`
|
||||||
|
|
||||||
|
### Global scope attributes
|
||||||
|
|
||||||
|
You can define global scope attributes in HCL/JSON and use them for code reuse
|
||||||
|
and setting values for variables. This means you can do a "data-only" HCL file
|
||||||
|
with the values you want to set/override and use it in the list of regular
|
||||||
|
output files.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# docker-bake.hcl
|
||||||
|
variable "FOO" {
|
||||||
|
default = "abc"
|
||||||
|
}
|
||||||
|
|
||||||
|
target "app" {
|
||||||
|
args = {
|
||||||
|
v1 = "pre-${FOO}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
You can use this file directly:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --print app
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": {
|
||||||
|
"targets": [
|
||||||
|
"app"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"app": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"args": {
|
||||||
|
"v1": "pre-abc"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Or create an override configuration file:
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# env.hcl
|
||||||
|
WHOAMI="myuser"
|
||||||
|
FOO="def-${WHOAMI}"
|
||||||
|
```
|
||||||
|
|
||||||
|
And invoke bake together with both of the files:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake -f docker-bake.hcl -f env.hcl --print app
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": {
|
||||||
|
"targets": [
|
||||||
|
"app"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"app": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"args": {
|
||||||
|
"v1": "pre-def-myuser"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### HCL variables and functions
|
||||||
|
|
||||||
|
Similar to how Terraform provides a way to [define variables](https://www.terraform.io/docs/configuration/variables.html#declaring-an-input-variable),
|
||||||
|
the HCL file format also supports variable block definitions. These can be used
|
||||||
|
to define variables with values provided by the current environment, or a
|
||||||
|
default value when unset.
|
||||||
|
|
||||||
|
A [set of generally useful functions](https://github.com/docker/buildx/blob/master/bake/hclparser/stdlib.go)
|
||||||
|
provided by [go-cty](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib)
|
||||||
|
are available for use in HCL files. In addition, [user defined functions](https://github.com/hashicorp/hcl/tree/main/ext/userfunc)
|
||||||
|
are also supported.
|
||||||
|
|
||||||
|
#### Using interpolation to tag an image with the git sha
|
||||||
|
|
||||||
|
Bake supports variable blocks which are assigned to matching environment
|
||||||
|
variables or default values.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# docker-bake.hcl
|
||||||
|
variable "TAG" {
|
||||||
|
default = "latest"
|
||||||
|
}
|
||||||
|
|
||||||
|
group "default" {
|
||||||
|
targets = ["webapp"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "webapp" {
|
||||||
|
tags = ["docker.io/username/webapp:${TAG}"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --print webapp
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": {
|
||||||
|
"targets": [
|
||||||
|
"webapp"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"webapp": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"tags": [
|
||||||
|
"docker.io/username/webapp:latest"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ TAG=$(git rev-parse --short HEAD) docker buildx bake --print webapp
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": {
|
||||||
|
"targets": [
|
||||||
|
"webapp"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"webapp": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"tags": [
|
||||||
|
"docker.io/username/webapp:985e9e9"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Using the `add` function
|
||||||
|
|
||||||
|
You can use [`go-cty` stdlib functions](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib).
|
||||||
|
Here we are using the `add` function.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# docker-bake.hcl
|
||||||
|
variable "TAG" {
|
||||||
|
default = "latest"
|
||||||
|
}
|
||||||
|
|
||||||
|
group "default" {
|
||||||
|
targets = ["webapp"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "webapp" {
|
||||||
|
args = {
|
||||||
|
buildno = "${add(123, 1)}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --print webapp
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": {
|
||||||
|
"targets": [
|
||||||
|
"webapp"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"webapp": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"args": {
|
||||||
|
"buildno": "124"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Defining an `increment` function
|
||||||
|
|
||||||
|
It also supports [user defined functions](https://github.com/hashicorp/hcl/tree/main/ext/userfunc).
|
||||||
|
The following example defines a simple an `increment` function.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# docker-bake.hcl
|
||||||
|
function "increment" {
|
||||||
|
params = [number]
|
||||||
|
result = number + 1
|
||||||
|
}
|
||||||
|
|
||||||
|
group "default" {
|
||||||
|
targets = ["webapp"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "webapp" {
|
||||||
|
args = {
|
||||||
|
buildno = "${increment(123)}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --print webapp
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": {
|
||||||
|
"targets": [
|
||||||
|
"webapp"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"webapp": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"args": {
|
||||||
|
"buildno": "124"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Only adding tags if a variable is not empty using an `notequal`
|
||||||
|
|
||||||
|
Here we are using the conditional `notequal` function which is just for
|
||||||
|
symmetry with the `equal` one.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# docker-bake.hcl
|
||||||
|
variable "TAG" {default="" }
|
||||||
|
|
||||||
|
group "default" {
|
||||||
|
targets = [
|
||||||
|
"webapp",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "webapp" {
|
||||||
|
context="."
|
||||||
|
dockerfile="Dockerfile"
|
||||||
|
tags = [
|
||||||
|
"my-image:latest",
|
||||||
|
notequal("",TAG) ? "my-image:${TAG}": "",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --print webapp
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": {
|
||||||
|
"targets": [
|
||||||
|
"webapp"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"webapp": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"tags": [
|
||||||
|
"my-image:latest"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Using variables in functions
|
||||||
|
|
||||||
|
You can refer variables to other variables like the target blocks can. Stdlib
|
||||||
|
functions can also be called but user functions can't at the moment.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# docker-bake.hcl
|
||||||
|
variable "REPO" {
|
||||||
|
default = "user/repo"
|
||||||
|
}
|
||||||
|
|
||||||
|
function "tag" {
|
||||||
|
params = [tag]
|
||||||
|
result = ["${REPO}:${tag}"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "webapp" {
|
||||||
|
tags = tag("v1")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --print webapp
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": {
|
||||||
|
"targets": [
|
||||||
|
"webapp"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"webapp": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"tags": [
|
||||||
|
"user/repo:v1"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Using variables in variables across files
|
||||||
|
|
||||||
|
When multiple files are specified, one file can use variables defined in
|
||||||
|
another file.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# docker-bake1.hcl
|
||||||
|
variable "FOO" {
|
||||||
|
default = upper("${BASE}def")
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "BAR" {
|
||||||
|
default = "-${FOO}-"
|
||||||
|
}
|
||||||
|
|
||||||
|
target "app" {
|
||||||
|
args = {
|
||||||
|
v1 = "pre-${BAR}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# docker-bake2.hcl
|
||||||
|
variable "BASE" {
|
||||||
|
default = "abc"
|
||||||
|
}
|
||||||
|
|
||||||
|
target "app" {
|
||||||
|
args = {
|
||||||
|
v2 = "${FOO}-post"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake -f docker-bake1.hcl -f docker-bake2.hcl --print app
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": {
|
||||||
|
"targets": [
|
||||||
|
"app"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"app": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"args": {
|
||||||
|
"v1": "pre--ABCDEF-",
|
||||||
|
"v2": "ABCDEF-post"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Using typed variables
|
||||||
|
|
||||||
|
Non-string variables are also accepted. The value passed with env is parsed
|
||||||
|
into suitable type first.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# docker-bake.hcl
|
||||||
|
variable "FOO" {
|
||||||
|
default = 3
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "IS_FOO" {
|
||||||
|
default = true
|
||||||
|
}
|
||||||
|
|
||||||
|
target "app" {
|
||||||
|
args = {
|
||||||
|
v1 = FOO > 5 ? "higher" : "lower"
|
||||||
|
v2 = IS_FOO ? "yes" : "no"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --print app
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": {
|
||||||
|
"targets": [
|
||||||
|
"app"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"app": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"args": {
|
||||||
|
"v1": "lower",
|
||||||
|
"v2": "yes"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Extension field with Compose
|
||||||
|
|
||||||
|
[Special extension](https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension)
|
||||||
|
field `x-bake` can be used in your compose file to evaluate fields that are not
|
||||||
|
(yet) available in the [build definition](https://github.com/compose-spec/compose-spec/blob/master/build.md#build-definition).
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# docker-compose.yml
|
||||||
|
services:
|
||||||
|
addon:
|
||||||
|
image: ct-addon:bar
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
dockerfile: ./Dockerfile
|
||||||
|
args:
|
||||||
|
CT_ECR: foo
|
||||||
|
CT_TAG: bar
|
||||||
|
x-bake:
|
||||||
|
tags:
|
||||||
|
- ct-addon:foo
|
||||||
|
- ct-addon:alp
|
||||||
|
platforms:
|
||||||
|
- linux/amd64
|
||||||
|
- linux/arm64
|
||||||
|
cache-from:
|
||||||
|
- user/app:cache
|
||||||
|
- type=local,src=path/to/cache
|
||||||
|
cache-to: type=local,dest=path/to/cache
|
||||||
|
pull: true
|
||||||
|
|
||||||
|
aws:
|
||||||
|
image: ct-fake-aws:bar
|
||||||
|
build:
|
||||||
|
dockerfile: ./aws.Dockerfile
|
||||||
|
args:
|
||||||
|
CT_ECR: foo
|
||||||
|
CT_TAG: bar
|
||||||
|
x-bake:
|
||||||
|
secret:
|
||||||
|
- id=mysecret,src=./secret
|
||||||
|
- id=mysecret2,src=./secret2
|
||||||
|
platforms: linux/arm64
|
||||||
|
output: type=docker
|
||||||
|
no-cache: true
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --print
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": {
|
||||||
|
"targets": [
|
||||||
|
"aws",
|
||||||
|
"addon"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"addon": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "./Dockerfile",
|
||||||
|
"args": {
|
||||||
|
"CT_ECR": "foo",
|
||||||
|
"CT_TAG": "bar"
|
||||||
|
},
|
||||||
|
"tags": [
|
||||||
|
"ct-addon:foo",
|
||||||
|
"ct-addon:alp"
|
||||||
|
],
|
||||||
|
"cache-from": [
|
||||||
|
"user/app:cache",
|
||||||
|
"type=local,src=path/to/cache"
|
||||||
|
],
|
||||||
|
"cache-to": [
|
||||||
|
"type=local,dest=path/to/cache"
|
||||||
|
],
|
||||||
|
"platforms": [
|
||||||
|
"linux/amd64",
|
||||||
|
"linux/arm64"
|
||||||
|
],
|
||||||
|
"pull": true
|
||||||
|
},
|
||||||
|
"aws": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "./aws.Dockerfile",
|
||||||
|
"args": {
|
||||||
|
"CT_ECR": "foo",
|
||||||
|
"CT_TAG": "bar"
|
||||||
|
},
|
||||||
|
"tags": [
|
||||||
|
"ct-fake-aws:bar"
|
||||||
|
],
|
||||||
|
"secret": [
|
||||||
|
"id=mysecret,src=./secret",
|
||||||
|
"id=mysecret2,src=./secret2"
|
||||||
|
],
|
||||||
|
"platforms": [
|
||||||
|
"linux/arm64"
|
||||||
|
],
|
||||||
|
"output": [
|
||||||
|
"type=docker"
|
||||||
|
],
|
||||||
|
"no-cache": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Complete list of valid fields for `x-bake`:
|
||||||
|
|
||||||
|
`tags`, `cache-from`, `cache-to`, `secret`, `ssh`, `platforms`, `output`,
|
||||||
|
`pull`, `no-cache`
|
||||||
|
|
||||||
|
### Built-in variables
|
||||||
|
|
||||||
|
* `BAKE_CMD_CONTEXT` can be used to access the main `context` for bake command
|
||||||
|
from a bake file that has been [imported remotely](#file).
|
||||||
|
* `BAKE_LOCAL_PLATFORM` returns the current platform's default platform
|
||||||
|
specification (e.g. `linux/amd64`).
|
||||||
|
|||||||
@@ -9,40 +9,38 @@ Start a build
|
|||||||
|
|
||||||
### Aliases
|
### Aliases
|
||||||
|
|
||||||
`docker buildx build`, `docker buildx b`
|
`build`, `b`
|
||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--add-host`](https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host) | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
|
| [`--add-host stringSlice`](https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host) | Add a custom host-to-IP mapping (format: `host:ip`) |
|
||||||
| [`--allow`](#allow) | `stringSlice` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
| [`--allow stringSlice`](#allow) | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
||||||
| [`--build-arg`](#build-arg) | `stringArray` | | Set build-time variables |
|
| [`--build-arg stringArray`](https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg) | Set build-time variables |
|
||||||
| [`--build-context`](#build-context) | `stringArray` | | Additional build contexts (e.g., name=path) |
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| [`--cache-from stringArray`](#cache-from) | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`) |
|
||||||
| [`--cache-from`](#cache-from) | `stringArray` | | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`) |
|
| [`--cache-to stringArray`](#cache-to) | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`) |
|
||||||
| [`--cache-to`](#cache-to) | `stringArray` | | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`) |
|
| [`--cgroup-parent string`](https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent) | Optional parent cgroup for the container |
|
||||||
| [`--cgroup-parent`](https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent) | `string` | | Optional parent cgroup for the container |
|
| [`-f`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f), [`--file string`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f) | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
||||||
| [`-f`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f), [`--file`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f) | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
| `--iidfile string` | Write the image ID to the file |
|
||||||
| `--iidfile` | `string` | | Write the image ID to the file |
|
| `--label stringArray` | Set metadata for an image |
|
||||||
| `--label` | `stringArray` | | Set metadata for an image |
|
| [`--load`](#load) | Shorthand for `--output=type=docker` |
|
||||||
| [`--load`](#load) | | | Shorthand for `--output=type=docker` |
|
| `--metadata-file string` | Write build result metadata to the file |
|
||||||
| [`--metadata-file`](#metadata-file) | `string` | | Write build result metadata to the file |
|
| `--network string` | Set the networking mode for the RUN instructions during build |
|
||||||
| `--network` | `string` | `default` | Set the networking mode for the `RUN` instructions during build |
|
| `--no-cache` | Do not use cache when building the image |
|
||||||
| `--no-cache` | | | Do not use cache when building the image |
|
| [`-o`](#output), [`--output stringArray`](#output) | Output destination (format: `type=local,dest=path`) |
|
||||||
| `--no-cache-filter` | `stringArray` | | Do not cache specified stages |
|
| [`--platform stringArray`](#platform) | Set target platform for build |
|
||||||
| [`-o`](#output), [`--output`](#output) | `stringArray` | | Output destination (format: `type=local,dest=path`) |
|
| [`--progress string`](#progress) | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
|
||||||
| [`--platform`](#platform) | `stringArray` | | Set target platform for build |
|
| `--pull` | Always attempt to pull a newer version of the image |
|
||||||
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
|
| [`--push`](#push) | Shorthand for `--output=type=registry` |
|
||||||
| `--pull` | | | Always attempt to pull all referenced images |
|
| `-q`, `--quiet` | Suppress the build output and print image ID on success |
|
||||||
| [`--push`](#push) | | | Shorthand for `--output=type=registry` |
|
| `--secret stringArray` | Secret file to expose to the build (format: `id=mysecret,src=/local/secret`) |
|
||||||
| `-q`, `--quiet` | | | Suppress the build output and print image ID on success |
|
| [`--shm-size bytes`](#shm-size) | Size of `/dev/shm` |
|
||||||
| [`--secret`](#secret) | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
|
| `--ssh stringArray` | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
|
||||||
| [`--shm-size`](#shm-size) | `bytes` | `0` | Size of `/dev/shm` |
|
| [`-t`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t), [`--tag stringArray`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t) | Name and optionally a tag (format: `name:tag`) |
|
||||||
| [`--ssh`](#ssh) | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
|
| [`--target string`](https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target) | Set the target build stage to build. |
|
||||||
| [`-t`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t), [`--tag`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t) | `stringArray` | | Name and optionally a tag (format: `name:tag`) |
|
| [`--ulimit ulimit`](#ulimit) | Ulimit options |
|
||||||
| [`--target`](https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target) | `string` | | Set the target build stage to build |
|
|
||||||
| [`--ulimit`](#ulimit) | `ulimit` | | Ulimit options |
|
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
@@ -54,228 +52,80 @@ to the UI of `docker build` command and takes the same flags and arguments.
|
|||||||
|
|
||||||
For documentation on most of these flags, refer to the [`docker build`
|
For documentation on most of these flags, refer to the [`docker build`
|
||||||
documentation](https://docs.docker.com/engine/reference/commandline/build/). In
|
documentation](https://docs.docker.com/engine/reference/commandline/build/). In
|
||||||
here we'll document a subset of the new flags.
|
here we'll document a subset of the new flags.
|
||||||
|
|
||||||
## Examples
|
## Examples
|
||||||
|
|
||||||
### <a name="allow"></a> Allow extra privileged entitlement (--allow)
|
|
||||||
|
|
||||||
```
|
|
||||||
--allow=ENTITLEMENT
|
|
||||||
```
|
|
||||||
|
|
||||||
Allow extra privileged entitlement. List of entitlements:
|
|
||||||
|
|
||||||
- `network.host` - Allows executions with host networking.
|
|
||||||
- `security.insecure` - Allows executions without sandbox. See
|
|
||||||
[related Dockerfile extensions](https://docs.docker.com/engine/reference/builder/#run---securitysandbox).
|
|
||||||
|
|
||||||
For entitlements to be enabled, the `buildkitd` daemon also needs to allow them
|
|
||||||
with `--allow-insecure-entitlement` (see [`create --buildkitd-flags`](buildx_create.md#buildkitd-flags))
|
|
||||||
|
|
||||||
**Examples**
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create --use --name insecure-builder --buildkitd-flags '--allow-insecure-entitlement security.insecure'
|
|
||||||
$ docker buildx build --allow security.insecure .
|
|
||||||
```
|
|
||||||
|
|
||||||
### <a name="build-arg"></a> Set build-time variables (--build-arg)
|
|
||||||
|
|
||||||
Same as [`docker build` command](https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg).
|
|
||||||
|
|
||||||
There are also useful built-in build args like:
|
|
||||||
|
|
||||||
* `BUILDKIT_CONTEXT_KEEP_GIT_DIR=<bool>` trigger git context to keep the `.git` directory
|
|
||||||
* `BUILDKIT_INLINE_BUILDINFO_ATTRS=<bool>` inline build info attributes in image config or not
|
|
||||||
* `BUILDKIT_INLINE_CACHE=<bool>` inline cache metadata to image config or not
|
|
||||||
* `BUILDKIT_MULTI_PLATFORM=<bool>` opt into deterministic output regardless of multi-platform output or not
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --build-arg BUILDKIT_MULTI_PLATFORM=1 .
|
|
||||||
```
|
|
||||||
|
|
||||||
> **Note**
|
|
||||||
>
|
|
||||||
> More built-in build args can be found in [Dockerfile reference docs](https://docs.docker.com/engine/reference/builder/#buildkit-built-in-build-args).
|
|
||||||
|
|
||||||
### <a name="build-context"></a> Additional build contexts (--build-context)
|
|
||||||
|
|
||||||
```
|
|
||||||
--build-context=name=VALUE
|
|
||||||
```
|
|
||||||
|
|
||||||
Define additional build context with specified contents. In Dockerfile the context can be accessed when `FROM name` or `--from=name` is used.
|
|
||||||
When Dockerfile defines a stage with the same name it is overwritten.
|
|
||||||
|
|
||||||
The value can be a local source directory, [local OCI layout compliant directory](https://github.com/opencontainers/image-spec/blob/main/image-layout.md), container image (with docker-image:// prefix), Git or HTTP URL.
|
|
||||||
|
|
||||||
Replace `alpine:latest` with a pinned one:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --build-context alpine=docker-image://alpine@sha256:0123456789 .
|
|
||||||
```
|
|
||||||
|
|
||||||
Expose a secondary local source directory:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --build-context project=path/to/project/source .
|
|
||||||
# docker buildx build --build-context project=https://github.com/myuser/project.git .
|
|
||||||
```
|
|
||||||
|
|
||||||
```Dockerfile
|
|
||||||
FROM alpine
|
|
||||||
COPY --from=project myfile /
|
|
||||||
```
|
|
||||||
|
|
||||||
#### <a name="source-oci-layout"></a> Source image from OCI layout directory
|
|
||||||
|
|
||||||
Source an image from a local [OCI layout compliant directory](https://github.com/opencontainers/image-spec/blob/main/image-layout.md):
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --build-context foo=oci-layout:///path/to/local/layout@sha256:abcd12345 .
|
|
||||||
```
|
|
||||||
|
|
||||||
```Dockerfile
|
|
||||||
FROM alpine
|
|
||||||
RUN apk add git
|
|
||||||
|
|
||||||
COPY --from=foo myfile /
|
|
||||||
|
|
||||||
FROM foo
|
|
||||||
```
|
|
||||||
|
|
||||||
The OCI layout directory must be compliant with the [OCI layout specification](https://github.com/opencontainers/image-spec/blob/main/image-layout.md). It looks _solely_ for hashes. It does not
|
|
||||||
do any form of `image:tag` resolution to find the hash of the manifest; that is up to you.
|
|
||||||
|
|
||||||
The format of the `--build-context` must be: `<context>=oci-layout://<path-to-local-layout>@sha256:<hash-of-manifest>`, where:
|
|
||||||
|
|
||||||
* `context` is the name of the build context as used in the `Dockerfile`.
|
|
||||||
* `path-to-local-layout` is the path on the local machine, where you are running `docker build`, to the spec-compliant OCI layout.
|
|
||||||
* `hash-of-manifest` is the hash of the manifest for the image. It can be a single-architecture manifest or a multi-architecture index.
|
|
||||||
|
|
||||||
### <a name="builder"></a> Override the configured builder instance (--builder)
|
### <a name="builder"></a> Override the configured builder instance (--builder)
|
||||||
|
|
||||||
Same as [`buildx --builder`](buildx.md#builder).
|
Same as [`buildx --builder`](buildx.md#builder).
|
||||||
|
|
||||||
### <a name="cache-from"></a> Use an external cache source for a build (--cache-from)
|
### <a name="platform"></a> Set the target platforms for the build (--platform)
|
||||||
|
|
||||||
```
|
```
|
||||||
--cache-from=[NAME|type=TYPE[,KEY=VALUE]]
|
--platform=value[,value]
|
||||||
```
|
```
|
||||||
|
|
||||||
Use an external cache source for a build. Supported types are `registry`,
|
Set the target platform for the build. All `FROM` commands inside the Dockerfile
|
||||||
`local` and `gha`.
|
without their own `--platform` flag will pull base images for this platform and
|
||||||
|
this value will also be the platform of the resulting image. The default value
|
||||||
|
will be the current platform of the buildkit daemon.
|
||||||
|
|
||||||
- [`registry` source](https://github.com/moby/buildkit#registry-push-image-and-cache-separately)
|
When using `docker-container` driver with `buildx`, this flag can accept multiple
|
||||||
can import cache from a cache manifest or (special) image configuration on the
|
values as an input separated by a comma. With multiple values the result will be
|
||||||
registry.
|
built for all of the specified platforms and joined together into a single manifest
|
||||||
- [`local` source](https://github.com/moby/buildkit#local-directory-1) can
|
list.
|
||||||
import cache from local files previously exported with `--cache-to`.
|
|
||||||
- [`gha` source](https://github.com/moby/buildkit#github-actions-cache-experimental)
|
|
||||||
can import cache from a previously exported cache with `--cache-to` in your
|
|
||||||
GitHub repository
|
|
||||||
|
|
||||||
If no type is specified, `registry` exporter is used with a specified reference.
|
If the `Dockerfile` needs to invoke the `RUN` command, the builder needs runtime
|
||||||
|
support for the specified platform. In a clean setup, you can only execute `RUN`
|
||||||
|
commands for your system architecture.
|
||||||
|
If your kernel supports [`binfmt_misc`](https://en.wikipedia.org/wiki/Binfmt_misc)
|
||||||
|
launchers for secondary architectures, buildx will pick them up automatically.
|
||||||
|
Docker desktop releases come with `binfmt_misc` automatically configured for `arm64`
|
||||||
|
and `arm` architectures. You can see what runtime platforms your current builder
|
||||||
|
instance supports by running `docker buildx inspect --bootstrap`.
|
||||||
|
|
||||||
`docker` driver currently only supports importing build cache from the registry.
|
Inside a `Dockerfile`, you can access the current platform value through
|
||||||
|
`TARGETPLATFORM` build argument. Please refer to the [`docker build`
|
||||||
|
documentation](https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope)
|
||||||
|
for the full description of automatic platform argument variants.
|
||||||
|
|
||||||
|
The formatting for the platform specifier is defined in the [containerd source
|
||||||
|
code](https://github.com/containerd/containerd/blob/v1.4.3/platforms/platforms.go#L63).
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx build --cache-from=user/app:cache .
|
$ docker buildx build --platform=linux/arm64 .
|
||||||
$ docker buildx build --cache-from=user/app .
|
$ docker buildx build --platform=linux/amd64,linux/arm64,linux/arm/v7 .
|
||||||
$ docker buildx build --cache-from=type=registry,ref=user/app .
|
$ docker buildx build --platform=darwin .
|
||||||
$ docker buildx build --cache-from=type=local,src=path/to/cache .
|
|
||||||
$ docker buildx build --cache-from=type=gha .
|
|
||||||
```
|
```
|
||||||
|
|
||||||
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
|
### <a name="progress"></a> Set type of progress output (--progress)
|
||||||
|
|
||||||
### <a name="cache-to"></a> Export build cache to an external cache destination (--cache-to)
|
|
||||||
|
|
||||||
```
|
```
|
||||||
--cache-to=[NAME|type=TYPE[,KEY=VALUE]]
|
--progress=VALUE
|
||||||
```
|
```
|
||||||
|
|
||||||
Export build cache to an external cache destination. Supported types are
|
Set type of progress output (auto, plain, tty). Use plain to show container
|
||||||
`registry`, `local`, `inline` and `gha`.
|
output (default "auto").
|
||||||
|
|
||||||
- [`registry` type](https://github.com/moby/buildkit#registry-push-image-and-cache-separately) exports build cache to a cache manifest in the registry.
|
> You can also use the `BUILDKIT_PROGRESS` environment variable to set
|
||||||
- [`local` type](https://github.com/moby/buildkit#local-directory-1) type
|
> its value.
|
||||||
exports cache to a local directory on the client.
|
|
||||||
- [`inline` type](https://github.com/moby/buildkit#inline-push-image-and-cache-together)
|
|
||||||
type writes the cache metadata into the image configuration.
|
|
||||||
- [`gha` type](https://github.com/moby/buildkit#github-actions-cache-experimental)
|
|
||||||
type exports cache through the [Github Actions Cache service API](https://github.com/tonistiigi/go-actions-cache/blob/master/api.md#authentication).
|
|
||||||
|
|
||||||
`docker` driver currently only supports exporting inline cache metadata to image
|
The following example uses `plain` output during the build:
|
||||||
configuration. Alternatively, `--build-arg BUILDKIT_INLINE_CACHE=1` can be used
|
|
||||||
to trigger inline cache exporter.
|
|
||||||
|
|
||||||
Attribute key:
|
|
||||||
|
|
||||||
- `mode` - Specifies how many layers are exported with the cache. `min` only
|
|
||||||
exports layers already in the final build stage, `max` exports layers for
|
|
||||||
all stages. Metadata is always exported for the whole build.
|
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx build --cache-to=user/app:cache .
|
$ docker buildx build --load --progress=plain .
|
||||||
$ docker buildx build --cache-to=type=inline .
|
|
||||||
$ docker buildx build --cache-to=type=registry,ref=user/app .
|
|
||||||
$ docker buildx build --cache-to=type=local,dest=path/to/cache .
|
|
||||||
$ docker buildx build --cache-to=type=gha .
|
|
||||||
```
|
|
||||||
|
|
||||||
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
|
#1 [internal] load build definition from Dockerfile
|
||||||
|
#1 transferring dockerfile: 227B 0.0s done
|
||||||
|
#1 DONE 0.1s
|
||||||
|
|
||||||
### <a name="load"></a> Load the single-platform build result to `docker images` (--load)
|
#2 [internal] load .dockerignore
|
||||||
|
#2 transferring context: 129B 0.0s done
|
||||||
Shorthand for [`--output=type=docker`](#docker). Will automatically load the
|
#2 DONE 0.0s
|
||||||
single-platform build result to `docker images`.
|
...
|
||||||
|
|
||||||
### <a name="metadata-file"></a> Write build result metadata to the file (--metadata-file)
|
|
||||||
|
|
||||||
To output build metadata such as the image digest, pass the `--metadata-file` flag.
|
|
||||||
The metadata will be written as a JSON object to the specified file. The
|
|
||||||
directory of the specified file must already exist and be writable.
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --load --metadata-file metadata.json .
|
|
||||||
$ cat metadata.json
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"containerimage.buildinfo": {
|
|
||||||
"frontend": "dockerfile.v0",
|
|
||||||
"attrs": {
|
|
||||||
"context": "https://github.com/crazy-max/buildkit-buildsources-test.git#master",
|
|
||||||
"filename": "Dockerfile",
|
|
||||||
"source": "docker/dockerfile:master"
|
|
||||||
},
|
|
||||||
"sources": [
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
|
|
||||||
"pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/library/alpine:3.13",
|
|
||||||
"pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"containerimage.config.digest": "sha256:2937f66a9722f7f4a2df583de2f8cb97fc9196059a410e7f00072fc918930e66",
|
|
||||||
"containerimage.descriptor": {
|
|
||||||
"annotations": {
|
|
||||||
"config.digest": "sha256:2937f66a9722f7f4a2df583de2f8cb97fc9196059a410e7f00072fc918930e66",
|
|
||||||
"org.opencontainers.image.created": "2022-02-08T21:28:03Z"
|
|
||||||
},
|
|
||||||
"digest": "sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3",
|
|
||||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
|
||||||
"size": 506
|
|
||||||
},
|
|
||||||
"containerimage.digest": "sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3"
|
|
||||||
}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### <a name="output"></a> Set the export action for the build result (-o, --output)
|
### <a name="output"></a> Set the export action for the build result (-o, --output)
|
||||||
@@ -296,6 +146,8 @@ If just the path is specified as a value, `buildx` will use the local exporter
|
|||||||
with this path as the destination. If the value is "-", `buildx` will use `tar`
|
with this path as the destination. If the value is "-", `buildx` will use `tar`
|
||||||
exporter and write to `stdout`.
|
exporter and write to `stdout`.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx build -o . .
|
$ docker buildx build -o . .
|
||||||
$ docker buildx build -o outdir .
|
$ docker buildx build -o outdir .
|
||||||
@@ -349,7 +201,7 @@ The most common usecase for multi-platform images is to directly push to a regis
|
|||||||
Attribute keys:
|
Attribute keys:
|
||||||
|
|
||||||
- `dest` - destination path where tarball will be written. If not specified the
|
- `dest` - destination path where tarball will be written. If not specified the
|
||||||
tar will be loaded automatically to the current docker instance.
|
tar will be loaded automatically to the current docker instance.
|
||||||
- `context` - name for the docker context where to import the result
|
- `context` - name for the docker context where to import the result
|
||||||
|
|
||||||
#### `image`
|
#### `image`
|
||||||
@@ -367,169 +219,118 @@ Attribute keys:
|
|||||||
|
|
||||||
The `registry` exporter is a shortcut for `type=image,push=true`.
|
The `registry` exporter is a shortcut for `type=image,push=true`.
|
||||||
|
|
||||||
### <a name="platform"></a> Set the target platforms for the build (--platform)
|
|
||||||
|
|
||||||
```
|
|
||||||
--platform=value[,value]
|
|
||||||
```
|
|
||||||
|
|
||||||
Set the target platform for the build. All `FROM` commands inside the Dockerfile
|
|
||||||
without their own `--platform` flag will pull base images for this platform and
|
|
||||||
this value will also be the platform of the resulting image. The default value
|
|
||||||
will be the current platform of the buildkit daemon.
|
|
||||||
|
|
||||||
When using `docker-container` driver with `buildx`, this flag can accept multiple
|
|
||||||
values as an input separated by a comma. With multiple values the result will be
|
|
||||||
built for all of the specified platforms and joined together into a single manifest
|
|
||||||
list.
|
|
||||||
|
|
||||||
If the `Dockerfile` needs to invoke the `RUN` command, the builder needs runtime
|
|
||||||
support for the specified platform. In a clean setup, you can only execute `RUN`
|
|
||||||
commands for your system architecture.
|
|
||||||
If your kernel supports [`binfmt_misc`](https://en.wikipedia.org/wiki/Binfmt_misc)
|
|
||||||
launchers for secondary architectures, buildx will pick them up automatically.
|
|
||||||
Docker desktop releases come with `binfmt_misc` automatically configured for `arm64`
|
|
||||||
and `arm` architectures. You can see what runtime platforms your current builder
|
|
||||||
instance supports by running `docker buildx inspect --bootstrap`.
|
|
||||||
|
|
||||||
Inside a `Dockerfile`, you can access the current platform value through
|
|
||||||
`TARGETPLATFORM` build argument. Please refer to the [`docker build`
|
|
||||||
documentation](https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope)
|
|
||||||
for the full description of automatic platform argument variants.
|
|
||||||
|
|
||||||
The formatting for the platform specifier is defined in the [containerd source
|
|
||||||
code](https://github.com/containerd/containerd/blob/v1.4.3/platforms/platforms.go#L63).
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --platform=linux/arm64 .
|
|
||||||
$ docker buildx build --platform=linux/amd64,linux/arm64,linux/arm/v7 .
|
|
||||||
$ docker buildx build --platform=darwin .
|
|
||||||
```
|
|
||||||
|
|
||||||
### <a name="progress"></a> Set type of progress output (--progress)
|
|
||||||
|
|
||||||
```
|
|
||||||
--progress=VALUE
|
|
||||||
```
|
|
||||||
|
|
||||||
Set type of progress output (auto, plain, tty). Use plain to show container
|
|
||||||
output (default "auto").
|
|
||||||
|
|
||||||
> **Note**
|
|
||||||
>
|
|
||||||
> You can also use the `BUILDKIT_PROGRESS` environment variable to set its value.
|
|
||||||
|
|
||||||
The following example uses `plain` output during the build:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --load --progress=plain .
|
|
||||||
|
|
||||||
#1 [internal] load build definition from Dockerfile
|
|
||||||
#1 transferring dockerfile: 227B 0.0s done
|
|
||||||
#1 DONE 0.1s
|
|
||||||
|
|
||||||
#2 [internal] load .dockerignore
|
|
||||||
#2 transferring context: 129B 0.0s done
|
|
||||||
#2 DONE 0.0s
|
|
||||||
...
|
|
||||||
```
|
|
||||||
|
|
||||||
> **Note**
|
|
||||||
>
|
|
||||||
> Check also our [Color output controls guide](https://github.com/docker/buildx/blob/master/docs/guides/color-output.md)
|
|
||||||
> for modifying the colors that are used to output information to the terminal.
|
|
||||||
|
|
||||||
### <a name="push"></a> Push the build result to a registry (--push)
|
### <a name="push"></a> Push the build result to a registry (--push)
|
||||||
|
|
||||||
Shorthand for [`--output=type=registry`](#registry). Will automatically push the
|
Shorthand for [`--output=type=registry`](#registry). Will automatically push the
|
||||||
build result to registry.
|
build result to registry.
|
||||||
|
|
||||||
### <a name="secret"></a> Secret to expose to the build (--secret)
|
### <a name="load"></a> Load the single-platform build result to `docker images` (--load)
|
||||||
|
|
||||||
|
Shorthand for [`--output=type=docker`](#docker). Will automatically load the
|
||||||
|
single-platform build result to `docker images`.
|
||||||
|
|
||||||
|
### <a name="cache-from"></a> Use an external cache source for a build (--cache-from)
|
||||||
|
|
||||||
```
|
```
|
||||||
--secret=[type=TYPE[,KEY=VALUE]]
|
--cache-from=[NAME|type=TYPE[,KEY=VALUE]]
|
||||||
```
|
```
|
||||||
|
|
||||||
Exposes secret to the build. The secret can be used by the build using
|
Use an external cache source for a build. Supported types are `registry`,
|
||||||
[`RUN --mount=type=secret` mount](https://docs.docker.com/engine/reference/builder/#run---mounttypesecret).
|
`local` and `gha`.
|
||||||
|
|
||||||
If `type` is unset it will be detected. Supported types are:
|
- [`registry` source](https://github.com/moby/buildkit#registry-push-image-and-cache-separately)
|
||||||
|
can import cache from a cache manifest or (special) image configuration on the
|
||||||
|
registry.
|
||||||
|
- [`local` source](https://github.com/moby/buildkit#local-directory-1) can
|
||||||
|
import cache from local files previously exported with `--cache-to`.
|
||||||
|
- [`gha` source](https://github.com/moby/buildkit#github-actions-cache-experimental)
|
||||||
|
can import cache from a previously exported cache with `--cache-to` in your
|
||||||
|
GitHub repository
|
||||||
|
|
||||||
#### `file`
|
If no type is specified, `registry` exporter is used with a specified reference.
|
||||||
|
|
||||||
Attribute keys:
|
`docker` driver currently only supports importing build cache from the registry.
|
||||||
|
|
||||||
- `id` - ID of the secret. Defaults to basename of the `src` path.
|
**Examples**
|
||||||
- `src`, `source` - Secret filename. `id` used if unset.
|
|
||||||
|
|
||||||
```dockerfile
|
|
||||||
# syntax=docker/dockerfile:1.4
|
|
||||||
FROM python:3
|
|
||||||
RUN pip install awscli
|
|
||||||
RUN --mount=type=secret,id=aws,target=/root/.aws/credentials \
|
|
||||||
aws s3 cp s3://... ...
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx build --secret id=aws,src=$HOME/.aws/credentials .
|
$ docker buildx build --cache-from=user/app:cache .
|
||||||
|
$ docker buildx build --cache-from=user/app .
|
||||||
|
$ docker buildx build --cache-from=type=registry,ref=user/app .
|
||||||
|
$ docker buildx build --cache-from=type=local,src=path/to/cache .
|
||||||
|
$ docker buildx build --cache-from=type=gha .
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `env`
|
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
|
||||||
|
|
||||||
Attribute keys:
|
### <a name="cache-to"></a> Export build cache to an external cache destination (--cache-to)
|
||||||
|
|
||||||
- `id` - ID of the secret. Defaults to `env` name.
|
|
||||||
- `env` - Secret environment variable. `id` used if unset, otherwise will look for `src`, `source` if `id` unset.
|
|
||||||
|
|
||||||
```dockerfile
|
|
||||||
# syntax=docker/dockerfile:1.4
|
|
||||||
FROM node:alpine
|
|
||||||
RUN --mount=type=bind,target=. \
|
|
||||||
--mount=type=secret,id=SECRET_TOKEN \
|
|
||||||
SECRET_TOKEN=$(cat /run/secrets/SECRET_TOKEN) yarn run test
|
|
||||||
```
|
```
|
||||||
|
--cache-to=[NAME|type=TYPE[,KEY=VALUE]]
|
||||||
|
```
|
||||||
|
|
||||||
|
Export build cache to an external cache destination. Supported types are
|
||||||
|
`registry`, `local`, `inline` and `gha`.
|
||||||
|
|
||||||
|
- [`registry` type](https://github.com/moby/buildkit#registry-push-image-and-cache-separately) exports build cache to a cache manifest in the registry.
|
||||||
|
- [`local` type](https://github.com/moby/buildkit#local-directory-1) type
|
||||||
|
exports cache to a local directory on the client.
|
||||||
|
- [`inline` type](https://github.com/moby/buildkit#inline-push-image-and-cache-together)
|
||||||
|
type writes the cache metadata into the image configuration.
|
||||||
|
- [`gha` type](https://github.com/moby/buildkit#github-actions-cache-experimental)
|
||||||
|
type exports cache through the [Github Actions Cache service API](https://github.com/tonistiigi/go-actions-cache/blob/master/api.md#authentication).
|
||||||
|
|
||||||
|
`docker` driver currently only supports exporting inline cache metadata to image
|
||||||
|
configuration. Alternatively, `--build-arg BUILDKIT_INLINE_CACHE=1` can be used
|
||||||
|
to trigger inline cache exporter.
|
||||||
|
|
||||||
|
Attribute key:
|
||||||
|
|
||||||
|
- `mode` - Specifies how many layers are exported with the cache. `min` only
|
||||||
|
exports layers already in the final build stage, `max` exports layers for
|
||||||
|
all stages. Metadata is always exported for the whole build.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ SECRET_TOKEN=token docker buildx build --secret id=SECRET_TOKEN .
|
$ docker buildx build --cache-to=user/app:cache .
|
||||||
|
$ docker buildx build --cache-to=type=inline .
|
||||||
|
$ docker buildx build --cache-to=type=registry,ref=user/app .
|
||||||
|
$ docker buildx build --cache-to=type=local,dest=path/to/cache .
|
||||||
|
$ docker buildx build --cache-to=type=gha .
|
||||||
```
|
```
|
||||||
|
|
||||||
### <a name="shm-size"></a> Size of /dev/shm (--shm-size)
|
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
|
||||||
|
|
||||||
|
### <a name="allow"></a> Allow extra privileged entitlement (--allow)
|
||||||
|
|
||||||
|
```
|
||||||
|
--allow=ENTITLEMENT
|
||||||
|
```
|
||||||
|
|
||||||
|
Allow extra privileged entitlement. List of entitlements:
|
||||||
|
|
||||||
|
- `network.host` - Allows executions with host networking.
|
||||||
|
- `security.insecure` - Allows executions without sandbox. See
|
||||||
|
[related Dockerfile extensions](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md#run---securityinsecuresandbox).
|
||||||
|
|
||||||
|
For entitlements to be enabled, the `buildkitd` daemon also needs to allow them
|
||||||
|
with `--allow-insecure-entitlement` (see [`create --buildkitd-flags`](buildx_create.md#buildkitd-flags))
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx create --use --name insecure-builder --buildkitd-flags '--allow-insecure-entitlement security.insecure'
|
||||||
|
$ docker buildx build --allow security.insecure .
|
||||||
|
```
|
||||||
|
|
||||||
|
### <a name="shm-size"></a> Size of `/dev/shm` (--shm-size)
|
||||||
|
|
||||||
The format is `<number><unit>`. `number` must be greater than `0`. Unit is
|
The format is `<number><unit>`. `number` must be greater than `0`. Unit is
|
||||||
optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g`
|
optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g`
|
||||||
(gigabytes). If you omit the unit, the system uses bytes.
|
(gigabytes). If you omit the unit, the system uses bytes.
|
||||||
|
|
||||||
### <a name="ssh"></a> SSH agent socket or keys to expose to the build (--ssh)
|
|
||||||
|
|
||||||
```
|
|
||||||
--ssh=default|<id>[=<socket>|<key>[,<key>]]
|
|
||||||
```
|
|
||||||
|
|
||||||
This can be useful when some commands in your Dockerfile need specific SSH
|
|
||||||
authentication (e.g., cloning a private repository).
|
|
||||||
|
|
||||||
`--ssh` exposes SSH agent socket or keys to the build and can be used with the
|
|
||||||
[`RUN --mount=type=ssh` mount](https://docs.docker.com/engine/reference/builder/#run---mounttypessh).
|
|
||||||
|
|
||||||
Example to access Gitlab using an SSH agent socket:
|
|
||||||
|
|
||||||
```dockerfile
|
|
||||||
# syntax=docker/dockerfile:1.4
|
|
||||||
FROM alpine
|
|
||||||
RUN apk add --no-cache openssh-client
|
|
||||||
RUN mkdir -p -m 0700 ~/.ssh && ssh-keyscan gitlab.com >> ~/.ssh/known_hosts
|
|
||||||
RUN --mount=type=ssh ssh -q -T git@gitlab.com 2>&1 | tee /hello
|
|
||||||
# "Welcome to GitLab, @GITLAB_USERNAME_ASSOCIATED_WITH_SSHKEY" should be printed here
|
|
||||||
# when the build progress output type is set to `plain`.
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ eval $(ssh-agent)
|
|
||||||
$ ssh-add ~/.ssh/id_rsa
|
|
||||||
(Input your passphrase here)
|
|
||||||
$ docker buildx build --ssh default=$SSH_AUTH_SOCK .
|
|
||||||
```
|
|
||||||
|
|
||||||
### <a name="ulimit"></a> Set ulimits (--ulimit)
|
### <a name="ulimit"></a> Set ulimits (--ulimit)
|
||||||
|
|
||||||
`--ulimit` is specified with a soft and hard limit as such:
|
`--ulimit` is specified with a soft and hard limit as such:
|
||||||
|
|||||||
@@ -9,19 +9,19 @@ Create a new builder instance
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--append`](#append) | | | Append a node to builder instead of changing it |
|
| [`--append`](#append) | Append a node to builder instead of changing it |
|
||||||
| `--bootstrap` | | | Boot builder after creation |
|
| `--bootstrap` | Boot builder after creation |
|
||||||
| [`--buildkitd-flags`](#buildkitd-flags) | `string` | | Flags for buildkitd daemon |
|
| [`--buildkitd-flags string`](#buildkitd-flags) | Flags for buildkitd daemon |
|
||||||
| [`--config`](#config) | `string` | | BuildKit config file |
|
| [`--config string`](#config) | BuildKit config file |
|
||||||
| [`--driver`](#driver) | `string` | | Driver to use (available: `docker-container`, `kubernetes`, `remote`) |
|
| [`--driver string`](#driver) | Driver to use (available: `docker`, `docker-container`, `kubernetes`) |
|
||||||
| [`--driver-opt`](#driver-opt) | `stringArray` | | Options for the driver |
|
| [`--driver-opt stringArray`](#driver-opt) | Options for the driver |
|
||||||
| [`--leave`](#leave) | | | Remove a node from builder instead of changing it |
|
| [`--leave`](#leave) | Remove a node from builder instead of changing it |
|
||||||
| [`--name`](#name) | `string` | | Builder instance name |
|
| [`--name string`](#name) | Builder instance name |
|
||||||
| [`--node`](#node) | `string` | | Create/modify node with given name |
|
| [`--node string`](#node) | Create/modify node with given name |
|
||||||
| [`--platform`](#platform) | `stringArray` | | Fixed platforms for current node |
|
| [`--platform stringArray`](#platform) | Fixed platforms for current node |
|
||||||
| [`--use`](#use) | | | Set the current builder instance |
|
| [`--use`](#use) | Set the current builder instance |
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
@@ -47,6 +47,8 @@ The `--append` flag changes the action of the command to append a new node to an
|
|||||||
existing builder specified by `--name`. Buildx will choose an appropriate node
|
existing builder specified by `--name`. Buildx will choose an appropriate node
|
||||||
for a build based on the platforms it supports.
|
for a build based on the platforms it supports.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx create mycontext1
|
$ docker buildx create mycontext1
|
||||||
eager_beaver
|
eager_beaver
|
||||||
@@ -65,6 +67,8 @@ Adds flags when starting the buildkitd daemon. They take precedence over the
|
|||||||
configuration file specified by [`--config`](#config). See `buildkitd --help`
|
configuration file specified by [`--config`](#config). See `buildkitd --help`
|
||||||
for the available flags.
|
for the available flags.
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
```
|
```
|
||||||
--buildkitd-flags '--debug --debugaddr 0.0.0.0:6666'
|
--buildkitd-flags '--debug --debugaddr 0.0.0.0:6666'
|
||||||
```
|
```
|
||||||
@@ -79,11 +83,6 @@ Specifies the configuration file for the buildkitd daemon to use. The configurat
|
|||||||
can be overridden by [`--buildkitd-flags`](#buildkitd-flags).
|
can be overridden by [`--buildkitd-flags`](#buildkitd-flags).
|
||||||
See an [example buildkitd configuration file](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md).
|
See an [example buildkitd configuration file](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md).
|
||||||
|
|
||||||
If the configuration file is not specified, will look for one by default in:
|
|
||||||
* `$BUILDX_CONFIG/buildkitd.default.toml`
|
|
||||||
* `$DOCKER_CONFIG/buildx/buildkitd.default.toml`
|
|
||||||
* `~/.docker/buildx/buildkitd.default.toml`
|
|
||||||
|
|
||||||
Note that if you create a `docker-container` builder and have specified
|
Note that if you create a `docker-container` builder and have specified
|
||||||
certificates for registries in the `buildkitd.toml` configuration, the files
|
certificates for registries in the `buildkitd.toml` configuration, the files
|
||||||
will be copied into the container under `/etc/buildkit/certs` and configuration
|
will be copied into the container under `/etc/buildkit/certs` and configuration
|
||||||
@@ -123,59 +122,56 @@ Unlike `docker` driver, built images will not automatically appear in
|
|||||||
`docker images` and [`build --load`](buildx_build.md#load) needs to be used
|
`docker images` and [`build --load`](buildx_build.md#load) needs to be used
|
||||||
to achieve that.
|
to achieve that.
|
||||||
|
|
||||||
#### `remote` driver
|
|
||||||
|
|
||||||
Uses a remote instance of buildkitd over an arbitrary connection. With this
|
|
||||||
driver, you manually create and manage instances of buildkit yourself, and
|
|
||||||
configure buildx to point at it.
|
|
||||||
|
|
||||||
Unlike `docker` driver, built images will not automatically appear in
|
|
||||||
`docker images` and [`build --load`](buildx_build.md#load) needs to be used
|
|
||||||
to achieve that.
|
|
||||||
|
|
||||||
### <a name="driver-opt"></a> Set additional driver-specific options (--driver-opt)
|
### <a name="driver-opt"></a> Set additional driver-specific options (--driver-opt)
|
||||||
|
|
||||||
```
|
```
|
||||||
--driver-opt OPTIONS
|
--driver-opt OPTIONS
|
||||||
```
|
```
|
||||||
|
|
||||||
Passes additional driver-specific options.
|
Passes additional driver-specific options. Details for each driver:
|
||||||
|
|
||||||
Note: When using quoted values for example for the `nodeselector` or
|
- `docker` - No driver options
|
||||||
`tolerations` options, ensure that quotes are escaped correctly for your shell.
|
- `docker-container`
|
||||||
|
- `image=IMAGE` - Sets the container image to be used for running buildkit.
|
||||||
|
- `network=NETMODE` - Sets the network mode for running the buildkit container.
|
||||||
|
- `cgroup-parent=CGROUP` - Sets the cgroup parent of the buildkit container if docker is using the "cgroupfs" driver. Defaults to `/docker/buildx`.
|
||||||
|
- `kubernetes`
|
||||||
|
- `image=IMAGE` - Sets the container image to be used for running buildkit.
|
||||||
|
- `namespace=NS` - Sets the Kubernetes namespace. Defaults to the current namespace.
|
||||||
|
- `replicas=N` - Sets the number of `Pod` replicas. Defaults to 1.
|
||||||
|
- `requests.cpu` - Sets the request CPU value specified in units of Kubernetes CPU. Example `requests.cpu=100m`, `requests.cpu=2`
|
||||||
|
- `requests.memory` - Sets the request memory value specified in bytes or with a valid suffix. Example `requests.memory=500Mi`, `requests.memory=4G`
|
||||||
|
- `limits.cpu` - Sets the limit CPU value specified in units of Kubernetes CPU. Example `limits.cpu=100m`, `limits.cpu=2`
|
||||||
|
- `limits.memory` - Sets the limit memory value specified in bytes or with a valid suffix. Example `limits.memory=500Mi`, `limits.memory=4G`
|
||||||
|
- `nodeselector="label1=value1,label2=value2"` - Sets the kv of `Pod` nodeSelector. No Defaults. Example `nodeselector=kubernetes.io/arch=arm64`
|
||||||
|
- `rootless=(true|false)` - Run the container as a non-root user without `securityContext.privileged`. [Using Ubuntu host kernel is recommended](https://github.com/moby/buildkit/blob/master/docs/rootless.md). Defaults to false.
|
||||||
|
- `loadbalance=(sticky|random)` - Load-balancing strategy. If set to "sticky", the pod is chosen using the hash of the context path. Defaults to "sticky"
|
||||||
|
- `qemu.install=(true|false)` - Install QEMU emulation for multi platforms support.
|
||||||
|
- `qemu.image=IMAGE` - Sets the QEMU emulation image. Defaults to `tonistiigi/binfmt:latest`
|
||||||
|
|
||||||
#### `docker` driver
|
**Examples**
|
||||||
|
|
||||||
No driver options.
|
#### Use a custom network
|
||||||
|
|
||||||
#### `docker-container` driver
|
```console
|
||||||
|
$ docker network create foonet
|
||||||
|
$ docker buildx create --name builder --driver docker-container --driver-opt network=foonet --use
|
||||||
|
$ docker buildx inspect --bootstrap
|
||||||
|
$ docker inspect buildx_buildkit_builder0 --format={{.NetworkSettings.Networks}}
|
||||||
|
map[foonet:0xc00018c0c0]
|
||||||
|
```
|
||||||
|
|
||||||
- `image=IMAGE` - Sets the container image to be used for running buildkit.
|
#### OpenTelemetry support
|
||||||
- `network=NETMODE` - Sets the network mode for running the buildkit container.
|
|
||||||
- `cgroup-parent=CGROUP` - Sets the cgroup parent of the buildkit container if docker is using the "cgroupfs" driver. Defaults to `/docker/buildx`.
|
|
||||||
|
|
||||||
#### `kubernetes` driver
|
To capture the trace to [Jaeger](https://github.com/jaegertracing/jaeger), set
|
||||||
|
`JAEGER_TRACE` environment variable to the collection address using the `driver-opt`:
|
||||||
|
|
||||||
- `image=IMAGE` - Sets the container image to be used for running buildkit.
|
```console
|
||||||
- `namespace=NS` - Sets the Kubernetes namespace. Defaults to the current namespace.
|
$ docker run -d --name jaeger -p 6831:6831/udp -p 16686:16686 jaegertracing/all-in-one
|
||||||
- `replicas=N` - Sets the number of `Pod` replicas. Defaults to 1.
|
$ docker buildx create --name builder --driver docker-container --driver-opt network=host --driver-opt env.JAEGER_TRACE=localhost:6831 --use
|
||||||
- `requests.cpu` - Sets the request CPU value specified in units of Kubernetes CPU. Example `requests.cpu=100m`, `requests.cpu=2`
|
$ docker buildx inspect --bootstrap
|
||||||
- `requests.memory` - Sets the request memory value specified in bytes or with a valid suffix. Example `requests.memory=500Mi`, `requests.memory=4G`
|
# buildx command should be traced at http://127.0.0.1:16686/
|
||||||
- `limits.cpu` - Sets the limit CPU value specified in units of Kubernetes CPU. Example `limits.cpu=100m`, `limits.cpu=2`
|
```
|
||||||
- `limits.memory` - Sets the limit memory value specified in bytes or with a valid suffix. Example `limits.memory=500Mi`, `limits.memory=4G`
|
|
||||||
- `"nodeselector=label1=value1,label2=value2"` - Sets the kv of `Pod` nodeSelector. No Defaults. Example `nodeselector=kubernetes.io/arch=arm64`
|
|
||||||
- `"tolerations=key=foo,value=bar;key=foo2,operator=exists;key=foo3,effect=NoSchedule"` - Sets the `Pod` tolerations. Accepts the same values as the kube manifest tolera>tions. Key-value pairs are separated by `,`, tolerations are separated by `;`. No Defaults. Example `tolerations=operator=exists`
|
|
||||||
- `rootless=(true|false)` - Run the container as a non-root user without `securityContext.privileged`. Needs Kubernetes 1.19 or later. [Using Ubuntu host kernel is recommended](https://github.com/moby/buildkit/blob/master/docs/rootless.md). Defaults to false.
|
|
||||||
- `loadbalance=(sticky|random)` - Load-balancing strategy. If set to "sticky", the pod is chosen using the hash of the context path. Defaults to "sticky"
|
|
||||||
- `qemu.install=(true|false)` - Install QEMU emulation for multi platforms support.
|
|
||||||
- `qemu.image=IMAGE` - Sets the QEMU emulation image. Defaults to `tonistiigi/binfmt:latest`
|
|
||||||
|
|
||||||
#### `remote` driver
|
|
||||||
|
|
||||||
- `key=KEY` - Sets the TLS client key.
|
|
||||||
- `cert=CERT` - Sets the TLS client certificate to present to buildkitd.
|
|
||||||
- `cacert=CACERT` - Sets the TLS certificate authority used for validation.
|
|
||||||
- `servername=SERVER` - Sets the TLS server name to be used in requests (defaults to the endpoint hostname).
|
|
||||||
|
|
||||||
### <a name="leave"></a> Remove a node from a builder (--leave)
|
### <a name="leave"></a> Remove a node from a builder (--leave)
|
||||||
|
|
||||||
@@ -183,6 +179,8 @@ The `--leave` flag changes the action of the command to remove a node from a
|
|||||||
builder. The builder needs to be specified with `--name` and node that is removed
|
builder. The builder needs to be specified with `--name` and node that is removed
|
||||||
is set with `--node`.
|
is set with `--node`.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx create --name mybuilder --node mybuilder0 --leave
|
$ docker buildx create --name mybuilder --node mybuilder0 --leave
|
||||||
```
|
```
|
||||||
@@ -206,7 +204,7 @@ The `--node` flag specifies the name of the node to be created or modified. If
|
|||||||
none is specified, it is the name of the builder it belongs to, with an index
|
none is specified, it is the name of the builder it belongs to, with an index
|
||||||
number suffix.
|
number suffix.
|
||||||
|
|
||||||
### <a name="platform"></a> Set the platforms supported by the node (--platform)
|
### <a name="platform"></a> Set the platforms supported by the node
|
||||||
|
|
||||||
```
|
```
|
||||||
--platform PLATFORMS
|
--platform PLATFORMS
|
||||||
@@ -218,12 +216,14 @@ will also automatically detect the platforms it supports, but manual values take
|
|||||||
priority over the detected ones and can be used when multiple nodes support
|
priority over the detected ones and can be used when multiple nodes support
|
||||||
building for the same platform.
|
building for the same platform.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx create --platform linux/amd64
|
$ docker buildx create --platform linux/amd64
|
||||||
$ docker buildx create --platform linux/arm64,linux/arm/v8
|
$ docker buildx create --platform linux/arm64,linux/arm/v8
|
||||||
```
|
```
|
||||||
|
|
||||||
### <a name="use"></a> Automatically switch to the newly created builder (--use)
|
### <a name="use"></a> Automatically switch to the newly created builder
|
||||||
|
|
||||||
The `--use` flag automatically switches the current builder to the newly created
|
The `--use` flag automatically switches the current builder to the newly created
|
||||||
one. Equivalent to running `docker buildx use $(docker buildx create ...)`.
|
one. Equivalent to running `docker buildx use $(docker buildx create ...)`.
|
||||||
|
|||||||
@@ -9,11 +9,11 @@ Disk usage
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
| `--filter` | `filter` | | Provide filter values |
|
| `--filter filter` | Provide filter values |
|
||||||
| `--verbose` | | | Provide a more verbose output |
|
| `--verbose` | Provide a more verbose output |
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
|
|||||||
@@ -12,14 +12,14 @@ Commands to work on images in registry
|
|||||||
| Name | Description |
|
| Name | Description |
|
||||||
| --- | --- |
|
| --- | --- |
|
||||||
| [`create`](buildx_imagetools_create.md) | Create a new image based on source images |
|
| [`create`](buildx_imagetools_create.md) | Create a new image based on source images |
|
||||||
| [`inspect`](buildx_imagetools_inspect.md) | Show details of an image in the registry |
|
| [`inspect`](buildx_imagetools_inspect.md) | Show details of image in the registry |
|
||||||
|
|
||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
|
|||||||
@@ -9,20 +9,22 @@ Create a new image based on source images
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--append`](#append) | | | Append to existing manifest |
|
| [`--append`](#append) | Append to existing manifest |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
| [`--dry-run`](#dry-run) | | | Show final image instead of pushing |
|
| [`--dry-run`](#dry-run) | Show final image instead of pushing |
|
||||||
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Read source descriptor from file |
|
| [`-f`](#file), [`--file stringArray`](#file) | Read source descriptor from file |
|
||||||
| `--progress` | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
|
| [`-t`](#tag), [`--tag stringArray`](#tag) | Set reference for new image |
|
||||||
| [`-t`](#tag), [`--tag`](#tag) | `stringArray` | | Set reference for new image |
|
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
## Description
|
## Description
|
||||||
|
|
||||||
|
Imagetools contains commands for working with manifest lists in the registry.
|
||||||
|
These commands are useful for inspecting multi-platform build results.
|
||||||
|
|
||||||
Create a new manifest list based on source manifests. The source manifests can
|
Create a new manifest list based on source manifests. The source manifests can
|
||||||
be manifest lists or single platform distribution manifests and must already
|
be manifest lists or single platform distribution manifests and must already
|
||||||
exist in the registry where the new manifest is created. If only one source is
|
exist in the registry where the new manifest is created. If only one source is
|
||||||
@@ -55,15 +57,16 @@ or a JSON of OCI descriptor object.
|
|||||||
In order to define annotations or additional platform properties like `os.version` and
|
In order to define annotations or additional platform properties like `os.version` and
|
||||||
`os.features` you need to add them in the OCI descriptor object encoded in JSON.
|
`os.features` you need to add them in the OCI descriptor object encoded in JSON.
|
||||||
|
|
||||||
```console
|
```
|
||||||
$ docker buildx imagetools inspect --raw alpine | jq '.manifests[0] | .platform."os.version"="10.1"' > descr.json
|
docker buildx imagetools inspect --raw alpine | jq '.manifests[0] | .platform."os.version"="10.1"' > descr.json
|
||||||
$ docker buildx imagetools create -f descr.json myuser/image
|
docker buildx imagetools create -f descr.json myuser/image
|
||||||
```
|
```
|
||||||
|
|
||||||
The descriptor in the file is merged with existing descriptor in the registry if it exists.
|
The descriptor in the file is merged with existing descriptor in the registry if it exists.
|
||||||
|
|
||||||
The supported fields for the descriptor are defined in [OCI spec](https://github.com/opencontainers/image-spec/blob/master/descriptor.md#properties) .
|
The supported fields for the descriptor are defined in [OCI spec](https://github.com/opencontainers/image-spec/blob/master/descriptor.md#properties) .
|
||||||
|
|
||||||
|
|
||||||
### <a name="tag"></a> Set reference for new image (-t, --tag)
|
### <a name="tag"></a> Set reference for new image (-t, --tag)
|
||||||
|
|
||||||
```
|
```
|
||||||
@@ -72,7 +75,10 @@ The supported fields for the descriptor are defined in [OCI spec](https://github
|
|||||||
|
|
||||||
Use the `-t` or `--tag` flag to set the name of the image to be created.
|
Use the `-t` or `--tag` flag to set the name of the image to be created.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx imagetools create --dry-run alpine@sha256:5c40b3c27b9f13c873fefb2139765c56ce97fd50230f1f2d5c91e55dec171907 sha256:c4ba6347b0e4258ce6a6de2401619316f982b7bcc529f73d2a410d0097730204
|
$ docker buildx imagetools create --dry-run alpine@sha256:5c40b3c27b9f13c873fefb2139765c56ce97fd50230f1f2d5c91e55dec171907 sha256:c4ba6347b0e4258ce6a6de2401619316f982b7bcc529f73d2a410d0097730204
|
||||||
|
|
||||||
$ docker buildx imagetools create -t tonistiigi/myapp -f image1 -f image2
|
$ docker buildx imagetools create -t tonistiigi/myapp -f image1 -f image2
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -5,57 +5,40 @@ docker buildx imagetools inspect [OPTIONS] NAME
|
|||||||
```
|
```
|
||||||
|
|
||||||
<!---MARKER_GEN_START-->
|
<!---MARKER_GEN_START-->
|
||||||
Show details of an image in the registry
|
Show details of image in the registry
|
||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
| [`--format`](#format) | `string` | `{{.Manifest}}` | Format the output using the given Go template |
|
| [`--raw`](#raw) | Show original JSON manifest |
|
||||||
| [`--raw`](#raw) | | | Show original, unformatted JSON manifest |
|
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
## Description
|
## Description
|
||||||
|
|
||||||
Show details of an image in the registry.
|
Show details of image in the registry.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx imagetools inspect alpine
|
$ docker buildx imagetools inspect alpine
|
||||||
|
|
||||||
Name: docker.io/library/alpine:latest
|
Name: docker.io/library/alpine:latest
|
||||||
MediaType: application/vnd.docker.distribution.manifest.list.v2+json
|
MediaType: application/vnd.docker.distribution.manifest.list.v2+json
|
||||||
Digest: sha256:21a3deaa0d32a8057914f36584b5288d2e5ecc984380bc0118285c70fa8c9300
|
Digest: sha256:28ef97b8686a0b5399129e9b763d5b7e5ff03576aa5580d6f4182a49c5fe1913
|
||||||
|
|
||||||
Manifests:
|
Manifests:
|
||||||
Name: docker.io/library/alpine:latest@sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3
|
Name: docker.io/library/alpine:latest@sha256:5c40b3c27b9f13c873fefb2139765c56ce97fd50230f1f2d5c91e55dec171907
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
||||||
Platform: linux/amd64
|
Platform: linux/amd64
|
||||||
|
|
||||||
Name: docker.io/library/alpine:latest@sha256:e047bc2af17934d38c5a7fa9f46d443f1de3a7675546402592ef805cfa929f9d
|
Name: docker.io/library/alpine:latest@sha256:c4ba6347b0e4258ce6a6de2401619316f982b7bcc529f73d2a410d0097730204
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
||||||
Platform: linux/arm/v6
|
Platform: linux/arm/v6
|
||||||
|
...
|
||||||
Name: docker.io/library/alpine:latest@sha256:8483ecd016885d8dba70426fda133c30466f661bb041490d525658f1aac73822
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/arm/v7
|
|
||||||
|
|
||||||
Name: docker.io/library/alpine:latest@sha256:c74f1b1166784193ea6c8f9440263b9be6cae07dfe35e32a5df7a31358ac2060
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/arm64/v8
|
|
||||||
|
|
||||||
Name: docker.io/library/alpine:latest@sha256:2689e157117d2da668ad4699549e55eba1ceb79cb7862368b30919f0488213f4
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/386
|
|
||||||
|
|
||||||
Name: docker.io/library/alpine:latest@sha256:2042a492bcdd847a01cd7f119cd48caa180da696ed2aedd085001a78664407d6
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/ppc64le
|
|
||||||
|
|
||||||
Name: docker.io/library/alpine:latest@sha256:49e322ab6690e73a4909f787bcbdb873631264ff4a108cddfd9f9c249ba1d58e
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/s390x
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## Examples
|
## Examples
|
||||||
@@ -64,569 +47,7 @@ Manifests:
|
|||||||
|
|
||||||
Same as [`buildx --builder`](buildx.md#builder).
|
Same as [`buildx --builder`](buildx.md#builder).
|
||||||
|
|
||||||
### <a name="format"></a> Format the output (--format)
|
|
||||||
|
|
||||||
Format the output using the given Go template. Defaults to `{{.Manifest}}` if
|
|
||||||
unset. Following fields are available:
|
|
||||||
|
|
||||||
* `.Name`: provides the reference of the image
|
|
||||||
* `.Manifest`: provides the manifest or manifest list
|
|
||||||
* `.Image`: provides the image config
|
|
||||||
* `.BuildInfo`: provides [build info from image config](https://github.com/moby/buildkit/blob/master/docs/build-repro.md#image-config)
|
|
||||||
|
|
||||||
#### `.Name`
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect alpine --format "{{.Name}}"
|
|
||||||
Name: docker.io/library/alpine:latest
|
|
||||||
```
|
|
||||||
|
|
||||||
#### `.Manifest`
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect crazymax/loop --format "{{.Manifest}}"
|
|
||||||
Name: docker.io/crazymax/loop:latest
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Digest: sha256:08602e7340970e92bde5e0a2e887c1fde4d9ae753d1e05efb4c8ef3b609f97f1
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect moby/buildkit:master --format "{{.Manifest}}"
|
|
||||||
Name: docker.io/moby/buildkit:master
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.list.v2+json
|
|
||||||
Digest: sha256:3183f7ce54d1efb44c34b84f428ae10aaf141e553c6b52a7ff44cc7083a05a66
|
|
||||||
|
|
||||||
Manifests:
|
|
||||||
Name: docker.io/moby/buildkit:master@sha256:667d28c9fb33820ce686887a717a148e89fa77f9097f9352996bbcce99d352b1
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/amd64
|
|
||||||
|
|
||||||
Name: docker.io/moby/buildkit:master@sha256:71789527b64ab3d7b3de01d364b449cd7f7a3da758218fbf73b9c9aae05a6775
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/arm/v7
|
|
||||||
|
|
||||||
Name: docker.io/moby/buildkit:master@sha256:fb64667e1ce6ab0d05478f3a8402af07b27737598dcf9a510fb1d792b13a66be
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/arm64
|
|
||||||
|
|
||||||
Name: docker.io/moby/buildkit:master@sha256:1c3ddf95a0788e23f72f25800c05abc4458946685e2b66788c3d978cde6da92b
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/s390x
|
|
||||||
|
|
||||||
Name: docker.io/moby/buildkit:master@sha256:05bcde6d460a284e5bc88026cd070277e8380355de3126cbc8fe8a452708c6b1
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/ppc64le
|
|
||||||
|
|
||||||
Name: docker.io/moby/buildkit:master@sha256:c04c57765304ab84f4f9807fff3e11605c3a60e16435c734b02c723680f6bd6e
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/riscv64
|
|
||||||
```
|
|
||||||
|
|
||||||
#### `.BuildInfo`
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect crazymax/buildx:buildinfo --format "{{.BuildInfo}}"
|
|
||||||
Name: docker.io/crazymax/buildx:buildinfo
|
|
||||||
Frontend: dockerfile.v0
|
|
||||||
Attrs:
|
|
||||||
filename: Dockerfile
|
|
||||||
source: docker/dockerfile-upstream:master-labs
|
|
||||||
build-arg:bar: foo
|
|
||||||
build-arg:foo: bar
|
|
||||||
Sources:
|
|
||||||
Type: docker-image
|
|
||||||
Ref: docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0
|
|
||||||
Pin: sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0
|
|
||||||
|
|
||||||
Type: docker-image
|
|
||||||
Ref: docker.io/library/alpine:3.13
|
|
||||||
Pin: sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c
|
|
||||||
|
|
||||||
Type: docker-image
|
|
||||||
Ref: docker.io/moby/buildkit:v0.9.0
|
|
||||||
Pin: sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab
|
|
||||||
|
|
||||||
Type: docker-image
|
|
||||||
Ref: docker.io/tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04
|
|
||||||
Pin: sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04
|
|
||||||
|
|
||||||
Type: http
|
|
||||||
Ref: https://raw.githubusercontent.com/moby/moby/master/README.md
|
|
||||||
Pin: sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c
|
|
||||||
```
|
|
||||||
|
|
||||||
#### JSON output
|
|
||||||
|
|
||||||
A `json` go template func is also available if you want to render fields as
|
|
||||||
JSON bytes:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect crazymax/loop --format "{{json .Manifest}}"
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:08602e7340970e92bde5e0a2e887c1fde4d9ae753d1e05efb4c8ef3b609f97f1",
|
|
||||||
"size": 949
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect moby/buildkit:master --format "{{json .Manifest}}"
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"schemaVersion": 2,
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
|
|
||||||
"digest": "sha256:79d97f205e2799d99a3a8ae2a1ef17acb331e11784262c3faada847dc6972c52",
|
|
||||||
"size": 2010,
|
|
||||||
"manifests": [
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:bd1e78f06de26610fadf4eb9d04b1a45a545799d6342701726e952cc0c11c912",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "amd64",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:d37dcced63ec0965824fca644f0ac9efad8569434ec15b4c83adfcb3dcfc743b",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "arm",
|
|
||||||
"os": "linux",
|
|
||||||
"variant": "v7"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:ce142eb2255e6af46f2809e159fd03081697c7605a3de03b9cbe9a52ddb244bf",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "arm64",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:f59bfb5062fff76ce464bfa4e25ebaaaac887d6818238e119d68613c456d360c",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "s390x",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:cc96426e0c50a78105d5637d31356db5dd6ec594f21b24276e534a32da09645c",
|
|
||||||
"size": 1159,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "ppc64le",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:39f9c1e2878e6c333acb23187d6b205ce82ed934c60da326cb2c698192631478",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "riscv64",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect crazymax/buildx:buildinfo --format "{{json .BuildInfo}}"
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"frontend": "dockerfile.v0",
|
|
||||||
"attrs": {
|
|
||||||
"build-arg:bar": "foo",
|
|
||||||
"build-arg:foo": "bar",
|
|
||||||
"filename": "Dockerfile",
|
|
||||||
"source": "crazymax/dockerfile:buildattrs"
|
|
||||||
},
|
|
||||||
"sources": [
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
|
|
||||||
"pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/library/alpine:3.13@sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c",
|
|
||||||
"pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/moby/buildkit:v0.9.0@sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab",
|
|
||||||
"pin": "sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04",
|
|
||||||
"pin": "sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "http",
|
|
||||||
"ref": "https://raw.githubusercontent.com/moby/moby/master/README.md",
|
|
||||||
"pin": "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect crazymax/buildx:buildinfo --format "{{json .}}"
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"name": "crazymax/buildx:buildinfo",
|
|
||||||
"manifest": {
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:899d2c7acbc124d406820857bb51d9089717bbe4e22b97eb4bc5789e99f09f83",
|
|
||||||
"size": 2628
|
|
||||||
},
|
|
||||||
"image": {
|
|
||||||
"created": "2022-02-24T12:27:43.627154558Z",
|
|
||||||
"architecture": "amd64",
|
|
||||||
"os": "linux",
|
|
||||||
"config": {
|
|
||||||
"Env": [
|
|
||||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
|
|
||||||
"DOCKER_TLS_CERTDIR=/certs",
|
|
||||||
"DOCKER_CLI_EXPERIMENTAL=enabled"
|
|
||||||
],
|
|
||||||
"Entrypoint": [
|
|
||||||
"docker-entrypoint.sh"
|
|
||||||
],
|
|
||||||
"Cmd": [
|
|
||||||
"sh"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"rootfs": {
|
|
||||||
"type": "layers",
|
|
||||||
"diff_ids": [
|
|
||||||
"sha256:7fcb75871b2101082203959c83514ac8a9f4ecfee77a0fe9aa73bbe56afdf1b4",
|
|
||||||
"sha256:d3c0b963ff5684160641f936d6a4aa14efc8ff27b6edac255c07f2d03ff92e82",
|
|
||||||
"sha256:3f8d78f13fa9b1f35d3bc3f1351d03a027c38018c37baca73f93eecdea17f244",
|
|
||||||
"sha256:8e6eb1137b182ae0c3f5d40ca46341fda2eaeeeb5fa516a9a2bf96171238e2e0",
|
|
||||||
"sha256:fde4c869a56b54dd76d7352ddaa813fd96202bda30b9dceb2c2f2ad22fa2e6ce",
|
|
||||||
"sha256:52025823edb284321af7846419899234b3c66219bf06061692b709875ed0760f",
|
|
||||||
"sha256:50adb5982dbf6126c7cf279ac3181d1e39fc9116b610b947a3dadae6f7e7c5bc",
|
|
||||||
"sha256:9801c319e1c66c5d295e78b2d3e80547e73c7e3c63a4b71e97c8ca357224af24",
|
|
||||||
"sha256:dfbfac44d5d228c49b42194c8a2f470abd6916d072f612a6fb14318e94fde8ae",
|
|
||||||
"sha256:3dfb74e19dedf61568b917c19b0fd3ee4580870027ca0b6054baf239855d1322",
|
|
||||||
"sha256:b182e707c23e4f19be73f9022a99d2d1ca7bf1ca8f280d40e4d1c10a6f51550e"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"history": [
|
|
||||||
{
|
|
||||||
"created": "2021-11-12T17:19:58.698676655Z",
|
|
||||||
"created_by": "/bin/sh -c #(nop) ADD file:5a707b9d6cb5fff532e4c2141bc35707593f21da5528c9e71ae2ddb6ba4a4eb6 in / "
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2021-11-12T17:19:58.948920855Z",
|
|
||||||
"created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\"]",
|
|
||||||
"empty_layer": true
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:38.285594601Z",
|
|
||||||
"created_by": "RUN /bin/sh -c apk --update --no-cache add bash ca-certificates openssh-client \u0026\u0026 rm -rf /tmp/* /var/cache/apk/* # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:41.061874167Z",
|
|
||||||
"created_by": "COPY /opt/docker/ /usr/local/bin/ # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:41.174098947Z",
|
|
||||||
"created_by": "COPY /usr/bin/buildctl /usr/local/bin/buildctl # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:41.320343683Z",
|
|
||||||
"created_by": "COPY /usr/bin/buildkit* /usr/local/bin/ # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:41.447149933Z",
|
|
||||||
"created_by": "COPY /buildx /usr/libexec/docker/cli-plugins/docker-buildx # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:43.057722191Z",
|
|
||||||
"created_by": "COPY /opt/docker-compose /usr/libexec/docker/cli-plugins/docker-compose # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:43.145224134Z",
|
|
||||||
"created_by": "ADD https://raw.githubusercontent.com/moby/moby/master/README.md / # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:43.422212427Z",
|
|
||||||
"created_by": "ENV DOCKER_TLS_CERTDIR=/certs",
|
|
||||||
"comment": "buildkit.dockerfile.v0",
|
|
||||||
"empty_layer": true
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:43.422212427Z",
|
|
||||||
"created_by": "ENV DOCKER_CLI_EXPERIMENTAL=enabled",
|
|
||||||
"comment": "buildkit.dockerfile.v0",
|
|
||||||
"empty_layer": true
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:43.422212427Z",
|
|
||||||
"created_by": "RUN /bin/sh -c docker --version \u0026\u0026 buildkitd --version \u0026\u0026 buildctl --version \u0026\u0026 docker buildx version \u0026\u0026 docker compose version \u0026\u0026 mkdir /certs /certs/client \u0026\u0026 chmod 1777 /certs /certs/client # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:43.514320155Z",
|
|
||||||
"created_by": "COPY rootfs/modprobe.sh /usr/local/bin/modprobe # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:43.627154558Z",
|
|
||||||
"created_by": "COPY rootfs/docker-entrypoint.sh /usr/local/bin/ # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:43.627154558Z",
|
|
||||||
"created_by": "ENTRYPOINT [\"docker-entrypoint.sh\"]",
|
|
||||||
"comment": "buildkit.dockerfile.v0",
|
|
||||||
"empty_layer": true
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:43.627154558Z",
|
|
||||||
"created_by": "CMD [\"sh\"]",
|
|
||||||
"comment": "buildkit.dockerfile.v0",
|
|
||||||
"empty_layer": true
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"buildinfo": {
|
|
||||||
"frontend": "dockerfile.v0",
|
|
||||||
"attrs": {
|
|
||||||
"build-arg:bar": "foo",
|
|
||||||
"build-arg:foo": "bar",
|
|
||||||
"filename": "Dockerfile",
|
|
||||||
"source": "docker/dockerfile-upstream:master-labs"
|
|
||||||
},
|
|
||||||
"sources": [
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
|
|
||||||
"pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/library/alpine:3.13",
|
|
||||||
"pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/moby/buildkit:v0.9.0",
|
|
||||||
"pin": "sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04",
|
|
||||||
"pin": "sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "http",
|
|
||||||
"ref": "https://raw.githubusercontent.com/moby/moby/master/README.md",
|
|
||||||
"pin": "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Multi-platform
|
|
||||||
|
|
||||||
Multi-platform images are supported for `.Image` and `.BuildInfo` fields. If
|
|
||||||
you want to pick up a specific platform, you can specify it using the `index`
|
|
||||||
go template function:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect --format '{{json (index .Image "linux/s390x")}}' moby/buildkit:master
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"created": "2022-02-25T17:13:27.89891722Z",
|
|
||||||
"architecture": "s390x",
|
|
||||||
"os": "linux",
|
|
||||||
"config": {
|
|
||||||
"Env": [
|
|
||||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
|
||||||
],
|
|
||||||
"Entrypoint": [
|
|
||||||
"buildkitd"
|
|
||||||
],
|
|
||||||
"Volumes": {
|
|
||||||
"/var/lib/buildkit": {}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"rootfs": {
|
|
||||||
"type": "layers",
|
|
||||||
"diff_ids": [
|
|
||||||
"sha256:41048e32d0684349141cf05f629c5fc3c5915d1f3426b66dbb8953a540e01e1e",
|
|
||||||
"sha256:2651209b9208fff6c053bc3c17353cb07874e50f1a9bc96d6afd03aef63de76a",
|
|
||||||
"sha256:6741ed7e73039d853fa8902246a4c7e8bf9dd09652fd1b08251bc5f9e8876a7f",
|
|
||||||
"sha256:92ac046adeeb65c86ae3f0b458dee04ad4a462e417661c04d77642c66494f69b"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"history": [
|
|
||||||
{
|
|
||||||
"created": "2021-11-24T20:41:23.709681315Z",
|
|
||||||
"created_by": "/bin/sh -c #(nop) ADD file:cd24c711a2ef431b3ff94f9a02bfc42f159bc60de1d0eceecafea4e8af02441d in / "
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2021-11-24T20:41:23.94211262Z",
|
|
||||||
"created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\"]",
|
|
||||||
"empty_layer": true
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-01-26T18:15:21.449825391Z",
|
|
||||||
"created_by": "RUN /bin/sh -c apk add --no-cache fuse3 git openssh pigz xz \u0026\u0026 ln -s fusermount3 /usr/bin/fusermount # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T00:34:00.924540012Z",
|
|
||||||
"created_by": "COPY examples/buildctl-daemonless/buildctl-daemonless.sh /usr/bin/ # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-25T17:13:27.89891722Z",
|
|
||||||
"created_by": "VOLUME [/var/lib/buildkit]",
|
|
||||||
"comment": "buildkit.dockerfile.v0",
|
|
||||||
"empty_layer": true
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-25T17:13:27.89891722Z",
|
|
||||||
"created_by": "COPY / /usr/bin/ # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-25T17:13:27.89891722Z",
|
|
||||||
"created_by": "ENTRYPOINT [\"buildkitd\"]",
|
|
||||||
"comment": "buildkit.dockerfile.v0",
|
|
||||||
"empty_layer": true
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### <a name="raw"></a> Show original, unformatted JSON manifest (--raw)
|
### <a name="raw"></a> Show original, unformatted JSON manifest (--raw)
|
||||||
|
|
||||||
Use the `--raw` option to print the unformatted JSON manifest bytes.
|
Use the `--raw` option to print the original JSON bytes instead of the formatted
|
||||||
|
output.
|
||||||
> `jq` is used here to get a better rendering of the output result.
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect --raw crazymax/loop | jq
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"schemaVersion": 2,
|
|
||||||
"config": {
|
|
||||||
"mediaType": "application/vnd.docker.container.image.v1+json",
|
|
||||||
"digest": "sha256:7ace7d324e79b360b2db8b820d83081863d96d22e734cdf297a8e7fd83f6ceb3",
|
|
||||||
"size": 2298
|
|
||||||
},
|
|
||||||
"layers": [
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
|
||||||
"digest": "sha256:5843afab387455b37944e709ee8c78d7520df80f8d01cf7f861aae63beeddb6b",
|
|
||||||
"size": 2811478
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
|
||||||
"digest": "sha256:726d3732a87e1c430d67e8969de6b222a889d45e045ebae1a008a37ba38f3b1f",
|
|
||||||
"size": 1776812
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
|
||||||
"digest": "sha256:5d7cf9b33148a8f220c84f27dd2cfae46aca019a3ea3fbf7274f6d6dbfae8f3b",
|
|
||||||
"size": 382855
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect --raw moby/buildkit:master | jq
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
|
|
||||||
"schemaVersion": 2,
|
|
||||||
"manifests": [
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:667d28c9fb33820ce686887a717a148e89fa77f9097f9352996bbcce99d352b1",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "amd64",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:71789527b64ab3d7b3de01d364b449cd7f7a3da758218fbf73b9c9aae05a6775",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "arm",
|
|
||||||
"os": "linux",
|
|
||||||
"variant": "v7"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:fb64667e1ce6ab0d05478f3a8402af07b27737598dcf9a510fb1d792b13a66be",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "arm64",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:1c3ddf95a0788e23f72f25800c05abc4458946685e2b66788c3d978cde6da92b",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "s390x",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:05bcde6d460a284e5bc88026cd070277e8380355de3126cbc8fe8a452708c6b1",
|
|
||||||
"size": 1159,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "ppc64le",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:c04c57765304ab84f4f9807fff3e11605c3a60e16435c734b02c723680f6bd6e",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "riscv64",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|||||||
@@ -9,10 +9,10 @@ Inspect current builder instance
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--bootstrap`](#bootstrap) | | | Ensure builder has booted before inspecting |
|
| [`--bootstrap`](#bootstrap) | Ensure builder has booted before inspecting |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
@@ -43,10 +43,6 @@ name of the builder to inspect to get information about that builder.
|
|||||||
The following example shows information about a builder instance named
|
The following example shows information about a builder instance named
|
||||||
`elated_tesla`:
|
`elated_tesla`:
|
||||||
|
|
||||||
> **Note**
|
|
||||||
>
|
|
||||||
> Asterisk `*` next to node build platform(s) indicate they had been set manually during `buildx create`. Otherwise, it had been autodetected.
|
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx inspect elated_tesla
|
$ docker buildx inspect elated_tesla
|
||||||
|
|
||||||
@@ -62,5 +58,5 @@ Platforms: linux/amd64
|
|||||||
Name: elated_tesla1
|
Name: elated_tesla1
|
||||||
Endpoint: ssh://ubuntu@1.2.3.4
|
Endpoint: ssh://ubuntu@1.2.3.4
|
||||||
Status: running
|
Status: running
|
||||||
Platforms: linux/arm64*, linux/arm/v7, linux/arm/v6
|
Platforms: linux/arm64, linux/arm/v7, linux/arm/v6
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -14,16 +14,18 @@ List builder instances
|
|||||||
|
|
||||||
Lists all builder instances and the nodes for each instance
|
Lists all builder instances and the nodes for each instance
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx ls
|
$ docker buildx ls
|
||||||
NAME/NODE DRIVER/ENDPOINT STATUS BUILDKIT PLATFORMS
|
|
||||||
|
NAME/NODE DRIVER/ENDPOINT STATUS PLATFORMS
|
||||||
elated_tesla * docker-container
|
elated_tesla * docker-container
|
||||||
elated_tesla0 unix:///var/run/docker.sock running v0.10.3 linux/amd64
|
elated_tesla0 unix:///var/run/docker.sock running linux/amd64
|
||||||
elated_tesla1 ssh://ubuntu@1.2.3.4 running v0.10.3 linux/arm64*, linux/arm/v7, linux/arm/v6
|
elated_tesla1 ssh://ubuntu@1.2.3.4 running linux/arm64, linux/arm/v7, linux/arm/v6
|
||||||
default docker
|
default docker
|
||||||
default default running 20.10.14 linux/amd64
|
default default running linux/amd64
|
||||||
```
|
```
|
||||||
|
|
||||||
Each builder has one or more nodes associated with it. The current builder's
|
Each builder has one or more nodes associated with it. The current builder's
|
||||||
name is marked with a `*` in `NAME/NODE` and explicit node to build against for
|
name is marked with a `*`.
|
||||||
the target platform marked with a `*` in the `PLATFORMS` column.
|
|
||||||
|
|||||||
@@ -9,38 +9,18 @@ Remove build cache
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| `-a`, `--all` | | | Include internal/frontend images |
|
| `-a`, `--all` | Remove all unused images, not just dangling ones |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
| `--filter` | `filter` | | Provide filter values (e.g., `until=24h`) |
|
| `--filter filter` | Provide filter values (e.g., `until=24h`) |
|
||||||
| `-f`, `--force` | | | Do not prompt for confirmation |
|
| `-f`, `--force` | Do not prompt for confirmation |
|
||||||
| `--keep-storage` | `bytes` | `0` | Amount of disk space to keep for cache |
|
| `--keep-storage bytes` | Amount of disk space to keep for cache |
|
||||||
| `--verbose` | | | Provide a more verbose output |
|
| `--verbose` | Provide a more verbose output |
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
## Description
|
|
||||||
|
|
||||||
Clears the build cache of the selected builder.
|
|
||||||
|
|
||||||
You can finely control what cache data is kept using:
|
|
||||||
|
|
||||||
- The `--filter=until=<duration>` flag to keep images that have been used in
|
|
||||||
the last `<duration>` time.
|
|
||||||
|
|
||||||
`<duration>` is a duration string, e.g. `24h` or `2h30m`, with allowable
|
|
||||||
units of `(h)ours`, `(m)inutes` and `(s)econds`.
|
|
||||||
|
|
||||||
- The `--keep-storage=<size>` flag to keep `<size>` bytes of data in the cache.
|
|
||||||
|
|
||||||
`<size>` is a human-readable memory string, e.g. `128mb`, `2gb`, etc. Units
|
|
||||||
are case-insensitive.
|
|
||||||
|
|
||||||
- The `--all` flag to allow clearing internal helper images and frontend images
|
|
||||||
set using the `#syntax=` directive or the `BUILDKIT_SYNTAX` build argument.
|
|
||||||
|
|
||||||
## Examples
|
## Examples
|
||||||
|
|
||||||
### <a name="builder"></a> Override the configured builder instance (--builder)
|
### <a name="builder"></a> Override the configured builder instance (--builder)
|
||||||
|
|||||||
@@ -9,13 +9,10 @@ Remove a builder instance
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--all-inactive`](#all-inactive) | | | Remove all inactive builders |
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| [`--keep-state`](#keep-state) | Keep BuildKit state |
|
||||||
| [`-f`](#force), [`--force`](#force) | | | Do not prompt for confirmation |
|
|
||||||
| [`--keep-daemon`](#keep-daemon) | | | Keep the buildkitd daemon running |
|
|
||||||
| [`--keep-state`](#keep-state) | | | Keep BuildKit state |
|
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
@@ -27,32 +24,10 @@ default builder.
|
|||||||
|
|
||||||
## Examples
|
## Examples
|
||||||
|
|
||||||
### <a name="all-inactive"></a> Remove all inactive builders (--all-inactive)
|
|
||||||
|
|
||||||
Remove builders that are not in running state.
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx rm --all-inactive
|
|
||||||
WARNING! This will remove all builders that are not in running state. Are you sure you want to continue? [y/N] y
|
|
||||||
```
|
|
||||||
|
|
||||||
### <a name="builder"></a> Override the configured builder instance (--builder)
|
### <a name="builder"></a> Override the configured builder instance (--builder)
|
||||||
|
|
||||||
Same as [`buildx --builder`](buildx.md#builder).
|
Same as [`buildx --builder`](buildx.md#builder).
|
||||||
|
|
||||||
### <a name="force"></a> Do not prompt for confirmation (--force)
|
|
||||||
|
|
||||||
Do not prompt for confirmation before removing inactive builders.
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx rm --all-inactive --force
|
|
||||||
```
|
|
||||||
|
|
||||||
### <a name="keep-daemon"></a> Keep the buildkitd daemon running (--keep-daemon)
|
|
||||||
|
|
||||||
Keep the buildkitd daemon running after the buildx context is removed. This is useful when you manage buildkitd daemons and buildx contexts independently.
|
|
||||||
Currently, only supported by the [`docker-container` and `kubernetes` drivers](buildx_create.md#driver).
|
|
||||||
|
|
||||||
### <a name="keep-state"></a> Keep BuildKit state (--keep-state)
|
### <a name="keep-state"></a> Keep BuildKit state (--keep-state)
|
||||||
|
|
||||||
Keep BuildKit state, so it can be reused by a new builder with the same name.
|
Keep BuildKit state, so it can be reused by a new builder with the same name.
|
||||||
|
|||||||
@@ -9,9 +9,9 @@ Stop builder instance
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
|
|||||||
@@ -9,11 +9,11 @@ Set the current builder instance
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
| `--default` | | | Set builder as default for current context |
|
| `--default` | Set builder as default for current context |
|
||||||
| `--global` | | | Builder persists context changes |
|
| `--global` | Builder persists context changes |
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
|
|||||||
@@ -10,9 +10,10 @@ Show buildx version information
|
|||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
## Description
|
## Examples
|
||||||
|
|
||||||
|
### View version information
|
||||||
|
|
||||||
View version information
|
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx version
|
$ docker buildx version
|
||||||
|
|||||||
@@ -4,11 +4,11 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"io"
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
@@ -90,7 +90,7 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_, err = io.Copy(io.Discard, rc)
|
_, err = io.Copy(ioutil.Discard, rc)
|
||||||
return err
|
return err
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
// image pulling failed, check if it exists in local image store.
|
// image pulling failed, check if it exists in local image store.
|
||||||
@@ -287,34 +287,18 @@ func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Version(ctx context.Context) (string, error) {
|
|
||||||
bufStdout := &bytes.Buffer{}
|
|
||||||
bufStderr := &bytes.Buffer{}
|
|
||||||
if err := d.run(ctx, []string{"buildkitd", "--version"}, bufStdout, bufStderr); err != nil {
|
|
||||||
if bufStderr.Len() > 0 {
|
|
||||||
return "", errors.Wrap(err, bufStderr.String())
|
|
||||||
}
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
version := strings.Fields(bufStdout.String())
|
|
||||||
if len(version) != 4 {
|
|
||||||
return "", errors.Errorf("unexpected version format: %s", bufStdout.String())
|
|
||||||
}
|
|
||||||
return version[2], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Driver) Stop(ctx context.Context, force bool) error {
|
func (d *Driver) Stop(ctx context.Context, force bool) error {
|
||||||
info, err := d.Info(ctx)
|
info, err := d.Info(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if info.Status == driver.Running {
|
if info.Status == driver.Running {
|
||||||
return d.DockerAPI.ContainerStop(ctx, d.Name, container.StopOptions{})
|
return d.DockerAPI.ContainerStop(ctx, d.Name, nil)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
|
func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
|
||||||
info, err := d.Info(ctx)
|
info, err := d.Info(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -324,22 +308,20 @@ func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if rmDaemon {
|
if err := d.DockerAPI.ContainerRemove(ctx, d.Name, dockertypes.ContainerRemoveOptions{
|
||||||
if err := d.DockerAPI.ContainerRemove(ctx, d.Name, dockertypes.ContainerRemoveOptions{
|
RemoveVolumes: true,
|
||||||
RemoveVolumes: true,
|
Force: force,
|
||||||
Force: force,
|
}); err != nil {
|
||||||
}); err != nil {
|
return err
|
||||||
return err
|
}
|
||||||
}
|
for _, v := range container.Mounts {
|
||||||
for _, v := range container.Mounts {
|
if v.Name == d.Name+volumeStateSuffix {
|
||||||
if v.Name != d.Name+volumeStateSuffix {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if rmVolume {
|
if rmVolume {
|
||||||
return d.DockerAPI.VolumeRemove(ctx, d.Name+volumeStateSuffix, false)
|
return d.DockerAPI.VolumeRemove(ctx, d.Name+volumeStateSuffix, false)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -29,7 +29,7 @@ func (*factory) Usage() string {
|
|||||||
return "docker-container"
|
return "docker-container"
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*factory) Priority(ctx context.Context, endpoint string, api dockerclient.APIClient) int {
|
func (*factory) Priority(ctx context.Context, api dockerclient.APIClient) int {
|
||||||
if api == nil {
|
if api == nil {
|
||||||
return priorityUnsupported
|
return priorityUnsupported
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -29,19 +29,11 @@ func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Version(ctx context.Context) (string, error) {
|
|
||||||
v, err := d.DockerAPI.ServerVersion(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return "", errors.Wrapf(driver.ErrNotConnecting, err.Error())
|
|
||||||
}
|
|
||||||
return v.Version, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Driver) Stop(ctx context.Context, force bool) error {
|
func (d *Driver) Stop(ctx context.Context, force bool) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
|
func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -54,22 +46,11 @@ func (d *Driver) Client(ctx context.Context) (*client.Client, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Features() map[driver.Feature]bool {
|
func (d *Driver) Features() map[driver.Feature]bool {
|
||||||
var useContainerdSnapshotter bool
|
|
||||||
ctx := context.Background()
|
|
||||||
c, err := d.Client(ctx)
|
|
||||||
if err == nil {
|
|
||||||
workers, _ := c.ListWorkers(ctx)
|
|
||||||
for _, w := range workers {
|
|
||||||
if _, ok := w.Labels["org.mobyproject.buildkit.worker.snapshotter"]; ok {
|
|
||||||
useContainerdSnapshotter = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return map[driver.Feature]bool{
|
return map[driver.Feature]bool{
|
||||||
driver.OCIExporter: useContainerdSnapshotter,
|
driver.OCIExporter: false,
|
||||||
driver.DockerExporter: useContainerdSnapshotter,
|
driver.DockerExporter: false,
|
||||||
driver.CacheExport: useContainerdSnapshotter,
|
driver.CacheExport: false,
|
||||||
driver.MultiPlatform: useContainerdSnapshotter,
|
driver.MultiPlatform: false,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -26,7 +26,7 @@ func (*factory) Usage() string {
|
|||||||
return "docker"
|
return "docker"
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*factory) Priority(ctx context.Context, endpoint string, api dockerclient.APIClient) int {
|
func (*factory) Priority(ctx context.Context, api dockerclient.APIClient) int {
|
||||||
if api == nil {
|
if api == nil {
|
||||||
return priorityUnsupported
|
return priorityUnsupported
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -53,9 +53,8 @@ type Driver interface {
|
|||||||
Factory() Factory
|
Factory() Factory
|
||||||
Bootstrap(context.Context, progress.Logger) error
|
Bootstrap(context.Context, progress.Logger) error
|
||||||
Info(context.Context) (*Info, error)
|
Info(context.Context) (*Info, error)
|
||||||
Version(context.Context) (string, error)
|
|
||||||
Stop(ctx context.Context, force bool) error
|
Stop(ctx context.Context, force bool) error
|
||||||
Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error
|
Rm(ctx context.Context, force bool, rmVolume bool) error
|
||||||
Client(ctx context.Context) (*client.Client, error)
|
Client(ctx context.Context) (*client.Client, error)
|
||||||
Features() map[Feature]bool
|
Features() map[Feature]bool
|
||||||
IsMobyDriver() bool
|
IsMobyDriver() bool
|
||||||
|
|||||||
@@ -1,224 +0,0 @@
|
|||||||
package context
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/docker/cli/cli/context"
|
|
||||||
"github.com/docker/cli/cli/context/store"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"k8s.io/client-go/tools/clientcmd"
|
|
||||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
|
||||||
)
|
|
||||||
|
|
||||||
func testEndpoint(server, defaultNamespace string, ca, cert, key []byte, skipTLSVerify bool) Endpoint {
|
|
||||||
var tlsData *context.TLSData
|
|
||||||
if ca != nil || cert != nil || key != nil {
|
|
||||||
tlsData = &context.TLSData{
|
|
||||||
CA: ca,
|
|
||||||
Cert: cert,
|
|
||||||
Key: key,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return Endpoint{
|
|
||||||
EndpointMeta: EndpointMeta{
|
|
||||||
EndpointMetaBase: context.EndpointMetaBase{
|
|
||||||
Host: server,
|
|
||||||
SkipTLSVerify: skipTLSVerify,
|
|
||||||
},
|
|
||||||
DefaultNamespace: defaultNamespace,
|
|
||||||
},
|
|
||||||
TLSData: tlsData,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var testStoreCfg = store.NewConfig(
|
|
||||||
func() interface{} {
|
|
||||||
return &map[string]interface{}{}
|
|
||||||
},
|
|
||||||
store.EndpointTypeGetter(KubernetesEndpoint, func() interface{} { return &EndpointMeta{} }),
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestSaveLoadContexts(t *testing.T) {
|
|
||||||
storeDir, err := os.MkdirTemp("", "test-load-save-k8-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer os.RemoveAll(storeDir)
|
|
||||||
store := store.New(storeDir, testStoreCfg)
|
|
||||||
require.NoError(t, save(store, testEndpoint("https://test", "test", nil, nil, nil, false), "raw-notls"))
|
|
||||||
require.NoError(t, save(store, testEndpoint("https://test", "test", nil, nil, nil, true), "raw-notls-skip"))
|
|
||||||
require.NoError(t, save(store, testEndpoint("https://test", "test", []byte("ca"), []byte("cert"), []byte("key"), true), "raw-tls"))
|
|
||||||
|
|
||||||
kcFile, err := os.CreateTemp(os.TempDir(), "test-load-save-k8-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer os.Remove(kcFile.Name())
|
|
||||||
defer kcFile.Close()
|
|
||||||
cfg := clientcmdapi.NewConfig()
|
|
||||||
cfg.AuthInfos["user"] = clientcmdapi.NewAuthInfo()
|
|
||||||
cfg.Contexts["context1"] = clientcmdapi.NewContext()
|
|
||||||
cfg.Clusters["cluster1"] = clientcmdapi.NewCluster()
|
|
||||||
cfg.Contexts["context2"] = clientcmdapi.NewContext()
|
|
||||||
cfg.Clusters["cluster2"] = clientcmdapi.NewCluster()
|
|
||||||
cfg.AuthInfos["user"].ClientCertificateData = []byte("cert")
|
|
||||||
cfg.AuthInfos["user"].ClientKeyData = []byte("key")
|
|
||||||
cfg.Clusters["cluster1"].Server = "https://server1"
|
|
||||||
cfg.Clusters["cluster1"].InsecureSkipTLSVerify = true
|
|
||||||
cfg.Clusters["cluster2"].Server = "https://server2"
|
|
||||||
cfg.Clusters["cluster2"].CertificateAuthorityData = []byte("ca")
|
|
||||||
cfg.Contexts["context1"].AuthInfo = "user"
|
|
||||||
cfg.Contexts["context1"].Cluster = "cluster1"
|
|
||||||
cfg.Contexts["context1"].Namespace = "namespace1"
|
|
||||||
cfg.Contexts["context2"].AuthInfo = "user"
|
|
||||||
cfg.Contexts["context2"].Cluster = "cluster2"
|
|
||||||
cfg.Contexts["context2"].Namespace = "namespace2"
|
|
||||||
cfg.CurrentContext = "context1"
|
|
||||||
cfgData, err := clientcmd.Write(*cfg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = kcFile.Write(cfgData)
|
|
||||||
require.NoError(t, err)
|
|
||||||
kcFile.Close()
|
|
||||||
|
|
||||||
epDefault, err := FromKubeConfig(kcFile.Name(), "", "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
epContext2, err := FromKubeConfig(kcFile.Name(), "context2", "namespace-override")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, save(store, epDefault, "embed-default-context"))
|
|
||||||
require.NoError(t, save(store, epContext2, "embed-context2"))
|
|
||||||
|
|
||||||
rawNoTLSMeta, err := store.GetMetadata("raw-notls")
|
|
||||||
require.NoError(t, err)
|
|
||||||
rawNoTLSSkipMeta, err := store.GetMetadata("raw-notls-skip")
|
|
||||||
require.NoError(t, err)
|
|
||||||
rawTLSMeta, err := store.GetMetadata("raw-tls")
|
|
||||||
require.NoError(t, err)
|
|
||||||
embededDefaultMeta, err := store.GetMetadata("embed-default-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
embededContext2Meta, err := store.GetMetadata("embed-context2")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
rawNoTLS := EndpointFromContext(rawNoTLSMeta)
|
|
||||||
rawNoTLSSkip := EndpointFromContext(rawNoTLSSkipMeta)
|
|
||||||
rawTLS := EndpointFromContext(rawTLSMeta)
|
|
||||||
embededDefault := EndpointFromContext(embededDefaultMeta)
|
|
||||||
embededContext2 := EndpointFromContext(embededContext2Meta)
|
|
||||||
|
|
||||||
rawNoTLSEP, err := rawNoTLS.WithTLSData(store, "raw-notls")
|
|
||||||
require.NoError(t, err)
|
|
||||||
checkClientConfig(t, rawNoTLSEP, "https://test", "test", nil, nil, nil, false)
|
|
||||||
rawNoTLSSkipEP, err := rawNoTLSSkip.WithTLSData(store, "raw-notls-skip")
|
|
||||||
require.NoError(t, err)
|
|
||||||
checkClientConfig(t, rawNoTLSSkipEP, "https://test", "test", nil, nil, nil, true)
|
|
||||||
rawTLSEP, err := rawTLS.WithTLSData(store, "raw-tls")
|
|
||||||
require.NoError(t, err)
|
|
||||||
checkClientConfig(t, rawTLSEP, "https://test", "test", []byte("ca"), []byte("cert"), []byte("key"), true)
|
|
||||||
embededDefaultEP, err := embededDefault.WithTLSData(store, "embed-default-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
checkClientConfig(t, embededDefaultEP, "https://server1", "namespace1", nil, []byte("cert"), []byte("key"), true)
|
|
||||||
embededContext2EP, err := embededContext2.WithTLSData(store, "embed-context2")
|
|
||||||
require.NoError(t, err)
|
|
||||||
checkClientConfig(t, embededContext2EP, "https://server2", "namespace-override", []byte("ca"), []byte("cert"), []byte("key"), false)
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkClientConfig(t *testing.T, ep Endpoint, server, namespace string, ca, cert, key []byte, skipTLSVerify bool) {
|
|
||||||
config := ep.KubernetesConfig()
|
|
||||||
cfg, err := config.ClientConfig()
|
|
||||||
require.NoError(t, err)
|
|
||||||
ns, _, _ := config.Namespace()
|
|
||||||
assert.Equal(t, server, cfg.Host)
|
|
||||||
assert.Equal(t, namespace, ns)
|
|
||||||
assert.Equal(t, ca, cfg.CAData)
|
|
||||||
assert.Equal(t, cert, cfg.CertData)
|
|
||||||
assert.Equal(t, key, cfg.KeyData)
|
|
||||||
assert.Equal(t, skipTLSVerify, cfg.Insecure)
|
|
||||||
}
|
|
||||||
|
|
||||||
func save(s store.Writer, ep Endpoint, name string) error {
|
|
||||||
meta := store.Metadata{
|
|
||||||
Endpoints: map[string]interface{}{
|
|
||||||
KubernetesEndpoint: ep.EndpointMeta,
|
|
||||||
},
|
|
||||||
Name: name,
|
|
||||||
}
|
|
||||||
if err := s.CreateOrUpdate(meta); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return s.ResetEndpointTLSMaterial(name, KubernetesEndpoint, ep.TLSData.ToStoreTLSData())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSaveLoadGKEConfig(t *testing.T) {
|
|
||||||
storeDir, err := os.MkdirTemp("", t.Name())
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer os.RemoveAll(storeDir)
|
|
||||||
store := store.New(storeDir, testStoreCfg)
|
|
||||||
cfg, err := clientcmd.LoadFromFile("fixtures/gke-kubeconfig")
|
|
||||||
require.NoError(t, err)
|
|
||||||
clientCfg := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
|
|
||||||
expectedCfg, err := clientCfg.ClientConfig()
|
|
||||||
require.NoError(t, err)
|
|
||||||
ep, err := FromKubeConfig("fixtures/gke-kubeconfig", "", "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, save(store, ep, "gke-context"))
|
|
||||||
persistedMetadata, err := store.GetMetadata("gke-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
persistedEPMeta := EndpointFromContext(persistedMetadata)
|
|
||||||
assert.True(t, persistedEPMeta != nil)
|
|
||||||
persistedEP, err := persistedEPMeta.WithTLSData(store, "gke-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
persistedCfg := persistedEP.KubernetesConfig()
|
|
||||||
actualCfg, err := persistedCfg.ClientConfig()
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, expectedCfg.AuthProvider, actualCfg.AuthProvider)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSaveLoadEKSConfig(t *testing.T) {
|
|
||||||
storeDir, err := os.MkdirTemp("", t.Name())
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer os.RemoveAll(storeDir)
|
|
||||||
store := store.New(storeDir, testStoreCfg)
|
|
||||||
cfg, err := clientcmd.LoadFromFile("fixtures/eks-kubeconfig")
|
|
||||||
require.NoError(t, err)
|
|
||||||
clientCfg := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
|
|
||||||
expectedCfg, err := clientCfg.ClientConfig()
|
|
||||||
require.NoError(t, err)
|
|
||||||
ep, err := FromKubeConfig("fixtures/eks-kubeconfig", "", "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, save(store, ep, "eks-context"))
|
|
||||||
persistedMetadata, err := store.GetMetadata("eks-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
persistedEPMeta := EndpointFromContext(persistedMetadata)
|
|
||||||
assert.True(t, persistedEPMeta != nil)
|
|
||||||
persistedEP, err := persistedEPMeta.WithTLSData(store, "eks-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
persistedCfg := persistedEP.KubernetesConfig()
|
|
||||||
actualCfg, err := persistedCfg.ClientConfig()
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, expectedCfg.ExecProvider, actualCfg.ExecProvider)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSaveLoadK3SConfig(t *testing.T) {
|
|
||||||
storeDir, err := os.MkdirTemp("", t.Name())
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer os.RemoveAll(storeDir)
|
|
||||||
store := store.New(storeDir, testStoreCfg)
|
|
||||||
cfg, err := clientcmd.LoadFromFile("fixtures/k3s-kubeconfig")
|
|
||||||
require.NoError(t, err)
|
|
||||||
clientCfg := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
|
|
||||||
expectedCfg, err := clientCfg.ClientConfig()
|
|
||||||
require.NoError(t, err)
|
|
||||||
ep, err := FromKubeConfig("fixtures/k3s-kubeconfig", "", "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, save(store, ep, "k3s-context"))
|
|
||||||
persistedMetadata, err := store.GetMetadata("k3s-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
persistedEPMeta := EndpointFromContext(persistedMetadata)
|
|
||||||
assert.True(t, persistedEPMeta != nil)
|
|
||||||
persistedEP, err := persistedEPMeta.WithTLSData(store, "k3s-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
persistedCfg := persistedEP.KubernetesConfig()
|
|
||||||
actualCfg, err := persistedCfg.ClientConfig()
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.True(t, len(actualCfg.Username) > 0)
|
|
||||||
assert.True(t, len(actualCfg.Password) > 0)
|
|
||||||
assert.Equal(t, expectedCfg.Username, actualCfg.Username)
|
|
||||||
assert.Equal(t, expectedCfg.Password, actualCfg.Password)
|
|
||||||
}
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
clusters:
|
|
||||||
- cluster:
|
|
||||||
server: https://some-server
|
|
||||||
name: kubernetes
|
|
||||||
contexts:
|
|
||||||
- context:
|
|
||||||
cluster: kubernetes
|
|
||||||
user: aws
|
|
||||||
name: aws
|
|
||||||
current-context: aws
|
|
||||||
kind: Config
|
|
||||||
preferences: {}
|
|
||||||
users:
|
|
||||||
- name: aws
|
|
||||||
user:
|
|
||||||
exec:
|
|
||||||
apiVersion: client.authentication.k8s.io/v1alpha1
|
|
||||||
command: heptio-authenticator-aws
|
|
||||||
args:
|
|
||||||
- "token"
|
|
||||||
- "-i"
|
|
||||||
- "eks-cf"
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
clusters:
|
|
||||||
- cluster:
|
|
||||||
server: https://some-server
|
|
||||||
name: gke_sample
|
|
||||||
contexts:
|
|
||||||
- context:
|
|
||||||
cluster: gke_sample
|
|
||||||
user: gke_sample
|
|
||||||
name: gke_sample
|
|
||||||
current-context: gke_sample
|
|
||||||
kind: Config
|
|
||||||
preferences: {}
|
|
||||||
users:
|
|
||||||
- name: gke_sample
|
|
||||||
user:
|
|
||||||
auth-provider:
|
|
||||||
config:
|
|
||||||
cmd-args: config config-helper --format=json
|
|
||||||
cmd-path: /google/google-cloud-sdk/bin/gcloud
|
|
||||||
expiry-key: '{.credential.token_expiry}'
|
|
||||||
token-key: '{.credential.access_token}'
|
|
||||||
name: gcp
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
clusters:
|
|
||||||
- cluster:
|
|
||||||
certificate-authority-data: dGhlLWNh
|
|
||||||
server: https://someserver
|
|
||||||
name: test-cluster
|
|
||||||
contexts:
|
|
||||||
- context:
|
|
||||||
cluster: test-cluster
|
|
||||||
user: test-user
|
|
||||||
namespace: zoinx
|
|
||||||
name: test
|
|
||||||
current-context: test
|
|
||||||
kind: Config
|
|
||||||
preferences: {}
|
|
||||||
users:
|
|
||||||
- name: test-user
|
|
||||||
user:
|
|
||||||
username: admin
|
|
||||||
password: testpwd
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
clusters:
|
|
||||||
- cluster:
|
|
||||||
certificate-authority-data: dGhlLWNh
|
|
||||||
server: https://someserver
|
|
||||||
name: test-cluster
|
|
||||||
contexts:
|
|
||||||
- context:
|
|
||||||
cluster: test-cluster
|
|
||||||
user: test-user
|
|
||||||
namespace: zoinx
|
|
||||||
name: test
|
|
||||||
current-context: test
|
|
||||||
kind: Config
|
|
||||||
preferences: {}
|
|
||||||
users:
|
|
||||||
- name: test-user
|
|
||||||
user:
|
|
||||||
client-certificate-data: dGhlLWNlcnQ=
|
|
||||||
client-key-data: dGhlLWtleQ==
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
package context
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/docker/cli/cli/command"
|
|
||||||
cliflags "github.com/docker/cli/cli/flags"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestDefaultContextInitializer(t *testing.T) {
|
|
||||||
os.Setenv("KUBECONFIG", "./fixtures/test-kubeconfig")
|
|
||||||
defer os.Unsetenv("KUBECONFIG")
|
|
||||||
ctx, err := command.ResolveDefaultContext(&cliflags.CommonOptions{}, command.DefaultContextStoreConfig())
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, "default", ctx.Meta.Name)
|
|
||||||
assert.Equal(t, "zoinx", ctx.Meta.Endpoints[KubernetesEndpoint].(EndpointMeta).DefaultNamespace)
|
|
||||||
}
|
|
||||||
@@ -160,20 +160,12 @@ func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Version(ctx context.Context) (string, error) {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Driver) Stop(ctx context.Context, force bool) error {
|
func (d *Driver) Stop(ctx context.Context, force bool) error {
|
||||||
// future version may scale the replicas to zero here
|
// future version may scale the replicas to zero here
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
|
func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
|
||||||
if !rmDaemon {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := d.deploymentClient.Delete(ctx, d.deployment.Name, metav1.DeleteOptions{}); err != nil {
|
if err := d.deploymentClient.Delete(ctx, d.deployment.Name, metav1.DeleteOptions{}); err != nil {
|
||||||
if !apierrors.IsNotFound(err) {
|
if !apierrors.IsNotFound(err) {
|
||||||
return errors.Wrapf(err, "error while calling deploymentClient.Delete for %q", d.deployment.Name)
|
return errors.Wrapf(err, "error while calling deploymentClient.Delete for %q", d.deployment.Name)
|
||||||
|
|||||||
@@ -5,8 +5,6 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
corev1 "k8s.io/api/core/v1"
|
|
||||||
|
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
"github.com/docker/buildx/driver/bkimage"
|
"github.com/docker/buildx/driver/bkimage"
|
||||||
"github.com/docker/buildx/driver/kubernetes/manifest"
|
"github.com/docker/buildx/driver/kubernetes/manifest"
|
||||||
@@ -34,7 +32,7 @@ func (*factory) Usage() string {
|
|||||||
return DriverName
|
return DriverName
|
||||||
}
|
}
|
||||||
|
|
||||||
func (*factory) Priority(ctx context.Context, endpoint string, api dockerclient.APIClient) int {
|
func (*factory) Priority(ctx context.Context, api dockerclient.APIClient) int {
|
||||||
if api == nil {
|
if api == nil {
|
||||||
return priorityUnsupported
|
return priorityUnsupported
|
||||||
}
|
}
|
||||||
@@ -68,9 +66,77 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
|
|||||||
clientset: clientset,
|
clientset: clientset,
|
||||||
}
|
}
|
||||||
|
|
||||||
deploymentOpt, loadbalance, namespace, err := f.processDriverOpts(deploymentName, namespace, cfg)
|
deploymentOpt := &manifest.DeploymentOpt{
|
||||||
if nil != err {
|
Name: deploymentName,
|
||||||
return nil, err
|
Image: bkimage.DefaultImage,
|
||||||
|
Replicas: 1,
|
||||||
|
BuildkitFlags: cfg.BuildkitFlags,
|
||||||
|
Rootless: false,
|
||||||
|
Platforms: cfg.Platforms,
|
||||||
|
ConfigFiles: cfg.Files,
|
||||||
|
}
|
||||||
|
|
||||||
|
deploymentOpt.Qemu.Image = bkimage.QemuImage
|
||||||
|
|
||||||
|
loadbalance := LoadbalanceSticky
|
||||||
|
|
||||||
|
for k, v := range cfg.DriverOpts {
|
||||||
|
switch k {
|
||||||
|
case "image":
|
||||||
|
if v != "" {
|
||||||
|
deploymentOpt.Image = v
|
||||||
|
}
|
||||||
|
case "namespace":
|
||||||
|
namespace = v
|
||||||
|
case "replicas":
|
||||||
|
deploymentOpt.Replicas, err = strconv.Atoi(v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
case "requests.cpu":
|
||||||
|
deploymentOpt.RequestsCPU = v
|
||||||
|
case "requests.memory":
|
||||||
|
deploymentOpt.RequestsMemory = v
|
||||||
|
case "limits.cpu":
|
||||||
|
deploymentOpt.LimitsCPU = v
|
||||||
|
case "limits.memory":
|
||||||
|
deploymentOpt.LimitsMemory = v
|
||||||
|
case "rootless":
|
||||||
|
deploymentOpt.Rootless, err = strconv.ParseBool(v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
deploymentOpt.Image = bkimage.DefaultRootlessImage
|
||||||
|
case "nodeselector":
|
||||||
|
kvs := strings.Split(strings.Trim(v, `"`), ",")
|
||||||
|
s := map[string]string{}
|
||||||
|
for i := range kvs {
|
||||||
|
kv := strings.Split(kvs[i], "=")
|
||||||
|
if len(kv) == 2 {
|
||||||
|
s[kv[0]] = kv[1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
deploymentOpt.NodeSelector = s
|
||||||
|
case "loadbalance":
|
||||||
|
switch v {
|
||||||
|
case LoadbalanceSticky:
|
||||||
|
case LoadbalanceRandom:
|
||||||
|
default:
|
||||||
|
return nil, errors.Errorf("invalid loadbalance %q", v)
|
||||||
|
}
|
||||||
|
loadbalance = v
|
||||||
|
case "qemu.install":
|
||||||
|
deploymentOpt.Qemu.Install, err = strconv.ParseBool(v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
case "qemu.image":
|
||||||
|
if v != "" {
|
||||||
|
deploymentOpt.Qemu.Image = v
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return nil, errors.Errorf("invalid driver option %s for driver %s", k, DriverName)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
d.deployment, d.configMaps, err = manifest.NewDeployment(deploymentOpt)
|
d.deployment, d.configMaps, err = manifest.NewDeployment(deploymentOpt)
|
||||||
@@ -100,121 +166,6 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
|
|||||||
return d, nil
|
return d, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *factory) processDriverOpts(deploymentName string, namespace string, cfg driver.InitConfig) (*manifest.DeploymentOpt, string, string, error) {
|
|
||||||
deploymentOpt := &manifest.DeploymentOpt{
|
|
||||||
Name: deploymentName,
|
|
||||||
Image: bkimage.DefaultImage,
|
|
||||||
Replicas: 1,
|
|
||||||
BuildkitFlags: cfg.BuildkitFlags,
|
|
||||||
Rootless: false,
|
|
||||||
Platforms: cfg.Platforms,
|
|
||||||
ConfigFiles: cfg.Files,
|
|
||||||
}
|
|
||||||
|
|
||||||
deploymentOpt.Qemu.Image = bkimage.QemuImage
|
|
||||||
|
|
||||||
loadbalance := LoadbalanceSticky
|
|
||||||
var err error
|
|
||||||
|
|
||||||
for k, v := range cfg.DriverOpts {
|
|
||||||
switch k {
|
|
||||||
case "image":
|
|
||||||
if v != "" {
|
|
||||||
deploymentOpt.Image = v
|
|
||||||
}
|
|
||||||
case "namespace":
|
|
||||||
namespace = v
|
|
||||||
case "replicas":
|
|
||||||
deploymentOpt.Replicas, err = strconv.Atoi(v)
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", "", err
|
|
||||||
}
|
|
||||||
case "requests.cpu":
|
|
||||||
deploymentOpt.RequestsCPU = v
|
|
||||||
case "requests.memory":
|
|
||||||
deploymentOpt.RequestsMemory = v
|
|
||||||
case "limits.cpu":
|
|
||||||
deploymentOpt.LimitsCPU = v
|
|
||||||
case "limits.memory":
|
|
||||||
deploymentOpt.LimitsMemory = v
|
|
||||||
case "rootless":
|
|
||||||
deploymentOpt.Rootless, err = strconv.ParseBool(v)
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", "", err
|
|
||||||
}
|
|
||||||
if _, isImage := cfg.DriverOpts["image"]; !isImage {
|
|
||||||
deploymentOpt.Image = bkimage.DefaultRootlessImage
|
|
||||||
}
|
|
||||||
case "nodeselector":
|
|
||||||
kvs := strings.Split(strings.Trim(v, `"`), ",")
|
|
||||||
s := map[string]string{}
|
|
||||||
for i := range kvs {
|
|
||||||
kv := strings.Split(kvs[i], "=")
|
|
||||||
if len(kv) == 2 {
|
|
||||||
s[kv[0]] = kv[1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
deploymentOpt.NodeSelector = s
|
|
||||||
case "tolerations":
|
|
||||||
ts := strings.Split(v, ";")
|
|
||||||
deploymentOpt.Tolerations = []corev1.Toleration{}
|
|
||||||
for i := range ts {
|
|
||||||
kvs := strings.Split(ts[i], ",")
|
|
||||||
|
|
||||||
t := corev1.Toleration{}
|
|
||||||
|
|
||||||
for j := range kvs {
|
|
||||||
kv := strings.Split(kvs[j], "=")
|
|
||||||
if len(kv) == 2 {
|
|
||||||
switch kv[0] {
|
|
||||||
case "key":
|
|
||||||
t.Key = kv[1]
|
|
||||||
case "operator":
|
|
||||||
t.Operator = corev1.TolerationOperator(kv[1])
|
|
||||||
case "value":
|
|
||||||
t.Value = kv[1]
|
|
||||||
case "effect":
|
|
||||||
t.Effect = corev1.TaintEffect(kv[1])
|
|
||||||
case "tolerationSeconds":
|
|
||||||
c, err := strconv.Atoi(kv[1])
|
|
||||||
if nil != err {
|
|
||||||
return nil, "", "", err
|
|
||||||
}
|
|
||||||
c64 := int64(c)
|
|
||||||
t.TolerationSeconds = &c64
|
|
||||||
default:
|
|
||||||
return nil, "", "", errors.Errorf("invalid tolaration %q", v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
deploymentOpt.Tolerations = append(deploymentOpt.Tolerations, t)
|
|
||||||
}
|
|
||||||
case "loadbalance":
|
|
||||||
switch v {
|
|
||||||
case LoadbalanceSticky:
|
|
||||||
case LoadbalanceRandom:
|
|
||||||
default:
|
|
||||||
return nil, "", "", errors.Errorf("invalid loadbalance %q", v)
|
|
||||||
}
|
|
||||||
loadbalance = v
|
|
||||||
case "qemu.install":
|
|
||||||
deploymentOpt.Qemu.Install, err = strconv.ParseBool(v)
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", "", err
|
|
||||||
}
|
|
||||||
case "qemu.image":
|
|
||||||
if v != "" {
|
|
||||||
deploymentOpt.Qemu.Image = v
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return nil, "", "", errors.Errorf("invalid driver option %s for driver %s", k, DriverName)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return deploymentOpt, loadbalance, namespace, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *factory) AllowsInstances() bool {
|
func (f *factory) AllowsInstances() bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,230 +0,0 @@
|
|||||||
package kubernetes
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/docker/buildx/driver"
|
|
||||||
"github.com/docker/buildx/driver/bkimage"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
v1 "k8s.io/api/core/v1"
|
|
||||||
"k8s.io/client-go/rest"
|
|
||||||
)
|
|
||||||
|
|
||||||
type mockKubeClientConfig struct {
|
|
||||||
clientConfig *rest.Config
|
|
||||||
namespace string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *mockKubeClientConfig) ClientConfig() (*rest.Config, error) {
|
|
||||||
return r.clientConfig, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *mockKubeClientConfig) Namespace() (string, bool, error) {
|
|
||||||
return r.namespace, true, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestFactory_processDriverOpts(t *testing.T) {
|
|
||||||
kcc := mockKubeClientConfig{
|
|
||||||
clientConfig: &rest.Config{},
|
|
||||||
}
|
|
||||||
|
|
||||||
cfg := driver.InitConfig{
|
|
||||||
Name: "buildx_buildkit_test",
|
|
||||||
KubeClientConfig: &kcc,
|
|
||||||
}
|
|
||||||
f := factory{}
|
|
||||||
|
|
||||||
t.Run(
|
|
||||||
"ValidOptions", func(t *testing.T) {
|
|
||||||
cfg.DriverOpts = map[string]string{
|
|
||||||
"namespace": "test-ns",
|
|
||||||
"image": "test:latest",
|
|
||||||
"replicas": "2",
|
|
||||||
"requests.cpu": "100m",
|
|
||||||
"requests.memory": "32Mi",
|
|
||||||
"limits.cpu": "200m",
|
|
||||||
"limits.memory": "64Mi",
|
|
||||||
"rootless": "true",
|
|
||||||
"nodeselector": "selector1=value1,selector2=value2",
|
|
||||||
"tolerations": "key=tolerationKey1,value=tolerationValue1,operator=Equal,effect=NoSchedule,tolerationSeconds=60;key=tolerationKey2,operator=Exists",
|
|
||||||
"loadbalance": "random",
|
|
||||||
"qemu.install": "true",
|
|
||||||
"qemu.image": "qemu:latest",
|
|
||||||
}
|
|
||||||
ns := "test"
|
|
||||||
|
|
||||||
r, loadbalance, ns, err := f.processDriverOpts(cfg.Name, ns, cfg)
|
|
||||||
|
|
||||||
nodeSelectors := map[string]string{
|
|
||||||
"selector1": "value1",
|
|
||||||
"selector2": "value2",
|
|
||||||
}
|
|
||||||
|
|
||||||
ts := int64(60)
|
|
||||||
tolerations := []v1.Toleration{
|
|
||||||
{
|
|
||||||
Key: "tolerationKey1",
|
|
||||||
Operator: v1.TolerationOpEqual,
|
|
||||||
Value: "tolerationValue1",
|
|
||||||
Effect: v1.TaintEffectNoSchedule,
|
|
||||||
TolerationSeconds: &ts,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Key: "tolerationKey2",
|
|
||||||
Operator: v1.TolerationOpExists,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
require.Equal(t, "test-ns", ns)
|
|
||||||
require.Equal(t, "test:latest", r.Image)
|
|
||||||
require.Equal(t, 2, r.Replicas)
|
|
||||||
require.Equal(t, "100m", r.RequestsCPU)
|
|
||||||
require.Equal(t, "32Mi", r.RequestsMemory)
|
|
||||||
require.Equal(t, "200m", r.LimitsCPU)
|
|
||||||
require.Equal(t, "64Mi", r.LimitsMemory)
|
|
||||||
require.True(t, r.Rootless)
|
|
||||||
require.Equal(t, nodeSelectors, r.NodeSelector)
|
|
||||||
require.Equal(t, tolerations, r.Tolerations)
|
|
||||||
require.Equal(t, LoadbalanceRandom, loadbalance)
|
|
||||||
require.True(t, r.Qemu.Install)
|
|
||||||
require.Equal(t, "qemu:latest", r.Qemu.Image)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
t.Run(
|
|
||||||
"NoOptions", func(t *testing.T) {
|
|
||||||
cfg.DriverOpts = map[string]string{}
|
|
||||||
|
|
||||||
r, loadbalance, ns, err := f.processDriverOpts(cfg.Name, "test", cfg)
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
require.Equal(t, "test", ns)
|
|
||||||
require.Equal(t, bkimage.DefaultImage, r.Image)
|
|
||||||
require.Equal(t, 1, r.Replicas)
|
|
||||||
require.Equal(t, "", r.RequestsCPU)
|
|
||||||
require.Equal(t, "", r.RequestsMemory)
|
|
||||||
require.Equal(t, "", r.LimitsCPU)
|
|
||||||
require.Equal(t, "", r.LimitsMemory)
|
|
||||||
require.False(t, r.Rootless)
|
|
||||||
require.Empty(t, r.NodeSelector)
|
|
||||||
require.Empty(t, r.Tolerations)
|
|
||||||
require.Equal(t, LoadbalanceSticky, loadbalance)
|
|
||||||
require.False(t, r.Qemu.Install)
|
|
||||||
require.Equal(t, bkimage.QemuImage, r.Qemu.Image)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
t.Run(
|
|
||||||
"RootlessOverride", func(t *testing.T) {
|
|
||||||
cfg.DriverOpts = map[string]string{
|
|
||||||
"rootless": "true",
|
|
||||||
"loadbalance": "sticky",
|
|
||||||
}
|
|
||||||
|
|
||||||
r, loadbalance, ns, err := f.processDriverOpts(cfg.Name, "test", cfg)
|
|
||||||
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
require.Equal(t, "test", ns)
|
|
||||||
require.Equal(t, bkimage.DefaultRootlessImage, r.Image)
|
|
||||||
require.Equal(t, 1, r.Replicas)
|
|
||||||
require.Equal(t, "", r.RequestsCPU)
|
|
||||||
require.Equal(t, "", r.RequestsMemory)
|
|
||||||
require.Equal(t, "", r.LimitsCPU)
|
|
||||||
require.Equal(t, "", r.LimitsMemory)
|
|
||||||
require.True(t, r.Rootless)
|
|
||||||
require.Empty(t, r.NodeSelector)
|
|
||||||
require.Empty(t, r.Tolerations)
|
|
||||||
require.Equal(t, LoadbalanceSticky, loadbalance)
|
|
||||||
require.False(t, r.Qemu.Install)
|
|
||||||
require.Equal(t, bkimage.QemuImage, r.Qemu.Image)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
t.Run(
|
|
||||||
"InvalidReplicas", func(t *testing.T) {
|
|
||||||
cfg.DriverOpts = map[string]string{
|
|
||||||
"replicas": "invalid",
|
|
||||||
}
|
|
||||||
|
|
||||||
_, _, _, err := f.processDriverOpts(cfg.Name, "test", cfg)
|
|
||||||
|
|
||||||
require.Error(t, err)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
t.Run(
|
|
||||||
"InvalidRootless", func(t *testing.T) {
|
|
||||||
cfg.DriverOpts = map[string]string{
|
|
||||||
"rootless": "invalid",
|
|
||||||
}
|
|
||||||
|
|
||||||
_, _, _, err := f.processDriverOpts(cfg.Name, "test", cfg)
|
|
||||||
|
|
||||||
require.Error(t, err)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
t.Run(
|
|
||||||
"InvalidTolerationKeyword", func(t *testing.T) {
|
|
||||||
cfg.DriverOpts = map[string]string{
|
|
||||||
"tolerations": "key=foo,value=bar,invalid=foo2",
|
|
||||||
}
|
|
||||||
|
|
||||||
_, _, _, err := f.processDriverOpts(cfg.Name, "test", cfg)
|
|
||||||
|
|
||||||
require.Error(t, err)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
t.Run(
|
|
||||||
"InvalidTolerationSeconds", func(t *testing.T) {
|
|
||||||
cfg.DriverOpts = map[string]string{
|
|
||||||
"tolerations": "key=foo,value=bar,tolerationSeconds=invalid",
|
|
||||||
}
|
|
||||||
|
|
||||||
_, _, _, err := f.processDriverOpts(cfg.Name, "test", cfg)
|
|
||||||
|
|
||||||
require.Error(t, err)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
t.Run(
|
|
||||||
"InvalidLoadBalance", func(t *testing.T) {
|
|
||||||
cfg.DriverOpts = map[string]string{
|
|
||||||
"loadbalance": "invalid",
|
|
||||||
}
|
|
||||||
|
|
||||||
_, _, _, err := f.processDriverOpts(cfg.Name, "test", cfg)
|
|
||||||
|
|
||||||
require.Error(t, err)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
t.Run(
|
|
||||||
"InvalidQemuInstall", func(t *testing.T) {
|
|
||||||
cfg.DriverOpts = map[string]string{
|
|
||||||
"qemu.install": "invalid",
|
|
||||||
}
|
|
||||||
|
|
||||||
_, _, _, err := f.processDriverOpts(cfg.Name, "test", cfg)
|
|
||||||
|
|
||||||
require.Error(t, err)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
|
|
||||||
t.Run(
|
|
||||||
"InvalidOption", func(t *testing.T) {
|
|
||||||
cfg.DriverOpts = map[string]string{
|
|
||||||
"invalid": "foo",
|
|
||||||
}
|
|
||||||
|
|
||||||
_, _, _, err := f.processDriverOpts(cfg.Name, "test", cfg)
|
|
||||||
|
|
||||||
require.Error(t, err)
|
|
||||||
},
|
|
||||||
)
|
|
||||||
}
|
|
||||||
@@ -32,7 +32,6 @@ type DeploymentOpt struct {
|
|||||||
|
|
||||||
Rootless bool
|
Rootless bool
|
||||||
NodeSelector map[string]string
|
NodeSelector map[string]string
|
||||||
Tolerations []corev1.Toleration
|
|
||||||
RequestsCPU string
|
RequestsCPU string
|
||||||
RequestsMemory string
|
RequestsMemory string
|
||||||
LimitsCPU string
|
LimitsCPU string
|
||||||
@@ -160,10 +159,6 @@ func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.Config
|
|||||||
d.Spec.Template.Spec.NodeSelector = opt.NodeSelector
|
d.Spec.Template.Spec.NodeSelector = opt.NodeSelector
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(opt.Tolerations) > 0 {
|
|
||||||
d.Spec.Template.Spec.Tolerations = opt.Tolerations
|
|
||||||
}
|
|
||||||
|
|
||||||
if opt.RequestsCPU != "" {
|
if opt.RequestsCPU != "" {
|
||||||
reqCPU, err := resource.ParseQuantity(opt.RequestsCPU)
|
reqCPU, err := resource.ParseQuantity(opt.RequestsCPU)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -204,15 +199,12 @@ func toRootless(d *appsv1.Deployment) error {
|
|||||||
d.Spec.Template.Spec.Containers[0].Args,
|
d.Spec.Template.Spec.Containers[0].Args,
|
||||||
"--oci-worker-no-process-sandbox",
|
"--oci-worker-no-process-sandbox",
|
||||||
)
|
)
|
||||||
d.Spec.Template.Spec.Containers[0].SecurityContext = &corev1.SecurityContext{
|
d.Spec.Template.Spec.Containers[0].SecurityContext = nil
|
||||||
SeccompProfile: &corev1.SeccompProfile{
|
|
||||||
Type: corev1.SeccompProfileTypeUnconfined,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
if d.Spec.Template.ObjectMeta.Annotations == nil {
|
if d.Spec.Template.ObjectMeta.Annotations == nil {
|
||||||
d.Spec.Template.ObjectMeta.Annotations = make(map[string]string, 1)
|
d.Spec.Template.ObjectMeta.Annotations = make(map[string]string, 2)
|
||||||
}
|
}
|
||||||
d.Spec.Template.ObjectMeta.Annotations["container.apparmor.security.beta.kubernetes.io/"+containerName] = "unconfined"
|
d.Spec.Template.ObjectMeta.Annotations["container.apparmor.security.beta.kubernetes.io/"+containerName] = "unconfined"
|
||||||
|
d.Spec.Template.ObjectMeta.Annotations["container.seccomp.security.alpha.kubernetes.io/"+containerName] = "unconfined"
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ import (
|
|||||||
"sort"
|
"sort"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/serialx/hashring"
|
"github.com/serialx/hashring"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
appsv1 "k8s.io/api/apps/v1"
|
appsv1 "k8s.io/api/apps/v1"
|
||||||
@@ -30,9 +29,6 @@ func (pc *RandomPodChooser) ChoosePod(ctx context.Context) (*corev1.Pod, error)
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if len(pods) == 0 {
|
|
||||||
return nil, errors.New("no running buildkit pods found")
|
|
||||||
}
|
|
||||||
randSource := pc.RandSource
|
randSource := pc.RandSource
|
||||||
if randSource == nil {
|
if randSource == nil {
|
||||||
randSource = rand.NewSource(time.Now().Unix())
|
randSource = rand.NewSource(time.Now().Unix())
|
||||||
|
|||||||
@@ -2,7 +2,7 @@ package driver
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"os"
|
"io/ioutil"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
@@ -18,7 +18,7 @@ import (
|
|||||||
type Factory interface {
|
type Factory interface {
|
||||||
Name() string
|
Name() string
|
||||||
Usage() string
|
Usage() string
|
||||||
Priority(ctx context.Context, endpoint string, api dockerclient.APIClient) int
|
Priority(context.Context, dockerclient.APIClient) int
|
||||||
New(ctx context.Context, cfg InitConfig) (Driver, error)
|
New(ctx context.Context, cfg InitConfig) (Driver, error)
|
||||||
AllowsInstances() bool
|
AllowsInstances() bool
|
||||||
}
|
}
|
||||||
@@ -40,7 +40,7 @@ func (k KubeClientConfigInCluster) ClientConfig() (*rest.Config, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (k KubeClientConfigInCluster) Namespace() (string, bool, error) {
|
func (k KubeClientConfigInCluster) Namespace() (string, bool, error) {
|
||||||
namespace, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
|
namespace, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", false, err
|
return "", false, err
|
||||||
}
|
}
|
||||||
@@ -50,7 +50,6 @@ func (k KubeClientConfigInCluster) Namespace() (string, bool, error) {
|
|||||||
type InitConfig struct {
|
type InitConfig struct {
|
||||||
// This object needs updates to be generic for different drivers
|
// This object needs updates to be generic for different drivers
|
||||||
Name string
|
Name string
|
||||||
EndpointAddr string
|
|
||||||
DockerAPI dockerclient.APIClient
|
DockerAPI dockerclient.APIClient
|
||||||
KubeClientConfig KubeClientConfig
|
KubeClientConfig KubeClientConfig
|
||||||
BuildkitFlags []string
|
BuildkitFlags []string
|
||||||
@@ -71,7 +70,7 @@ func Register(f Factory) {
|
|||||||
drivers[f.Name()] = f
|
drivers[f.Name()] = f
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetDefaultFactory(ctx context.Context, ep string, c dockerclient.APIClient, instanceRequired bool) (Factory, error) {
|
func GetDefaultFactory(ctx context.Context, c dockerclient.APIClient, instanceRequired bool) (Factory, error) {
|
||||||
if len(drivers) == 0 {
|
if len(drivers) == 0 {
|
||||||
return nil, errors.Errorf("no drivers available")
|
return nil, errors.Errorf("no drivers available")
|
||||||
}
|
}
|
||||||
@@ -84,7 +83,7 @@ func GetDefaultFactory(ctx context.Context, ep string, c dockerclient.APIClient,
|
|||||||
if instanceRequired && !f.AllowsInstances() {
|
if instanceRequired && !f.AllowsInstances() {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
dd = append(dd, p{f: f, priority: f.Priority(ctx, ep, c)})
|
dd = append(dd, p{f: f, priority: f.Priority(ctx, c)})
|
||||||
}
|
}
|
||||||
sort.Slice(dd, func(i, j int) bool {
|
sort.Slice(dd, func(i, j int) bool {
|
||||||
return dd[i].priority < dd[j].priority
|
return dd[i].priority < dd[j].priority
|
||||||
@@ -92,21 +91,20 @@ func GetDefaultFactory(ctx context.Context, ep string, c dockerclient.APIClient,
|
|||||||
return dd[0].f, nil
|
return dd[0].f, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetFactory(name string, instanceRequired bool) (Factory, error) {
|
func GetFactory(name string, instanceRequired bool) Factory {
|
||||||
for _, f := range drivers {
|
for _, f := range drivers {
|
||||||
|
if instanceRequired && !f.AllowsInstances() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
if f.Name() == name {
|
if f.Name() == name {
|
||||||
if instanceRequired && !f.AllowsInstances() {
|
return f
|
||||||
return nil, errors.Errorf("additional instances of driver %q cannot be created", name)
|
|
||||||
}
|
|
||||||
return f, nil
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil, errors.Errorf("failed to find driver %q", name)
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetDriver(ctx context.Context, name string, f Factory, endpointAddr string, api dockerclient.APIClient, auth Auth, kcc KubeClientConfig, flags []string, files map[string][]byte, do map[string]string, platforms []specs.Platform, contextPathHash string) (Driver, error) {
|
func GetDriver(ctx context.Context, name string, f Factory, api dockerclient.APIClient, auth Auth, kcc KubeClientConfig, flags []string, files map[string][]byte, do map[string]string, platforms []specs.Platform, contextPathHash string) (Driver, error) {
|
||||||
ic := InitConfig{
|
ic := InitConfig{
|
||||||
EndpointAddr: endpointAddr,
|
|
||||||
DockerAPI: api,
|
DockerAPI: api,
|
||||||
KubeClientConfig: kcc,
|
KubeClientConfig: kcc,
|
||||||
Name: name,
|
Name: name,
|
||||||
@@ -119,7 +117,7 @@ func GetDriver(ctx context.Context, name string, f Factory, endpointAddr string,
|
|||||||
}
|
}
|
||||||
if f == nil {
|
if f == nil {
|
||||||
var err error
|
var err error
|
||||||
f, err = GetDefaultFactory(ctx, endpointAddr, api, false)
|
f, err = GetDefaultFactory(ctx, api, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -131,12 +129,9 @@ func GetDriver(ctx context.Context, name string, f Factory, endpointAddr string,
|
|||||||
return &cachedDriver{Driver: d}, nil
|
return &cachedDriver{Driver: d}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetFactories(instanceRequired bool) []Factory {
|
func GetFactories() []Factory {
|
||||||
ds := make([]Factory, 0, len(drivers))
|
ds := make([]Factory, 0, len(drivers))
|
||||||
for _, d := range drivers {
|
for _, d := range drivers {
|
||||||
if instanceRequired && !d.AllowsInstances() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
ds = append(ds, d)
|
ds = append(ds, d)
|
||||||
}
|
}
|
||||||
sort.Slice(ds, func(i, j int) bool {
|
sort.Slice(ds, func(i, j int) bool {
|
||||||
|
|||||||
@@ -1,106 +0,0 @@
|
|||||||
package remote
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/buildx/driver"
|
|
||||||
"github.com/docker/buildx/util/progress"
|
|
||||||
"github.com/moby/buildkit/client"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Driver struct {
|
|
||||||
factory driver.Factory
|
|
||||||
driver.InitConfig
|
|
||||||
*tlsOpts
|
|
||||||
}
|
|
||||||
|
|
||||||
type tlsOpts struct {
|
|
||||||
serverName string
|
|
||||||
caCert string
|
|
||||||
cert string
|
|
||||||
key string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
|
|
||||||
for i := 0; ; i++ {
|
|
||||||
info, err := d.Info(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if info.Status != driver.Inactive {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return ctx.Err()
|
|
||||||
default:
|
|
||||||
if i > 10 {
|
|
||||||
i = 10
|
|
||||||
}
|
|
||||||
time.Sleep(time.Duration(i) * time.Second)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
|
|
||||||
c, err := d.Client(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return &driver.Info{
|
|
||||||
Status: driver.Inactive,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := c.ListWorkers(ctx); err != nil {
|
|
||||||
return &driver.Info{
|
|
||||||
Status: driver.Inactive,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return &driver.Info{
|
|
||||||
Status: driver.Running,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Driver) Version(ctx context.Context) (string, error) {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Driver) Stop(ctx context.Context, force bool) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Driver) Client(ctx context.Context) (*client.Client, error) {
|
|
||||||
opts := []client.ClientOpt{}
|
|
||||||
if d.tlsOpts != nil {
|
|
||||||
opts = append(opts, client.WithCredentials(d.tlsOpts.serverName, d.tlsOpts.caCert, d.tlsOpts.cert, d.tlsOpts.key))
|
|
||||||
}
|
|
||||||
|
|
||||||
return client.New(ctx, d.InitConfig.EndpointAddr, opts...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Driver) Features() map[driver.Feature]bool {
|
|
||||||
return map[driver.Feature]bool{
|
|
||||||
driver.OCIExporter: true,
|
|
||||||
driver.DockerExporter: false,
|
|
||||||
driver.CacheExport: true,
|
|
||||||
driver.MultiPlatform: true,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Driver) Factory() driver.Factory {
|
|
||||||
return d.factory
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Driver) IsMobyDriver() bool {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Driver) Config() driver.InitConfig {
|
|
||||||
return d.InitConfig
|
|
||||||
}
|
|
||||||
@@ -1,118 +0,0 @@
|
|||||||
package remote
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"net/url"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
// import connhelpers for special url schemes
|
|
||||||
_ "github.com/moby/buildkit/client/connhelper/dockercontainer"
|
|
||||||
_ "github.com/moby/buildkit/client/connhelper/kubepod"
|
|
||||||
_ "github.com/moby/buildkit/client/connhelper/ssh"
|
|
||||||
|
|
||||||
"github.com/docker/buildx/driver"
|
|
||||||
util "github.com/docker/buildx/driver/remote/util"
|
|
||||||
dockerclient "github.com/docker/docker/client"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
const prioritySupported = 20
|
|
||||||
const priorityUnsupported = 90
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
driver.Register(&factory{})
|
|
||||||
}
|
|
||||||
|
|
||||||
type factory struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*factory) Name() string {
|
|
||||||
return "remote"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*factory) Usage() string {
|
|
||||||
return "remote"
|
|
||||||
}
|
|
||||||
|
|
||||||
func (*factory) Priority(ctx context.Context, endpoint string, api dockerclient.APIClient) int {
|
|
||||||
if util.IsValidEndpoint(endpoint) != nil {
|
|
||||||
return priorityUnsupported
|
|
||||||
}
|
|
||||||
return prioritySupported
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver, error) {
|
|
||||||
if len(cfg.Files) > 0 {
|
|
||||||
return nil, errors.Errorf("setting config file is not supported for remote driver")
|
|
||||||
}
|
|
||||||
if len(cfg.BuildkitFlags) > 0 {
|
|
||||||
return nil, errors.Errorf("setting buildkit flags is not supported for remote driver")
|
|
||||||
}
|
|
||||||
|
|
||||||
d := &Driver{
|
|
||||||
factory: f,
|
|
||||||
InitConfig: cfg,
|
|
||||||
}
|
|
||||||
|
|
||||||
tls := &tlsOpts{}
|
|
||||||
tlsEnabled := false
|
|
||||||
for k, v := range cfg.DriverOpts {
|
|
||||||
switch k {
|
|
||||||
case "servername":
|
|
||||||
tls.serverName = v
|
|
||||||
tlsEnabled = true
|
|
||||||
case "cacert":
|
|
||||||
if !filepath.IsAbs(v) {
|
|
||||||
return nil, errors.Errorf("non-absolute path '%s' provided for %s", v, k)
|
|
||||||
}
|
|
||||||
tls.caCert = v
|
|
||||||
tlsEnabled = true
|
|
||||||
case "cert":
|
|
||||||
if !filepath.IsAbs(v) {
|
|
||||||
return nil, errors.Errorf("non-absolute path '%s' provided for %s", v, k)
|
|
||||||
}
|
|
||||||
tls.cert = v
|
|
||||||
tlsEnabled = true
|
|
||||||
case "key":
|
|
||||||
if !filepath.IsAbs(v) {
|
|
||||||
return nil, errors.Errorf("non-absolute path '%s' provided for %s", v, k)
|
|
||||||
}
|
|
||||||
tls.key = v
|
|
||||||
tlsEnabled = true
|
|
||||||
default:
|
|
||||||
return nil, errors.Errorf("invalid driver option %s for remote driver", k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if tlsEnabled {
|
|
||||||
if tls.serverName == "" {
|
|
||||||
// guess servername as hostname of target address
|
|
||||||
uri, err := url.Parse(cfg.EndpointAddr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
tls.serverName = uri.Hostname()
|
|
||||||
}
|
|
||||||
missing := []string{}
|
|
||||||
if tls.caCert == "" {
|
|
||||||
missing = append(missing, "cacert")
|
|
||||||
}
|
|
||||||
if tls.cert == "" {
|
|
||||||
missing = append(missing, "cert")
|
|
||||||
}
|
|
||||||
if tls.key == "" {
|
|
||||||
missing = append(missing, "key")
|
|
||||||
}
|
|
||||||
if len(missing) > 0 {
|
|
||||||
return nil, errors.Errorf("tls enabled, but missing keys %s", strings.Join(missing, ", "))
|
|
||||||
}
|
|
||||||
d.tlsOpts = tls
|
|
||||||
}
|
|
||||||
|
|
||||||
return d, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *factory) AllowsInstances() bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
@@ -1,26 +0,0 @@
|
|||||||
package remote
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/url"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
var schemes = map[string]struct{}{
|
|
||||||
"tcp": {},
|
|
||||||
"unix": {},
|
|
||||||
"ssh": {},
|
|
||||||
"docker-container": {},
|
|
||||||
"kube-pod": {},
|
|
||||||
}
|
|
||||||
|
|
||||||
func IsValidEndpoint(ep string) error {
|
|
||||||
endpoint, err := url.Parse(ep)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrapf(err, "failed to parse endpoint %s", ep)
|
|
||||||
}
|
|
||||||
if _, ok := schemes[endpoint.Scheme]; !ok {
|
|
||||||
return errors.Errorf("unrecognized url scheme %s", endpoint.Scheme)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
176
go.mod
176
go.mod
@@ -1,163 +1,65 @@
|
|||||||
module github.com/docker/buildx
|
module github.com/docker/buildx
|
||||||
|
|
||||||
go 1.17
|
go 1.16
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/compose-spec/compose-go v1.4.0
|
|
||||||
github.com/containerd/console v1.0.3
|
|
||||||
github.com/containerd/containerd v1.6.6
|
|
||||||
github.com/docker/cli v20.10.17+incompatible // v22.06.x - see "replace" for the actual version
|
|
||||||
github.com/docker/cli-docs-tool v0.5.0
|
|
||||||
github.com/docker/distribution v2.8.1+incompatible
|
|
||||||
github.com/docker/docker v20.10.17+incompatible // v22.06.x - see "replace" for the actual version
|
|
||||||
github.com/docker/go-units v0.4.0
|
|
||||||
github.com/gofrs/flock v0.7.3
|
|
||||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
|
|
||||||
github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840
|
|
||||||
github.com/hashicorp/hcl/v2 v2.8.2
|
|
||||||
github.com/moby/buildkit v0.10.1-0.20220816171719-55ba9d14360a
|
|
||||||
github.com/morikuni/aec v1.0.0
|
|
||||||
github.com/opencontainers/go-digest v1.0.0
|
|
||||||
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799
|
|
||||||
github.com/pelletier/go-toml v1.9.4
|
|
||||||
github.com/pkg/errors v0.9.1
|
|
||||||
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
|
|
||||||
github.com/sirupsen/logrus v1.9.0
|
|
||||||
github.com/spf13/cobra v1.5.0
|
|
||||||
github.com/spf13/pflag v1.0.5
|
|
||||||
github.com/stretchr/testify v1.8.0
|
|
||||||
github.com/zclconf/go-cty v1.10.0
|
|
||||||
go.opentelemetry.io/otel v1.4.1
|
|
||||||
go.opentelemetry.io/otel/trace v1.4.1
|
|
||||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
|
|
||||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
|
|
||||||
google.golang.org/grpc v1.47.0
|
|
||||||
gopkg.in/yaml.v3 v3.0.1
|
|
||||||
k8s.io/api v0.23.5
|
|
||||||
k8s.io/apimachinery v0.23.5
|
|
||||||
k8s.io/client-go v0.23.5
|
|
||||||
)
|
|
||||||
|
|
||||||
require (
|
|
||||||
cloud.google.com/go v0.81.0 // indirect
|
|
||||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
|
||||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
|
||||||
github.com/Azure/go-autorest/autorest v0.11.24 // indirect
|
|
||||||
github.com/Azure/go-autorest/autorest/adal v0.9.18 // indirect
|
|
||||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
|
||||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
|
||||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
|
||||||
github.com/Microsoft/go-winio v0.5.2 // indirect
|
|
||||||
github.com/agext/levenshtein v1.2.3 // indirect
|
|
||||||
github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect
|
github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect
|
||||||
github.com/apparentlymart/go-cidr v1.0.1 // indirect
|
|
||||||
github.com/apparentlymart/go-textseg/v12 v12.0.0 // indirect
|
|
||||||
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
|
|
||||||
github.com/beorn7/perks v1.0.1 // indirect
|
|
||||||
github.com/bugsnag/bugsnag-go v1.4.1 // indirect
|
github.com/bugsnag/bugsnag-go v1.4.1 // indirect
|
||||||
github.com/bugsnag/panicwrap v1.2.0 // indirect
|
github.com/bugsnag/panicwrap v1.2.0 // indirect
|
||||||
github.com/cenkalti/backoff v2.1.1+incompatible // indirect
|
github.com/cenkalti/backoff v2.1.1+incompatible // indirect
|
||||||
github.com/cenkalti/backoff/v4 v4.1.2 // indirect
|
|
||||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
|
||||||
github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e // indirect
|
github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e // indirect
|
||||||
github.com/containerd/continuity v0.3.0 // indirect
|
github.com/compose-spec/compose-go v1.0.5
|
||||||
github.com/containerd/ttrpc v1.1.0 // indirect
|
github.com/containerd/console v1.0.3
|
||||||
github.com/containerd/typeurl v1.0.2 // indirect
|
github.com/containerd/containerd v1.5.5
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
github.com/docker/cli v20.10.8+incompatible
|
||||||
github.com/distribution/distribution/v3 v3.0.0-20220725133111-4bf3547399eb // indirect
|
github.com/docker/cli-docs-tool v0.2.1
|
||||||
github.com/docker/docker-credential-helpers v0.6.4 // indirect
|
github.com/docker/compose-on-kubernetes v0.4.19-0.20190128150448-356b2919c496 // indirect
|
||||||
|
github.com/docker/distribution v2.7.1+incompatible
|
||||||
|
github.com/docker/docker v20.10.7+incompatible
|
||||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
|
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
|
||||||
github.com/docker/go-connections v0.4.0 // indirect
|
github.com/docker/go-units v0.4.0
|
||||||
github.com/docker/go-metrics v0.0.1 // indirect
|
|
||||||
github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4 // indirect
|
github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4 // indirect
|
||||||
github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 // indirect
|
github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 // indirect
|
||||||
github.com/felixge/httpsnoop v1.0.2 // indirect
|
|
||||||
github.com/fvbommel/sortorder v1.0.1 // indirect
|
github.com/fvbommel/sortorder v1.0.1 // indirect
|
||||||
github.com/go-logr/logr v1.2.2 // indirect
|
github.com/gofrs/flock v0.7.3
|
||||||
github.com/go-logr/stdr v1.2.2 // indirect
|
github.com/gofrs/uuid v3.3.0+incompatible // indirect
|
||||||
github.com/go-sql-driver/mysql v1.6.0 // indirect
|
|
||||||
github.com/gogo/googleapis v1.4.1 // indirect
|
|
||||||
github.com/gogo/protobuf v1.3.2 // indirect
|
|
||||||
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
|
|
||||||
github.com/golang/protobuf v1.5.2 // indirect
|
|
||||||
github.com/google/certificate-transparency-go v1.0.21 // indirect
|
github.com/google/certificate-transparency-go v1.0.21 // indirect
|
||||||
github.com/google/go-cmp v0.5.8 // indirect
|
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
|
||||||
github.com/google/gofuzz v1.2.0 // indirect
|
|
||||||
github.com/google/uuid v1.3.0 // indirect
|
|
||||||
github.com/googleapis/gnostic v0.5.5 // indirect
|
|
||||||
github.com/gorilla/mux v1.8.0 // indirect
|
|
||||||
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
|
|
||||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
|
|
||||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
|
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
|
||||||
github.com/imdario/mergo v0.3.13 // indirect
|
github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840
|
||||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
github.com/hashicorp/hcl/v2 v2.8.2
|
||||||
github.com/jinzhu/gorm v1.9.2 // indirect
|
github.com/jinzhu/gorm v1.9.2 // indirect
|
||||||
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
|
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
|
||||||
github.com/json-iterator/go v1.1.12 // indirect
|
|
||||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
|
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
|
||||||
github.com/klauspost/compress v1.15.7 // indirect
|
github.com/moby/buildkit v0.9.1-0.20211019185819-8778943ac3da
|
||||||
github.com/kr/pretty v0.3.0 // indirect
|
github.com/opencontainers/go-digest v1.0.0
|
||||||
github.com/mattn/go-shellwords v1.0.12 // indirect
|
github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283
|
||||||
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
|
github.com/pelletier/go-toml v1.9.4
|
||||||
github.com/miekg/pkcs11 v1.1.1 // indirect
|
github.com/pkg/errors v0.9.1
|
||||||
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect
|
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
|
||||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
github.com/sirupsen/logrus v1.8.1
|
||||||
github.com/moby/locker v1.0.1 // indirect
|
github.com/spf13/cobra v1.2.1
|
||||||
github.com/moby/spdystream v0.2.0 // indirect
|
github.com/spf13/pflag v1.0.5
|
||||||
github.com/moby/sys/signal v0.6.0 // indirect
|
github.com/stretchr/testify v1.7.0
|
||||||
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
|
|
||||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
|
||||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
|
||||||
github.com/opencontainers/runc v1.1.3 // indirect
|
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
|
||||||
github.com/prometheus/client_golang v1.12.2 // indirect
|
|
||||||
github.com/prometheus/client_model v0.2.0 // indirect
|
|
||||||
github.com/prometheus/common v0.32.1 // indirect
|
|
||||||
github.com/prometheus/procfs v0.7.3 // indirect
|
|
||||||
github.com/rogpeppe/go-internal v1.8.1 // indirect
|
|
||||||
github.com/theupdateframework/notary v0.6.1 // indirect
|
github.com/theupdateframework/notary v0.6.1 // indirect
|
||||||
github.com/tonistiigi/fsutil v0.0.0-20220510150904-0dbf3a8a7d58 // indirect
|
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
|
||||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
|
github.com/zclconf/go-cty v1.7.1
|
||||||
github.com/tonistiigi/vt100 v0.0.0-20210615222946-8066bb97264f // indirect
|
go.opentelemetry.io/otel v1.0.0-RC1
|
||||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
|
go.opentelemetry.io/otel/trace v1.0.0-RC1
|
||||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
|
||||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 // indirect
|
|
||||||
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 // indirect
|
|
||||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 // indirect
|
|
||||||
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1 // indirect
|
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 // indirect
|
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.4.1 // indirect
|
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.4.1 // indirect
|
|
||||||
go.opentelemetry.io/otel/internal/metric v0.27.0 // indirect
|
|
||||||
go.opentelemetry.io/otel/metric v0.27.0 // indirect
|
|
||||||
go.opentelemetry.io/otel/sdk v1.4.1 // indirect
|
|
||||||
go.opentelemetry.io/proto/otlp v0.12.0 // indirect
|
|
||||||
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect
|
|
||||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect
|
|
||||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
|
|
||||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 // indirect
|
|
||||||
golang.org/x/text v0.3.7 // indirect
|
|
||||||
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
|
|
||||||
google.golang.org/appengine v1.6.7 // indirect
|
|
||||||
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6 // indirect
|
|
||||||
google.golang.org/protobuf v1.27.1 // indirect
|
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
|
|
||||||
gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect
|
gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect
|
||||||
gopkg.in/fatih/pool.v2 v2.0.0 // indirect
|
gopkg.in/fatih/pool.v2 v2.0.0 // indirect
|
||||||
gopkg.in/gorethink/gorethink.v3 v3.0.5 // indirect
|
gopkg.in/gorethink/gorethink.v3 v3.0.5 // indirect
|
||||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
k8s.io/api v0.22.1
|
||||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
k8s.io/apimachinery v0.22.1
|
||||||
k8s.io/klog/v2 v2.30.0 // indirect
|
k8s.io/client-go v0.22.1
|
||||||
k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b // indirect
|
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
|
|
||||||
sigs.k8s.io/yaml v1.2.0 // indirect
|
|
||||||
)
|
)
|
||||||
|
|
||||||
replace (
|
replace (
|
||||||
github.com/docker/cli => github.com/docker/cli v20.10.3-0.20220803220330-418ca3b4d46f+incompatible // master (v22.06-dev)
|
github.com/docker/cli => github.com/docker/cli v20.10.3-0.20210702143511-f782d1355eff+incompatible
|
||||||
github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220720171342-a60b458179aa+incompatible // 22.06 branch (v22.06-dev)
|
github.com/docker/docker => github.com/tonistiigi/docker v0.10.1-0.20211122204227-65a6f25dbca2
|
||||||
k8s.io/api => k8s.io/api v0.22.4
|
github.com/tonistiigi/fsutil => github.com/tonistiigi/fsutil v0.0.0-20211122210416-da5201e0b3af
|
||||||
k8s.io/apimachinery => k8s.io/apimachinery v0.22.4
|
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.0.0-20210714055410-d010b05b4939
|
||||||
k8s.io/client-go => k8s.io/client-go v0.22.4
|
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace => github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/net/http/httptrace/otelhttptrace v0.0.0-20210714055410-d010b05b4939
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp => github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/net/http/otelhttp v0.0.0-20210714055410-d010b05b4939
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
# syntax=docker/dockerfile:1.4
|
# syntax=docker/dockerfile:1.3-labs
|
||||||
|
|
||||||
FROM alpine:3.14 AS gen
|
FROM alpine:3.14 AS gen
|
||||||
RUN apk add --no-cache git
|
RUN apk add --no-cache git
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
# syntax=docker/dockerfile:1.4
|
# syntax=docker/dockerfile:1.3-labs
|
||||||
|
|
||||||
ARG GO_VERSION=1.18
|
ARG GO_VERSION=1.17
|
||||||
ARG FORMATS=md,yaml
|
ARG FORMATS=md,yaml
|
||||||
|
|
||||||
FROM golang:${GO_VERSION}-alpine AS docsgen
|
FROM golang:${GO_VERSION}-alpine AS docsgen
|
||||||
|
|||||||
@@ -1,10 +1,12 @@
|
|||||||
# syntax=docker/dockerfile:1.4
|
# syntax=docker/dockerfile:1.3
|
||||||
|
|
||||||
ARG GO_VERSION=1.18
|
ARG GO_VERSION=1.17
|
||||||
|
|
||||||
FROM golang:${GO_VERSION}-alpine
|
FROM golang:${GO_VERSION}-alpine
|
||||||
RUN apk add --no-cache git gcc musl-dev
|
RUN apk add --no-cache gcc musl-dev yamllint
|
||||||
RUN wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.45.2
|
RUN wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.36.0
|
||||||
WORKDIR /go/src/github.com/docker/buildx
|
WORKDIR /go/src/github.com/docker/buildx
|
||||||
RUN --mount=target=/go/src/github.com/docker/buildx --mount=target=/root/.cache,type=cache \
|
RUN --mount=target=/go/src/github.com/docker/buildx --mount=target=/root/.cache,type=cache \
|
||||||
golangci-lint run
|
golangci-lint run
|
||||||
|
RUN --mount=target=/go/src/github.com/docker/buildx --mount=target=/root/.cache,type=cache \
|
||||||
|
yamllint -c .yamllint.yml --strict .
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user