Compare commits
5 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
266c0eac61 | ||
|
|
0b320faf34 | ||
|
|
944fd444f6 | ||
|
|
de7dfb925d | ||
|
|
43e51fd089 |
@@ -1,3 +1,3 @@
|
|||||||
bin/
|
bin/
|
||||||
cross-out/
|
cross-out/
|
||||||
release-out/
|
release-out/
|
||||||
169
.github/workflows/build.yml
vendored
169
.github/workflows/build.yml
vendored
@@ -15,34 +15,158 @@ on:
|
|||||||
|
|
||||||
env:
|
env:
|
||||||
REPO_SLUG: "docker/buildx-bin"
|
REPO_SLUG: "docker/buildx-bin"
|
||||||
RELEASE_OUT: "./release-out"
|
REPO_SLUG_ORIGIN: "moby/buildkit:master"
|
||||||
|
CACHEKEY_BINARIES: "binaries"
|
||||||
|
PLATFORMS: "linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64,linux/s390x,linux/ppc64le,linux/riscv64"
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
base:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
|
-
|
||||||
|
name: Cache ${{ env.CACHEKEY_BINARIES }}
|
||||||
|
uses: actions/cache@v2
|
||||||
|
with:
|
||||||
|
path: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
||||||
|
key: ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-${{ github.sha }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-
|
||||||
-
|
-
|
||||||
name: Set up QEMU
|
name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v1
|
uses: docker/setup-qemu-action@v1
|
||||||
-
|
-
|
||||||
name: Set up Docker Buildx
|
name: Set up Docker Buildx
|
||||||
uses: docker/setup-buildx-action@v1
|
uses: docker/setup-buildx-action@v1
|
||||||
|
with:
|
||||||
|
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
|
||||||
|
-
|
||||||
|
name: Build ${{ env.CACHEKEY_BINARIES }}
|
||||||
|
run: |
|
||||||
|
./hack/build_ci_first_pass binaries
|
||||||
|
env:
|
||||||
|
CACHEDIR_FROM: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
||||||
|
CACHEDIR_TO: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}-new
|
||||||
|
-
|
||||||
|
# FIXME: Temp fix for https://github.com/moby/buildkit/issues/1850
|
||||||
|
name: Move cache
|
||||||
|
run: |
|
||||||
|
rm -rf /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
||||||
|
mv /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}-new /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
||||||
|
|
||||||
|
test:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: [base]
|
||||||
|
steps:
|
||||||
|
-
|
||||||
|
name: Checkout
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
-
|
||||||
|
name: Cache ${{ env.CACHEKEY_BINARIES }}
|
||||||
|
uses: actions/cache@v2
|
||||||
|
with:
|
||||||
|
path: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
||||||
|
key: ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-${{ github.sha }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-
|
||||||
|
-
|
||||||
|
name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v1
|
||||||
|
-
|
||||||
|
name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v1
|
||||||
|
with:
|
||||||
|
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
|
||||||
-
|
-
|
||||||
name: Test
|
name: Test
|
||||||
run: |
|
run: |
|
||||||
make test
|
make test
|
||||||
|
env:
|
||||||
|
TEST_COVERAGE: 1
|
||||||
|
TESTFLAGS: -v --parallel=6 --timeout=20m
|
||||||
|
CACHEDIR_FROM: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
||||||
-
|
-
|
||||||
name: Send to Codecov
|
name: Send to Codecov
|
||||||
uses: codecov/codecov-action@v2
|
uses: codecov/codecov-action@v2
|
||||||
with:
|
with:
|
||||||
file: ./coverage/coverage.txt
|
file: ./coverage/coverage.txt
|
||||||
|
|
||||||
|
cross:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: [base]
|
||||||
|
steps:
|
||||||
-
|
-
|
||||||
name: Build binaries
|
name: Checkout
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
-
|
||||||
|
name: Cache ${{ env.CACHEKEY_BINARIES }}
|
||||||
|
uses: actions/cache@v2
|
||||||
|
with:
|
||||||
|
path: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
||||||
|
key: ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-${{ github.sha }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-
|
||||||
|
-
|
||||||
|
name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v1
|
||||||
|
-
|
||||||
|
name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v1
|
||||||
|
with:
|
||||||
|
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
|
||||||
|
-
|
||||||
|
name: Cross
|
||||||
run: |
|
run: |
|
||||||
make release
|
make cross
|
||||||
|
env:
|
||||||
|
TARGETPLATFORM: ${{ env.PLATFORMS }},darwin/amd64,darwin/arm64,windows/amd64,windows/arm64
|
||||||
|
CACHEDIR_FROM: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
||||||
|
|
||||||
|
binaries:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: [test, cross]
|
||||||
|
env:
|
||||||
|
RELEASE_OUT: ./release-out
|
||||||
|
steps:
|
||||||
|
-
|
||||||
|
name: Checkout
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
-
|
||||||
|
name: Prepare
|
||||||
|
id: prep
|
||||||
|
run: |
|
||||||
|
TAG=pr
|
||||||
|
if [[ $GITHUB_REF == refs/tags/v* ]]; then
|
||||||
|
TAG=${GITHUB_REF#refs/tags/}
|
||||||
|
elif [[ $GITHUB_REF == refs/heads/* ]]; then
|
||||||
|
TAG=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
|
||||||
|
fi
|
||||||
|
echo ::set-output name=tag::${TAG}
|
||||||
|
-
|
||||||
|
name: Cache ${{ env.CACHEKEY_BINARIES }}
|
||||||
|
uses: actions/cache@v2
|
||||||
|
with:
|
||||||
|
path: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
||||||
|
key: ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-${{ github.sha }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-
|
||||||
|
-
|
||||||
|
name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v1
|
||||||
|
-
|
||||||
|
name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v1
|
||||||
|
with:
|
||||||
|
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
|
||||||
|
-
|
||||||
|
name: Build ${{ steps.prep.outputs.tag }}
|
||||||
|
run: |
|
||||||
|
./hack/release ${{ env.RELEASE_OUT }}
|
||||||
|
env:
|
||||||
|
PLATFORMS: ${{ env.PLATFORMS }},darwin/amd64,darwin/arm64,windows/amd64,windows/arm64
|
||||||
|
CACHEDIR_FROM: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
||||||
-
|
-
|
||||||
name: Upload artifacts
|
name: Upload artifacts
|
||||||
uses: actions/upload-artifact@v2
|
uses: actions/upload-artifact@v2
|
||||||
@@ -61,7 +185,6 @@ jobs:
|
|||||||
type=ref,event=branch
|
type=ref,event=branch
|
||||||
type=ref,event=pr
|
type=ref,event=pr
|
||||||
type=semver,pattern={{version}}
|
type=semver,pattern={{version}}
|
||||||
bake-target: meta-helper
|
|
||||||
-
|
-
|
||||||
name: Login to DockerHub
|
name: Login to DockerHub
|
||||||
if: github.event_name != 'pull_request'
|
if: github.event_name != 'pull_request'
|
||||||
@@ -71,13 +194,15 @@ jobs:
|
|||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
-
|
-
|
||||||
name: Build and push image
|
name: Build and push image
|
||||||
uses: docker/bake-action@v1
|
uses: docker/build-push-action@v2
|
||||||
with:
|
with:
|
||||||
files: |
|
context: .
|
||||||
./docker-bake.hcl
|
target: binaries
|
||||||
${{ steps.meta.outputs.bake-file }}
|
|
||||||
targets: image-cross
|
|
||||||
push: ${{ github.event_name != 'pull_request' }}
|
push: ${{ github.event_name != 'pull_request' }}
|
||||||
|
cache-from: type=local,src=/tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
||||||
|
platforms: ${{ env.PLATFORMS }},darwin/amd64,darwin/arm64,windows/amd64,windows/arm64
|
||||||
|
tags: ${{ steps.meta.outputs.tags }}
|
||||||
|
labels: ${{ steps.meta.outputs.labels }}
|
||||||
-
|
-
|
||||||
name: GitHub Release
|
name: GitHub Release
|
||||||
if: startsWith(github.ref, 'refs/tags/v')
|
if: startsWith(github.ref, 'refs/tags/v')
|
||||||
@@ -87,26 +212,4 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
draft: true
|
draft: true
|
||||||
files: ${{ env.RELEASE_OUT }}/*
|
files: ${{ env.RELEASE_OUT }}/*
|
||||||
|
name: ${{ steps.prep.outputs.tag }}
|
||||||
buildkit-edge:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
continue-on-error: true
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v2
|
|
||||||
-
|
|
||||||
name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v1
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v1
|
|
||||||
with:
|
|
||||||
driver-opts: image=moby/buildkit:master
|
|
||||||
buildkitd-flags: --debug
|
|
||||||
-
|
|
||||||
# Just run a bake target to check eveything runs fine
|
|
||||||
name: Build
|
|
||||||
uses: docker/bake-action@v1
|
|
||||||
with:
|
|
||||||
targets: binaries-cross
|
|
||||||
|
|||||||
101
.github/workflows/e2e.yml
vendored
101
.github/workflows/e2e.yml
vendored
@@ -1,101 +0,0 @@
|
|||||||
name: e2e
|
|
||||||
|
|
||||||
on:
|
|
||||||
workflow_dispatch:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- 'master'
|
|
||||||
- 'v[0-9]*'
|
|
||||||
pull_request:
|
|
||||||
branches:
|
|
||||||
- 'master'
|
|
||||||
- 'v[0-9]*'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
driver:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
driver:
|
|
||||||
- docker
|
|
||||||
- docker-container
|
|
||||||
- kubernetes
|
|
||||||
buildkit:
|
|
||||||
- moby/buildkit:buildx-stable-1
|
|
||||||
- moby/buildkit:master
|
|
||||||
buildkit-cfg:
|
|
||||||
- bkcfg-false
|
|
||||||
- bkcfg-true
|
|
||||||
multi-node:
|
|
||||||
- mnode-false
|
|
||||||
- mnode-true
|
|
||||||
platforms:
|
|
||||||
- linux/amd64
|
|
||||||
- linux/amd64,linux/arm64
|
|
||||||
include:
|
|
||||||
- driver: kubernetes
|
|
||||||
driver-opt: qemu.install=true
|
|
||||||
exclude:
|
|
||||||
- driver: docker
|
|
||||||
multi-node: mnode-true
|
|
||||||
- driver: docker
|
|
||||||
buildkit-cfg: bkcfg-true
|
|
||||||
- driver: docker-container
|
|
||||||
multi-node: mnode-true
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v2
|
|
||||||
-
|
|
||||||
name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v1
|
|
||||||
if: matrix.driver == 'docker' || matrix.driver == 'docker-container'
|
|
||||||
-
|
|
||||||
name: Install buildx
|
|
||||||
run: |
|
|
||||||
make install
|
|
||||||
docker buildx version
|
|
||||||
-
|
|
||||||
name: Init env vars
|
|
||||||
run: |
|
|
||||||
# BuildKit cfg
|
|
||||||
if [ "${{ matrix.buildkit-cfg }}" = "bkcfg-true" ]; then
|
|
||||||
cat > "/tmp/buildkitd.toml" <<EOL
|
|
||||||
[worker.oci]
|
|
||||||
max-parallelism = 2
|
|
||||||
EOL
|
|
||||||
echo "BUILDKIT_CFG=/tmp/buildkitd.toml" >> $GITHUB_ENV
|
|
||||||
fi
|
|
||||||
# Multi node
|
|
||||||
if [ "${{ matrix.multi-node }}" = "mnode-true" ]; then
|
|
||||||
echo "MULTI_NODE=1" >> $GITHUB_ENV
|
|
||||||
else
|
|
||||||
echo "MULTI_NODE=0" >> $GITHUB_ENV
|
|
||||||
fi
|
|
||||||
-
|
|
||||||
name: Install k3s
|
|
||||||
if: matrix.driver == 'kubernetes'
|
|
||||||
uses: debianmaster/actions-k3s@v1.0.3
|
|
||||||
id: k3s
|
|
||||||
with:
|
|
||||||
version: v1.21.2-k3s1
|
|
||||||
-
|
|
||||||
name: Config k3s
|
|
||||||
if: matrix.driver == 'kubernetes'
|
|
||||||
run: |
|
|
||||||
(set -x ; cat ${{ steps.k3s.outputs.kubeconfig }})
|
|
||||||
-
|
|
||||||
name: Check k3s nodes
|
|
||||||
if: matrix.driver == 'kubernetes'
|
|
||||||
run: |
|
|
||||||
kubectl get nodes
|
|
||||||
-
|
|
||||||
name: Test
|
|
||||||
run: |
|
|
||||||
make test-driver
|
|
||||||
env:
|
|
||||||
BUILDKIT_IMAGE: ${{ matrix.buildkit }}
|
|
||||||
DRIVER: ${{ matrix.driver }}
|
|
||||||
DRIVER_OPT: ${{ matrix.driver-opt }}
|
|
||||||
PLATFORMS: ${{ matrix.platforms }}
|
|
||||||
23
.github/workflows/validate.yml
vendored
23
.github/workflows/validate.yml
vendored
@@ -13,6 +13,9 @@ on:
|
|||||||
- 'master'
|
- 'master'
|
||||||
- 'v[0-9]*'
|
- 'v[0-9]*'
|
||||||
|
|
||||||
|
env:
|
||||||
|
REPO_SLUG_ORIGIN: "moby/buildkit:master"
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
validate:
|
validate:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
@@ -27,22 +30,12 @@ jobs:
|
|||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
|
-
|
||||||
|
name: Set up Docker Buildx
|
||||||
|
uses: docker/setup-buildx-action@v1
|
||||||
|
with:
|
||||||
|
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
|
||||||
-
|
-
|
||||||
name: Run
|
name: Run
|
||||||
run: |
|
run: |
|
||||||
make ${{ matrix.target }}
|
make ${{ matrix.target }}
|
||||||
|
|
||||||
validate-docs-yaml:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
needs:
|
|
||||||
- validate
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v2
|
|
||||||
-
|
|
||||||
name: Run
|
|
||||||
run: |
|
|
||||||
make docs
|
|
||||||
env:
|
|
||||||
FORMATS: yaml
|
|
||||||
|
|||||||
9
.mailmap
9
.mailmap
@@ -1,13 +1,6 @@
|
|||||||
# This file lists all individuals having contributed content to the repository.
|
# This file lists all individuals having contributed content to the repository.
|
||||||
# For how it is generated, see hack/dockerfiles/authors.Dockerfile.
|
# For how it is generated, see `hack/generate-authors`.
|
||||||
|
|
||||||
CrazyMax <github@crazymax.dev>
|
|
||||||
CrazyMax <github@crazymax.dev> <1951866+crazy-max@users.noreply.github.com>
|
|
||||||
CrazyMax <github@crazymax.dev> <crazy-max@users.noreply.github.com>
|
|
||||||
Sebastiaan van Stijn <github@gone.nl>
|
|
||||||
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
|
|
||||||
Tibor Vass <tibor@docker.com>
|
Tibor Vass <tibor@docker.com>
|
||||||
Tibor Vass <tibor@docker.com> <tiborvass@users.noreply.github.com>
|
Tibor Vass <tibor@docker.com> <tiborvass@users.noreply.github.com>
|
||||||
Tõnis Tiigi <tonistiigi@gmail.com>
|
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||||
Ulysses Souza <ulyssessouza@gmail.com>
|
|
||||||
Wang Jinglei <morlay.null@gmail.com>
|
|
||||||
|
|||||||
40
AUTHORS
40
AUTHORS
@@ -1,45 +1,7 @@
|
|||||||
# This file lists all individuals having contributed content to the repository.
|
# This file lists all individuals having contributed content to the repository.
|
||||||
# For how it is generated, see hack/dockerfiles/authors.Dockerfile.
|
# For how it is generated, see `scripts/generate-authors.sh`.
|
||||||
|
|
||||||
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
|
|
||||||
Alex Couture-Beil <alex@earthly.dev>
|
|
||||||
Andrew Haines <andrew.haines@zencargo.com>
|
|
||||||
Andy MacKinlay <admackin@users.noreply.github.com>
|
|
||||||
Anthony Poschen <zanven42@gmail.com>
|
|
||||||
Artur Klauser <Artur.Klauser@computer.org>
|
|
||||||
Batuhan Apaydın <developerguy2@gmail.com>
|
|
||||||
Bin Du <bindu@microsoft.com>
|
Bin Du <bindu@microsoft.com>
|
||||||
Brandon Philips <brandon@ifup.org>
|
|
||||||
Brian Goff <cpuguy83@gmail.com>
|
Brian Goff <cpuguy83@gmail.com>
|
||||||
CrazyMax <github@crazymax.dev>
|
|
||||||
dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
|
|
||||||
Devin Bayer <dev@doubly.so>
|
|
||||||
Djordje Lukic <djordje.lukic@docker.com>
|
|
||||||
Dmytro Makovey <dmytro.makovey@docker.com>
|
|
||||||
Donghui Wang <977675308@qq.com>
|
|
||||||
faust <faustin@fala.red>
|
|
||||||
Felipe Santos <felipecassiors@gmail.com>
|
|
||||||
Fernando Miguel <github@FernandoMiguel.net>
|
|
||||||
gfrancesco <gfrancesco@users.noreply.github.com>
|
|
||||||
gracenoah <gracenoahgh@gmail.com>
|
|
||||||
Hollow Man <hollowman@hollowman.ml>
|
|
||||||
Ilya Dmitrichenko <errordeveloper@gmail.com>
|
|
||||||
Jack Laxson <jackjrabbit@gmail.com>
|
|
||||||
Jean-Yves Gastaud <jygastaud@gmail.com>
|
|
||||||
khs1994 <khs1994@khs1994.com>
|
|
||||||
Kotaro Adachi <k33asby@gmail.com>
|
|
||||||
l00397676 <lujingxiao@huawei.com>
|
|
||||||
Michal Augustyn <michal.augustyn@mail.com>
|
|
||||||
Patrick Van Stee <patrick@vanstee.me>
|
|
||||||
Saul Shanabrook <s.shanabrook@gmail.com>
|
|
||||||
Sebastiaan van Stijn <github@gone.nl>
|
|
||||||
SHIMA Tatsuya <ts1s1andn@gmail.com>
|
|
||||||
Silvin Lubecki <silvin.lubecki@docker.com>
|
|
||||||
Solomon Hykes <sh.github.6811@hykes.org>
|
|
||||||
Sune Keller <absukl@almbrand.dk>
|
|
||||||
Tibor Vass <tibor@docker.com>
|
Tibor Vass <tibor@docker.com>
|
||||||
Tõnis Tiigi <tonistiigi@gmail.com>
|
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||||
Ulysses Souza <ulyssessouza@gmail.com>
|
|
||||||
Wang Jinglei <morlay.null@gmail.com>
|
|
||||||
Xiang Dai <764524258@qq.com>
|
|
||||||
zelahi <elahi.zuhayr@gmail.com>
|
|
||||||
|
|||||||
60
Dockerfile
60
Dockerfile
@@ -1,16 +1,24 @@
|
|||||||
# syntax=docker/dockerfile:1.3
|
# syntax=docker/dockerfile:1.2
|
||||||
|
|
||||||
ARG GO_VERSION=1.17
|
ARG DOCKERD_VERSION=19.03
|
||||||
ARG DOCKERD_VERSION=20.10.8
|
ARG CLI_VERSION=19.03
|
||||||
|
|
||||||
FROM docker:$DOCKERD_VERSION AS dockerd-release
|
FROM docker:$DOCKERD_VERSION AS dockerd-release
|
||||||
|
|
||||||
# xx is a helper for cross-compilation
|
# xx is a helper for cross-compilation
|
||||||
FROM --platform=$BUILDPLATFORM tonistiigi/xx:1.0.0 AS xx
|
FROM --platform=$BUILDPLATFORM tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04 AS xx
|
||||||
|
|
||||||
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest
|
FROM --platform=$BUILDPLATFORM golang:1.16-alpine AS golatest
|
||||||
|
|
||||||
FROM golatest AS gobase
|
FROM golatest AS go-linux
|
||||||
|
FROM golatest AS go-darwin
|
||||||
|
FROM golatest AS go-windows-amd64
|
||||||
|
FROM golatest AS go-windows-386
|
||||||
|
FROM golatest AS go-windows-arm
|
||||||
|
FROM --platform=$BUILDPLATFORM golang:1.17beta1-alpine AS go-windows-arm64
|
||||||
|
FROM go-windows-${TARGETARCH} AS go-windows
|
||||||
|
|
||||||
|
FROM go-${TARGETOS} AS gobase
|
||||||
COPY --from=xx / /
|
COPY --from=xx / /
|
||||||
RUN apk add --no-cache file git
|
RUN apk add --no-cache file git
|
||||||
ENV GOFLAGS=-mod=vendor
|
ENV GOFLAGS=-mod=vendor
|
||||||
@@ -24,24 +32,25 @@ RUN --mount=target=. \
|
|||||||
|
|
||||||
FROM gobase AS buildx-build
|
FROM gobase AS buildx-build
|
||||||
ENV CGO_ENABLED=0
|
ENV CGO_ENABLED=0
|
||||||
ARG LDFLAGS="-w -s"
|
|
||||||
ARG TARGETPLATFORM
|
ARG TARGETPLATFORM
|
||||||
RUN --mount=type=bind,target=. \
|
RUN --mount=target=. --mount=target=/root/.cache,type=cache \
|
||||||
--mount=type=cache,target=/root/.cache \
|
--mount=target=/go/pkg/mod,type=cache \
|
||||||
--mount=type=cache,target=/go/pkg/mod \
|
--mount=source=/tmp/.ldflags,target=/tmp/.ldflags,from=buildx-version \
|
||||||
--mount=type=bind,source=/tmp/.ldflags,target=/tmp/.ldflags,from=buildx-version \
|
set -x; xx-go build -ldflags "$(cat /tmp/.ldflags)" -o /usr/bin/buildx ./cmd/buildx && \
|
||||||
set -x; xx-go build -ldflags "$(cat /tmp/.ldflags) ${LDFLAGS}" -o /usr/bin/buildx ./cmd/buildx && \
|
|
||||||
xx-verify --static /usr/bin/buildx
|
xx-verify --static /usr/bin/buildx
|
||||||
|
|
||||||
FROM buildx-build AS test
|
FROM buildx-build AS integration-tests
|
||||||
RUN --mount=type=bind,target=. \
|
COPY . .
|
||||||
--mount=type=cache,target=/root/.cache \
|
|
||||||
--mount=type=cache,target=/go/pkg/mod \
|
|
||||||
go test -v -coverprofile=/tmp/coverage.txt -covermode=atomic ./... && \
|
|
||||||
go tool cover -func=/tmp/coverage.txt
|
|
||||||
|
|
||||||
FROM scratch AS test-coverage
|
# FROM golang:1.12-alpine AS docker-cli-build
|
||||||
COPY --from=test /tmp/coverage.txt /coverage.txt
|
# RUN apk add -U git bash coreutils gcc musl-dev
|
||||||
|
# ENV CGO_ENABLED=0
|
||||||
|
# ARG REPO=github.com/tiborvass/cli
|
||||||
|
# ARG BRANCH=cli-plugin-aliases
|
||||||
|
# ARG CLI_VERSION
|
||||||
|
# WORKDIR /go/src/github.com/docker/cli
|
||||||
|
# RUN git clone git://$REPO . && git checkout $BRANCH
|
||||||
|
# RUN ./scripts/build/binary
|
||||||
|
|
||||||
FROM scratch AS binaries-unix
|
FROM scratch AS binaries-unix
|
||||||
COPY --from=buildx-build /usr/bin/buildx /
|
COPY --from=buildx-build /usr/bin/buildx /
|
||||||
@@ -54,29 +63,28 @@ COPY --from=buildx-build /usr/bin/buildx /buildx.exe
|
|||||||
|
|
||||||
FROM binaries-$TARGETOS AS binaries
|
FROM binaries-$TARGETOS AS binaries
|
||||||
|
|
||||||
# Release
|
|
||||||
FROM --platform=$BUILDPLATFORM alpine AS releaser
|
FROM --platform=$BUILDPLATFORM alpine AS releaser
|
||||||
WORKDIR /work
|
WORKDIR /work
|
||||||
ARG TARGETPLATFORM
|
ARG TARGETPLATFORM
|
||||||
RUN --mount=from=binaries \
|
RUN --mount=from=binaries \
|
||||||
--mount=type=bind,source=/tmp/.version,target=/tmp/.version,from=buildx-version \
|
--mount=source=/tmp/.version,target=/tmp/.version,from=buildx-version \
|
||||||
mkdir -p /out && cp buildx* "/out/buildx-$(cat /tmp/.version).$(echo $TARGETPLATFORM | sed 's/\//-/g')$(ls buildx* | sed -e 's/^buildx//')"
|
mkdir -p /out && cp buildx* "/out/buildx-$(cat /tmp/.version).$(echo $TARGETPLATFORM | sed 's/\//-/g')$(ls buildx* | sed -e 's/^buildx//')"
|
||||||
|
|
||||||
FROM scratch AS release
|
FROM scratch AS release
|
||||||
COPY --from=releaser /out/ /
|
COPY --from=releaser /out/ /
|
||||||
|
|
||||||
# Shell
|
FROM alpine AS demo-env
|
||||||
FROM docker:$DOCKERD_VERSION AS dockerd-release
|
|
||||||
FROM alpine AS shell
|
|
||||||
RUN apk add --no-cache iptables tmux git vim less openssh
|
RUN apk add --no-cache iptables tmux git vim less openssh
|
||||||
RUN mkdir -p /usr/local/lib/docker/cli-plugins && ln -s /usr/local/bin/buildx /usr/local/lib/docker/cli-plugins/docker-buildx
|
RUN mkdir -p /usr/local/lib/docker/cli-plugins && ln -s /usr/local/bin/buildx /usr/local/lib/docker/cli-plugins/docker-buildx
|
||||||
COPY ./hack/demo-env/entrypoint.sh /usr/local/bin
|
COPY ./hack/demo-env/entrypoint.sh /usr/local/bin
|
||||||
COPY ./hack/demo-env/tmux.conf /root/.tmux.conf
|
COPY ./hack/demo-env/tmux.conf /root/.tmux.conf
|
||||||
COPY --from=dockerd-release /usr/local/bin /usr/local/bin
|
COPY --from=dockerd-release /usr/local/bin /usr/local/bin
|
||||||
|
#COPY --from=docker-cli-build /go/src/github.com/docker/cli/build/docker /usr/local/bin
|
||||||
|
|
||||||
WORKDIR /work
|
WORKDIR /work
|
||||||
COPY ./hack/demo-env/examples .
|
COPY ./hack/demo-env/examples .
|
||||||
COPY --from=binaries / /usr/local/bin/
|
COPY --from=binaries / /usr/local/bin/
|
||||||
VOLUME /var/lib/docker
|
VOLUME /var/lib/docker
|
||||||
ENTRYPOINT ["entrypoint.sh"]
|
ENTRYPOINT ["entrypoint.sh"]
|
||||||
|
|
||||||
FROM binaries
|
FROM binaries
|
||||||
52
Makefile
52
Makefile
@@ -1,51 +1,32 @@
|
|||||||
ifneq (, $(BUILDX_BIN))
|
|
||||||
export BUILDX_CMD = $(BUILDX_BIN)
|
|
||||||
else ifneq (, $(shell docker buildx version))
|
|
||||||
export BUILDX_CMD = docker buildx
|
|
||||||
else ifneq (, $(shell which buildx))
|
|
||||||
export BUILDX_CMD = $(which buildx)
|
|
||||||
else
|
|
||||||
$(error "Buildx is required: https://github.com/docker/buildx#installing")
|
|
||||||
endif
|
|
||||||
|
|
||||||
export BIN_OUT = ./bin
|
|
||||||
export RELEASE_OUT = ./release-out
|
|
||||||
|
|
||||||
shell:
|
shell:
|
||||||
./hack/shell
|
./hack/shell
|
||||||
|
|
||||||
binaries:
|
binaries:
|
||||||
$(BUILDX_CMD) bake binaries
|
./hack/binaries
|
||||||
|
|
||||||
binaries-cross:
|
binaries-cross:
|
||||||
$(BUILDX_CMD) bake binaries-cross
|
EXPORT_LOCAL=cross-out ./hack/cross
|
||||||
|
|
||||||
|
cross:
|
||||||
|
./hack/cross
|
||||||
|
|
||||||
install: binaries
|
install: binaries
|
||||||
mkdir -p ~/.docker/cli-plugins
|
mkdir -p ~/.docker/cli-plugins
|
||||||
install bin/buildx ~/.docker/cli-plugins/docker-buildx
|
install bin/buildx ~/.docker/cli-plugins/docker-buildx
|
||||||
|
|
||||||
release:
|
|
||||||
./hack/release
|
|
||||||
|
|
||||||
validate-all: lint test validate-vendor validate-docs
|
|
||||||
|
|
||||||
lint:
|
lint:
|
||||||
$(BUILDX_CMD) bake lint
|
./hack/lint
|
||||||
|
|
||||||
test:
|
test:
|
||||||
$(BUILDX_CMD) bake test
|
./hack/test
|
||||||
|
|
||||||
validate-vendor:
|
validate-vendor:
|
||||||
$(BUILDX_CMD) bake validate-vendor
|
./hack/validate-vendor
|
||||||
|
|
||||||
validate-docs:
|
validate-docs:
|
||||||
$(BUILDX_CMD) bake validate-docs
|
./hack/validate-docs
|
||||||
|
|
||||||
validate-authors:
|
validate-all: lint test validate-vendor validate-docs
|
||||||
$(BUILDX_CMD) bake validate-authors
|
|
||||||
|
|
||||||
test-driver:
|
|
||||||
./hack/test-driver
|
|
||||||
|
|
||||||
vendor:
|
vendor:
|
||||||
./hack/update-vendor
|
./hack/update-vendor
|
||||||
@@ -53,10 +34,7 @@ vendor:
|
|||||||
docs:
|
docs:
|
||||||
./hack/update-docs
|
./hack/update-docs
|
||||||
|
|
||||||
authors:
|
generate-authors:
|
||||||
$(BUILDX_CMD) bake update-authors
|
./hack/generate-authors
|
||||||
|
|
||||||
mod-outdated:
|
.PHONY: vendor lint shell binaries install binaries-cross validate-all generate-authors validate-docs docs
|
||||||
$(BUILDX_CMD) bake mod-outdated
|
|
||||||
|
|
||||||
.PHONY: shell binaries binaries-cross install release validate-all lint validate-vendor validate-docs validate-authors vendor docs authors
|
|
||||||
|
|||||||
312
README.md
312
README.md
@@ -1,13 +1,11 @@
|
|||||||
# buildx
|
# buildx
|
||||||
|
|
||||||
[](https://github.com/docker/buildx/releases/latest)
|
[](https://pkg.go.dev/github.com/docker/buildx)
|
||||||
[](https://pkg.go.dev/github.com/docker/buildx)
|
[](https://github.com/docker/buildx/actions?query=workflow%3Abuild)
|
||||||
[](https://github.com/docker/buildx/actions?query=workflow%3Abuild)
|
[](https://goreportcard.com/report/github.com/docker/buildx)
|
||||||
[](https://goreportcard.com/report/github.com/docker/buildx)
|
[](https://codecov.io/gh/docker/buildx)
|
||||||
[](https://codecov.io/gh/docker/buildx)
|
|
||||||
|
|
||||||
`buildx` is a Docker CLI plugin for extended build capabilities with
|
`buildx` is a Docker CLI plugin for extended build capabilities with [BuildKit](https://github.com/moby/buildkit).
|
||||||
[BuildKit](https://github.com/moby/buildkit).
|
|
||||||
|
|
||||||
Key features:
|
Key features:
|
||||||
|
|
||||||
@@ -22,134 +20,74 @@ Key features:
|
|||||||
# Table of Contents
|
# Table of Contents
|
||||||
|
|
||||||
- [Installing](#installing)
|
- [Installing](#installing)
|
||||||
- [Windows and macOS](#windows-and-macos)
|
- [Docker](#docker)
|
||||||
- [Linux packages](#linux-packages)
|
- [Binary release](#binary-release)
|
||||||
- [Manual download](#manual-download)
|
- [From `Dockerfile`](#from-dockerfile)
|
||||||
- [Dockerfile](#dockerfile)
|
|
||||||
- [Set buildx as the default builder](#set-buildx-as-the-default-builder)
|
|
||||||
- [Building](#building)
|
- [Building](#building)
|
||||||
|
- [with Docker 18.09+](#with-docker-1809)
|
||||||
|
- [with buildx or Docker 19.03](#with-buildx-or-docker-1903)
|
||||||
- [Getting started](#getting-started)
|
- [Getting started](#getting-started)
|
||||||
- [Building with buildx](#building-with-buildx)
|
- [Building with buildx](#building-with-buildx)
|
||||||
- [Working with builder instances](#working-with-builder-instances)
|
- [Working with builder instances](#working-with-builder-instances)
|
||||||
- [Building multi-platform images](#building-multi-platform-images)
|
- [Building multi-platform images](#building-multi-platform-images)
|
||||||
- [High-level build options](#high-level-build-options)
|
- [High-level build options](#high-level-build-options)
|
||||||
- [Guides](docs/guides)
|
- [Documentation](docs/reference)
|
||||||
- [CI/CD](docs/guides/cicd.md)
|
- [`buildx build [OPTIONS] PATH | URL | -`](docs/reference/buildx_build.md)
|
||||||
- [CNI networking](docs/guides/cni-networking.md)
|
- [`buildx create [OPTIONS] [CONTEXT|ENDPOINT]`](docs/reference/buildx_create.md)
|
||||||
- [Registry mirror](docs/guides/registry-mirror.md)
|
- [`buildx use NAME`](docs/reference/buildx_use.md)
|
||||||
- [Resource limiting](docs/guides/resource-limiting.md)
|
- [`buildx inspect [NAME]`](docs/reference/buildx_inspect.md)
|
||||||
- [Using a custom network](docs/guides/custom-network.md)
|
|
||||||
- [Using a custom registry configuration](docs/guides/custom-registry-config.md)
|
|
||||||
- [Reference](docs/reference/buildx.md)
|
|
||||||
- [`buildx bake`](docs/reference/buildx_bake.md)
|
|
||||||
- [`buildx build`](docs/reference/buildx_build.md)
|
|
||||||
- [`buildx create`](docs/reference/buildx_create.md)
|
|
||||||
- [`buildx du`](docs/reference/buildx_du.md)
|
|
||||||
- [`buildx imagetools`](docs/reference/buildx_imagetools.md)
|
|
||||||
- [`buildx imagetools create`](docs/reference/buildx_imagetools_create.md)
|
|
||||||
- [`buildx imagetools inspect`](docs/reference/buildx_imagetools_inspect.md)
|
|
||||||
- [`buildx inspect`](docs/reference/buildx_inspect.md)
|
|
||||||
- [`buildx install`](docs/reference/buildx_install.md)
|
|
||||||
- [`buildx ls`](docs/reference/buildx_ls.md)
|
- [`buildx ls`](docs/reference/buildx_ls.md)
|
||||||
- [`buildx prune`](docs/reference/buildx_prune.md)
|
- [`buildx stop [NAME]`](docs/reference/buildx_stop.md)
|
||||||
- [`buildx rm`](docs/reference/buildx_rm.md)
|
- [`buildx rm [NAME]`](docs/reference/buildx_rm.md)
|
||||||
- [`buildx stop`](docs/reference/buildx_stop.md)
|
- [`buildx bake [OPTIONS] [TARGET...]`](docs/reference/buildx_bake.md)
|
||||||
- [`buildx uninstall`](docs/reference/buildx_uninstall.md)
|
- [`buildx imagetools create [OPTIONS] [SOURCE] [SOURCE...]`](docs/reference/buildx_imagetools_create.md)
|
||||||
- [`buildx use`](docs/reference/buildx_use.md)
|
- [`buildx imagetools inspect NAME`](docs/reference/buildx_imagetools_inspect.md)
|
||||||
- [`buildx version`](docs/reference/buildx_version.md)
|
- [Setting buildx as default builder in Docker 19.03+](#setting-buildx-as-default-builder-in-docker-1903)
|
||||||
- [Contributing](#contributing)
|
- [Contributing](#contributing)
|
||||||
|
|
||||||
|
|
||||||
# Installing
|
# Installing
|
||||||
|
|
||||||
Using `buildx` as a docker CLI plugin requires using Docker 19.03 or newer.
|
Using `buildx` as a docker CLI plugin requires using Docker 19.03 or newer. A limited set of functionality works with older versions of Docker when invoking the binary directly.
|
||||||
A limited set of functionality works with older versions of Docker when
|
|
||||||
invoking the binary directly.
|
|
||||||
|
|
||||||
## Windows and macOS
|
### Docker
|
||||||
|
|
||||||
Docker Buildx is included in [Docker Desktop](https://docs.docker.com/desktop/)
|
`buildx` comes bundled with Docker Desktop and in latest Docker CE packages, but may not be included in all Linux distros (in which case follow the binary release instructions).
|
||||||
for Windows and macOS.
|
|
||||||
|
|
||||||
## Linux packages
|
### Binary release
|
||||||
|
|
||||||
Docker Linux packages also include Docker Buildx when installed using the
|
Download the latest binary release from https://github.com/docker/buildx/releases/latest and copy it to `~/.docker/cli-plugins` folder with name `docker-buildx`.
|
||||||
[DEB or RPM packages](https://docs.docker.com/engine/install/).
|
|
||||||
|
|
||||||
## Manual download
|
Change the permission to execute:
|
||||||
|
```sh
|
||||||
|
chmod a+x ~/.docker/cli-plugins/docker-buildx
|
||||||
|
```
|
||||||
|
|
||||||
> **Important**
|
### From `Dockerfile`
|
||||||
>
|
|
||||||
> This section is for unattended installation of the buildx component. These
|
|
||||||
> instructions are mostly suitable for testing purposes. We do not recommend
|
|
||||||
> installing buildx using manual download in production environments as they
|
|
||||||
> will not be updated automatically with security updates.
|
|
||||||
>
|
|
||||||
> On Windows and macOS, we recommend that you install [Docker Desktop](https://docs.docker.com/desktop/)
|
|
||||||
> instead. For Linux, we recommend that you follow the [instructions specific for your distribution](#linux-packages).
|
|
||||||
|
|
||||||
You can also download the latest binary from the [GitHub releases page](https://github.com/docker/buildx/releases/latest).
|
Here is how to use buildx inside a Dockerfile through the [`docker/buildx-bin`](https://hub.docker.com/r/docker/buildx-bin) image:
|
||||||
|
|
||||||
Rename the relevant binary and copy it to the destination matching your OS:
|
|
||||||
|
|
||||||
| OS | Binary name | Destination folder |
|
|
||||||
| -------- | -------------------- | -----------------------------------------|
|
|
||||||
| Linux | `docker-buildx` | `$HOME/.docker/cli-plugins` |
|
|
||||||
| macOS | `docker-buildx` | `$HOME/.docker/cli-plugins` |
|
|
||||||
| Windows | `docker-buildx.exe` | `%USERPROFILE%\.docker\cli-plugins` |
|
|
||||||
|
|
||||||
Or copy it into one of these folders for installing it system-wide.
|
|
||||||
|
|
||||||
On Unix environments:
|
|
||||||
|
|
||||||
* `/usr/local/lib/docker/cli-plugins` OR `/usr/local/libexec/docker/cli-plugins`
|
|
||||||
* `/usr/lib/docker/cli-plugins` OR `/usr/libexec/docker/cli-plugins`
|
|
||||||
|
|
||||||
On Windows:
|
|
||||||
|
|
||||||
* `C:\ProgramData\Docker\cli-plugins`
|
|
||||||
* `C:\Program Files\Docker\cli-plugins`
|
|
||||||
|
|
||||||
> **Note**
|
|
||||||
>
|
|
||||||
> On Unix environments, it may also be necessary to make it executable with `chmod +x`:
|
|
||||||
> ```shell
|
|
||||||
> $ chmod +x ~/.docker/cli-plugins/docker-buildx
|
|
||||||
> ```
|
|
||||||
|
|
||||||
## Dockerfile
|
|
||||||
|
|
||||||
Here is how to install and use Buildx inside a Dockerfile through the
|
|
||||||
[`docker/buildx-bin`](https://hub.docker.com/r/docker/buildx-bin) image:
|
|
||||||
|
|
||||||
```Dockerfile
|
```Dockerfile
|
||||||
FROM docker
|
FROM docker
|
||||||
COPY --from=docker/buildx-bin /buildx /usr/libexec/docker/cli-plugins/docker-buildx
|
COPY --from=docker/buildx-bin:latest /buildx /usr/libexec/docker/cli-plugins/docker-buildx
|
||||||
RUN docker buildx version
|
RUN docker buildx version
|
||||||
```
|
```
|
||||||
|
|
||||||
# Set buildx as the default builder
|
|
||||||
|
|
||||||
Running the command [`docker buildx install`](docs/reference/buildx_install.md)
|
|
||||||
sets up docker builder command as an alias to `docker buildx build`. This
|
|
||||||
results in the ability to have `docker build` use the current buildx builder.
|
|
||||||
|
|
||||||
To remove this alias, run [`docker buildx uninstall`](docs/reference/buildx_uninstall.md).
|
|
||||||
|
|
||||||
# Building
|
# Building
|
||||||
|
|
||||||
```console
|
|
||||||
# Buildx 0.6+
|
|
||||||
$ docker buildx bake "https://github.com/docker/buildx.git"
|
|
||||||
$ mkdir -p ~/.docker/cli-plugins
|
|
||||||
$ mv ./bin/buildx ~/.docker/cli-plugins/docker-buildx
|
|
||||||
|
|
||||||
# Docker 19.03+
|
### with buildx or Docker 19.03+
|
||||||
$ DOCKER_BUILDKIT=1 docker build --platform=local -o . "https://github.com/docker/buildx.git"
|
```
|
||||||
|
$ export DOCKER_BUILDKIT=1
|
||||||
|
$ docker build --platform=local -o . git://github.com/docker/buildx
|
||||||
$ mkdir -p ~/.docker/cli-plugins
|
$ mkdir -p ~/.docker/cli-plugins
|
||||||
$ mv buildx ~/.docker/cli-plugins/docker-buildx
|
$ mv buildx ~/.docker/cli-plugins/docker-buildx
|
||||||
|
```
|
||||||
|
|
||||||
# Local
|
### with Docker 18.09+
|
||||||
$ git clone https://github.com/docker/buildx.git && cd buildx
|
```
|
||||||
|
$ git clone git://github.com/docker/buildx && cd buildx
|
||||||
$ make install
|
$ make install
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -157,151 +95,65 @@ $ make install
|
|||||||
|
|
||||||
## Building with buildx
|
## Building with buildx
|
||||||
|
|
||||||
Buildx is a Docker CLI plugin that extends the `docker build` command with the
|
Buildx is a Docker CLI plugin that extends the `docker build` command with the full support of the features provided by [Moby BuildKit](https://github.com/moby/buildkit) builder toolkit. It provides the same user experience as `docker build` with many new features like creating scoped builder instances and building against multiple nodes concurrently.
|
||||||
full support of the features provided by [Moby BuildKit](https://github.com/moby/buildkit)
|
|
||||||
builder toolkit. It provides the same user experience as `docker build` with
|
|
||||||
many new features like creating scoped builder instances and building against
|
|
||||||
multiple nodes concurrently.
|
|
||||||
|
|
||||||
After installation, buildx can be accessed through the `docker buildx` command
|
After installation, buildx can be accessed through the `docker buildx` command with Docker 19.03. `docker buildx build` is the command for starting a new build. With Docker versions older than 19.03 buildx binary can be called directly to access the `docker buildx` subcommands.
|
||||||
with Docker 19.03. `docker buildx build` is the command for starting a new
|
|
||||||
build. With Docker versions older than 19.03 buildx binary can be called
|
|
||||||
directly to access the `docker buildx` subcommands.
|
|
||||||
|
|
||||||
```console
|
```
|
||||||
$ docker buildx build .
|
$ docker buildx build .
|
||||||
[+] Building 8.4s (23/32)
|
[+] Building 8.4s (23/32)
|
||||||
=> ...
|
=> ...
|
||||||
```
|
```
|
||||||
|
|
||||||
Buildx will always build using the BuildKit engine and does not require
|
|
||||||
`DOCKER_BUILDKIT=1` environment variable for starting builds.
|
|
||||||
|
|
||||||
The `docker buildx build` command supports features available for `docker build`,
|
Buildx will always build using the BuildKit engine and does not require `DOCKER_BUILDKIT=1` environment variable for starting builds.
|
||||||
including features such as outputs configuration, inline build caching, and
|
|
||||||
specifying target platform. In addition, Buildx also supports new features that
|
|
||||||
are not yet available for regular `docker build` like building manifest lists,
|
|
||||||
distributed caching, and exporting build results to OCI image tarballs.
|
|
||||||
|
|
||||||
Buildx is supposed to be flexible and can be run in different configurations
|
Buildx build command supports the features available for `docker build` including the new features in Docker 19.03 such as outputs configuration, inline build caching or specifying target platform. In addition, buildx supports new features not yet available for regular `docker build` like building manifest lists, distributed caching, exporting build results to OCI image tarballs etc.
|
||||||
that are exposed through a driver concept. Currently, we support a
|
|
||||||
[`docker` driver](docs/reference/buildx_create.md#docker-driver) that uses
|
Buildx is supposed to be flexible and can be run in different configurations that are exposed through a driver concept. Currently, we support a "docker" driver that uses the BuildKit library bundled into the Docker daemon binary, and a "docker-container" driver that automatically launches BuildKit inside a Docker container. We plan to add more drivers in the future, for example, one that would allow running buildx inside an (unprivileged) container.
|
||||||
the BuildKit library bundled into the Docker daemon binary, a
|
|
||||||
[`docker-container` driver](docs/reference/buildx_create.md#docker-container-driver)
|
The user experience of using buildx is very similar across drivers, but there are some features that are not currently supported by the "docker" driver, because the BuildKit library bundled into docker daemon currently uses a different storage component. In contrast, all images built with "docker" driver are automatically added to the "docker images" view by default, whereas when using other drivers the method for outputting an image needs to be selected with `--output`.
|
||||||
that automatically launches BuildKit inside a Docker container and a
|
|
||||||
[`kubernetes` driver](docs/reference/buildx_create.md#kubernetes-driver) to
|
|
||||||
spin up pods with defined BuildKit container image to build your images. We
|
|
||||||
plan to add more drivers in the future.
|
|
||||||
|
|
||||||
The user experience of using buildx is very similar across drivers, but there
|
|
||||||
are some features that are not currently supported by the `docker` driver,
|
|
||||||
because the BuildKit library bundled into docker daemon currently uses a
|
|
||||||
different storage component. In contrast, all images built with `docker` driver
|
|
||||||
are automatically added to the `docker images` view by default, whereas when
|
|
||||||
using other drivers the method for outputting an image needs to be selected
|
|
||||||
with `--output`.
|
|
||||||
|
|
||||||
## Working with builder instances
|
## Working with builder instances
|
||||||
|
|
||||||
By default, buildx will initially use the `docker` driver if it is supported,
|
By default, buildx will initially use the "docker" driver if it is supported, providing a very similar user experience to the native `docker build`. But using a local shared daemon is only one way to build your applications.
|
||||||
providing a very similar user experience to the native `docker build`. Note that
|
|
||||||
you must use a local shared daemon to build your applications.
|
|
||||||
|
|
||||||
Buildx allows you to create new instances of isolated builders. This can be
|
Buildx allows you to create new instances of isolated builders. This can be used for getting a scoped environment for your CI builds that does not change the state of the shared daemon or for isolating the builds for different projects. You can create a new instance for a set of remote nodes, forming a build farm, and quickly switch between them.
|
||||||
used for getting a scoped environment for your CI builds that does not change
|
|
||||||
the state of the shared daemon or for isolating the builds for different
|
|
||||||
projects. You can create a new instance for a set of remote nodes, forming a
|
|
||||||
build farm, and quickly switch between them.
|
|
||||||
|
|
||||||
You can create new instances using the [`docker buildx create`](docs/reference/buildx_create.md)
|
New instances can be created with `docker buildx create` command. This will create a new builder instance with a single node based on your current configuration. To use a remote node you can specify the `DOCKER_HOST` or remote context name while creating the new builder. After creating a new instance you can manage its lifecycle with the `inspect`, `stop` and `rm` commands and list all available builders with `ls`. After creating a new builder you can also append new nodes to it.
|
||||||
command. This creates a new builder instance with a single node based on your
|
|
||||||
current configuration.
|
|
||||||
|
|
||||||
To use a remote node you can specify the `DOCKER_HOST` or the remote context name
|
To switch between different builders, use `docker buildx use <name>`. After running this command the build commands would automatically keep using this builder.
|
||||||
while creating the new builder. After creating a new instance, you can manage its
|
|
||||||
lifecycle using the [`docker buildx inspect`](docs/reference/buildx_inspect.md),
|
|
||||||
[`docker buildx stop`](docs/reference/buildx_stop.md), and
|
|
||||||
[`docker buildx rm`](docs/reference/buildx_rm.md) commands. To list all
|
|
||||||
available builders, use [`buildx ls`](docs/reference/buildx_ls.md). After
|
|
||||||
creating a new builder you can also append new nodes to it.
|
|
||||||
|
|
||||||
To switch between different builders, use [`docker buildx use <name>`](docs/reference/buildx_use.md).
|
Docker 19.03 also features a new `docker context` command that can be used for giving names for remote Docker API endpoints. Buildx integrates with `docker context` so that all of your contexts automatically get a default builder instance. While creating a new builder instance or when adding a node to it you can also set the context name as the target.
|
||||||
After running this command, the build commands will automatically use this
|
|
||||||
builder.
|
|
||||||
|
|
||||||
Docker also features a [`docker context`](https://docs.docker.com/engine/reference/commandline/context/)
|
|
||||||
command that can be used for giving names for remote Docker API endpoints.
|
|
||||||
Buildx integrates with `docker context` so that all of your contexts
|
|
||||||
automatically get a default builder instance. While creating a new builder
|
|
||||||
instance or when adding a node to it you can also set the context name as the
|
|
||||||
target.
|
|
||||||
|
|
||||||
## Building multi-platform images
|
## Building multi-platform images
|
||||||
|
|
||||||
BuildKit is designed to work well for building for multiple platforms and not
|
BuildKit is designed to work well for building for multiple platforms and not only for the architecture and operating system that the user invoking the build happens to run.
|
||||||
only for the architecture and operating system that the user invoking the build
|
|
||||||
happens to run.
|
|
||||||
|
|
||||||
When you invoke a build, you can set the `--platform` flag to specify the target
|
When invoking a build, the `--platform` flag can be used to specify the target platform for the build output, (e.g. linux/amd64, linux/arm64, darwin/amd64). When the current builder instance is backed by the "docker-container" driver, multiple platforms can be specified together. In this case, a manifest list will be built, containing images for all of the specified architectures. When this image is used in `docker run` or `docker service`, Docker will pick the correct image based on the node’s platform.
|
||||||
platform for the build output, (for example, `linux/amd64`, `linux/arm64`, or
|
|
||||||
`darwin/amd64`).
|
|
||||||
|
|
||||||
When the current builder instance is backed by the `docker-container` or
|
Multi-platform images can be built by mainly three different strategies that are all supported by buildx and Dockerfiles. You can use the QEMU emulation support in the kernel, build on multiple native nodes using the same builder instance or use a stage in Dockerfile to cross-compile to different architectures.
|
||||||
`kubernetes` driver, you can specify multiple platforms together. In this case,
|
|
||||||
it builds a manifest list which contains images for all specified architectures.
|
|
||||||
When you use this image in [`docker run`](https://docs.docker.com/engine/reference/commandline/run/)
|
|
||||||
or [`docker service`](https://docs.docker.com/engine/reference/commandline/service/),
|
|
||||||
Docker picks the correct image based on the node's platform.
|
|
||||||
|
|
||||||
You can build multi-platform images using three different strategies that are
|
QEMU is the easiest way to get started if your node already supports it (e.g. if you are using Docker Desktop). It requires no changes to your Dockerfile and BuildKit will automatically detect the secondary architectures that are available. When BuildKit needs to run a binary for a different architecture it will automatically load it through a binary registered in the binfmt_misc handler. For QEMU binaries registered with binfmt_misc on the host OS to work transparently inside containers they must be registered with the fix_binary flag. This requires a kernel >= 4.8 and binfmt-support >= 2.1.7. You can check for proper registration by checking if `F` is among the flags in `/proc/sys/fs/binfmt_misc/qemu-*`. While Docker Desktop comes preconfigured with binfmt_misc support for additional platforms, for other installations it likely needs to be installed using [`tonistiigi/binfmt`](https://github.com/tonistiigi/binfmt) image.
|
||||||
supported by Buildx and Dockerfiles:
|
|
||||||
|
|
||||||
1. Using the QEMU emulation support in the kernel
|
```
|
||||||
2. Building on multiple native nodes using the same builder instance
|
|
||||||
3. Using a stage in Dockerfile to cross-compile to different architectures
|
|
||||||
|
|
||||||
QEMU is the easiest way to get started if your node already supports it (for
|
|
||||||
example. if you are using Docker Desktop). It requires no changes to your
|
|
||||||
Dockerfile and BuildKit automatically detects the secondary architectures that
|
|
||||||
are available. When BuildKit needs to run a binary for a different architecture,
|
|
||||||
it automatically loads it through a binary registered in the `binfmt_misc`
|
|
||||||
handler.
|
|
||||||
|
|
||||||
For QEMU binaries registered with `binfmt_misc` on the host OS to work
|
|
||||||
transparently inside containers they must be registered with the `fix_binary`
|
|
||||||
flag. This requires a kernel >= 4.8 and binfmt-support >= 2.1.7. You can check
|
|
||||||
for proper registration by checking if `F` is among the flags in
|
|
||||||
`/proc/sys/fs/binfmt_misc/qemu-*`. While Docker Desktop comes preconfigured
|
|
||||||
with `binfmt_misc` support for additional platforms, for other installations
|
|
||||||
it likely needs to be installed using [`tonistiigi/binfmt`](https://github.com/tonistiigi/binfmt)
|
|
||||||
image.
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker run --privileged --rm tonistiigi/binfmt --install all
|
$ docker run --privileged --rm tonistiigi/binfmt --install all
|
||||||
```
|
```
|
||||||
|
|
||||||
Using multiple native nodes provide better support for more complicated cases
|
Using multiple native nodes provides better support for more complicated cases not handled by QEMU and generally have better performance. Additional nodes can be added to the builder instance with `--append` flag.
|
||||||
that are not handled by QEMU and generally have better performance. You can
|
|
||||||
add additional nodes to the builder instance using the `--append` flag.
|
|
||||||
|
|
||||||
Assuming contexts `node-amd64` and `node-arm64` exist in `docker context ls`;
|
```
|
||||||
|
# assuming contexts node-amd64 and node-arm64 exist in "docker context ls"
|
||||||
```console
|
|
||||||
$ docker buildx create --use --name mybuild node-amd64
|
$ docker buildx create --use --name mybuild node-amd64
|
||||||
mybuild
|
mybuild
|
||||||
$ docker buildx create --append --name mybuild node-arm64
|
$ docker buildx create --append --name mybuild node-arm64
|
||||||
$ docker buildx build --platform linux/amd64,linux/arm64 .
|
$ docker buildx build --platform linux/amd64,linux/arm64 .
|
||||||
```
|
```
|
||||||
|
|
||||||
Finally, depending on your project, the language that you use may have good
|
Finally, depending on your project, the language that you use may have good support for cross-compilation. In that case, multi-stage builds in Dockerfiles can be effectively used to build binaries for the platform specified with `--platform` using the native architecture of the build node. List of build arguments like `BUILDPLATFORM` and `TARGETPLATFORM` are available automatically inside your Dockerfile and can be leveraged by the processes running as part of your build.
|
||||||
support for cross-compilation. In that case, multi-stage builds in Dockerfiles
|
|
||||||
can be effectively used to build binaries for the platform specified with
|
|
||||||
`--platform` using the native architecture of the build node. A list of build
|
|
||||||
arguments like `BUILDPLATFORM` and `TARGETPLATFORM` is available automatically
|
|
||||||
inside your Dockerfile and can be leveraged by the processes running as part
|
|
||||||
of your build.
|
|
||||||
|
|
||||||
```dockerfile
|
```
|
||||||
FROM --platform=$BUILDPLATFORM golang:alpine AS build
|
FROM --platform=$BUILDPLATFORM golang:alpine AS build
|
||||||
ARG TARGETPLATFORM
|
ARG TARGETPLATFORM
|
||||||
ARG BUILDPLATFORM
|
ARG BUILDPLATFORM
|
||||||
@@ -310,31 +162,25 @@ FROM alpine
|
|||||||
COPY --from=build /log /log
|
COPY --from=build /log /log
|
||||||
```
|
```
|
||||||
|
|
||||||
You can also use [`tonistiigi/xx`](https://github.com/tonistiigi/xx) Dockerfile
|
|
||||||
cross-compilation helpers for more advanced use-cases.
|
|
||||||
|
|
||||||
## High-level build options
|
## High-level build options
|
||||||
|
|
||||||
Buildx also aims to provide support for high-level build concepts that go beyond
|
Buildx also aims to provide support for higher level build concepts that go beyond invoking a single build command. We want to support building all the images in your application together and let the users define project specific reusable build flows that can then be easily invoked by anyone.
|
||||||
invoking a single build command. We want to support building all the images in
|
|
||||||
your application together and let the users define project specific reusable
|
|
||||||
build flows that can then be easily invoked by anyone.
|
|
||||||
|
|
||||||
BuildKit efficiently handles multiple concurrent build requests and
|
BuildKit has great support for efficiently handling multiple concurrent build requests and deduplicating work. While build commands can be combined with general-purpose command runners (eg. make), these tools generally invoke builds in sequence and therefore can’t leverage the full potential of BuildKit parallelization or combine BuildKit’s output for the user. For this use case we have added a command called `docker buildx bake`.
|
||||||
de-duplicating work. The build commands can be combined with general-purpose
|
|
||||||
command runners (for example, `make`). However, these tools generally invoke
|
|
||||||
builds in sequence and therefore cannot leverage the full potential of BuildKit
|
|
||||||
parallelization, or combine BuildKit’s output for the user. For this use case,
|
|
||||||
we have added a command called [`docker buildx bake`](docs/reference/buildx_bake.md).
|
|
||||||
|
|
||||||
The `bake` command supports building images from compose files, similar to
|
Currently, the bake command supports building images from compose files, similar to `compose build` but allowing all the services to be built concurrently as part of a single request.
|
||||||
[`docker-compose build`](https://docs.docker.com/compose/reference/build/),
|
|
||||||
but allowing all the services to be built concurrently as part of a single
|
There is also support for custom build rules from HCL/JSON files allowing better code reuse and different target groups. The design of bake is in very early stages and we are looking for feedback from users.
|
||||||
request.
|
|
||||||
|
[`buildx bake` Reference Docs](docs/reference/buildx_bake.md)
|
||||||
|
|
||||||
|
# Setting buildx as default builder in Docker 19.03+
|
||||||
|
|
||||||
|
Running `docker buildx install` sets up `docker builder` command as an alias to `docker buildx`. This results in the ability to have `docker build` use the current buildx builder.
|
||||||
|
|
||||||
|
To remove this alias, you can run `docker buildx uninstall`.
|
||||||
|
|
||||||
There is also support for custom build rules from HCL/JSON files allowing
|
|
||||||
better code reuse and different target groups. The design of bake is in very
|
|
||||||
early stages and we are looking for feedback from users.
|
|
||||||
|
|
||||||
# Contributing
|
# Contributing
|
||||||
|
|
||||||
|
|||||||
507
bake/bake.go
507
bake/bake.go
@@ -2,14 +2,11 @@ package bake
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"encoding/csv"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
"regexp"
|
||||||
"sort"
|
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
@@ -24,26 +21,14 @@ import (
|
|||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var httpPrefix = regexp.MustCompile(`^https?://`)
|
||||||
httpPrefix = regexp.MustCompile(`^https?://`)
|
var gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)
|
||||||
gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)
|
|
||||||
|
|
||||||
validTargetNameChars = `[a-zA-Z0-9_-]+`
|
|
||||||
validTargetNameCharsCompose = `[a-zA-Z0-9._-]+`
|
|
||||||
targetNamePattern = regexp.MustCompile(`^` + validTargetNameChars + `$`)
|
|
||||||
targetNamePatternCompose = regexp.MustCompile(`^` + validTargetNameCharsCompose + `$`)
|
|
||||||
)
|
|
||||||
|
|
||||||
type File struct {
|
type File struct {
|
||||||
Name string
|
Name string
|
||||||
Data []byte
|
Data []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
type Override struct {
|
|
||||||
Value string
|
|
||||||
ArrValue []string
|
|
||||||
}
|
|
||||||
|
|
||||||
func defaultFilenames() []string {
|
func defaultFilenames() []string {
|
||||||
return []string{
|
return []string{
|
||||||
"docker-compose.yml", // support app
|
"docker-compose.yml", // support app
|
||||||
@@ -64,98 +49,41 @@ func ReadLocalFiles(names []string) ([]File, error) {
|
|||||||
out := make([]File, 0, len(names))
|
out := make([]File, 0, len(names))
|
||||||
|
|
||||||
for _, n := range names {
|
for _, n := range names {
|
||||||
var dt []byte
|
dt, err := ioutil.ReadFile(n)
|
||||||
var err error
|
if err != nil {
|
||||||
if n == "-" {
|
if isDefault && errors.Is(err, os.ErrNotExist) {
|
||||||
dt, err = ioutil.ReadAll(os.Stdin)
|
continue
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
dt, err = ioutil.ReadFile(n)
|
|
||||||
if err != nil {
|
|
||||||
if isDefault && errors.Is(err, os.ErrNotExist) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
out = append(out, File{Name: n, Data: dt})
|
out = append(out, File{Name: n, Data: dt})
|
||||||
}
|
}
|
||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func ReadTargets(ctx context.Context, files []File, targets, overrides []string, defaults map[string]string) (map[string]*Target, []*Group, error) {
|
func ReadTargets(ctx context.Context, files []File, targets, overrides []string, defaults map[string]string) (map[string]*Target, error) {
|
||||||
c, err := ParseFiles(files, defaults)
|
c, err := ParseFiles(files, defaults)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
o, err := c.newOverrides(overrides)
|
o, err := c.newOverrides(overrides)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
m := map[string]*Target{}
|
m := map[string]*Target{}
|
||||||
for _, n := range targets {
|
for _, n := range targets {
|
||||||
for _, n := range c.ResolveGroup(n) {
|
for _, n := range c.ResolveGroup(n) {
|
||||||
t, err := c.ResolveTarget(n, o)
|
t, err := c.ResolveTarget(n, o)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if t != nil {
|
if t != nil {
|
||||||
m[n] = t
|
m[n] = t
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
return m, nil
|
||||||
var g []*Group
|
|
||||||
if len(targets) == 0 || (len(targets) == 1 && targets[0] == "default") {
|
|
||||||
for _, group := range c.Groups {
|
|
||||||
if group.Name != "default" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
g = []*Group{{Targets: group.Targets}}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
var gt []string
|
|
||||||
for _, target := range targets {
|
|
||||||
isGroup := false
|
|
||||||
for _, group := range c.Groups {
|
|
||||||
if target == group.Name {
|
|
||||||
gt = append(gt, group.Targets...)
|
|
||||||
isGroup = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !isGroup {
|
|
||||||
gt = append(gt, target)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
g = []*Group{{Targets: dedupString(gt)}}
|
|
||||||
}
|
|
||||||
|
|
||||||
for name, t := range m {
|
|
||||||
if err := c.loadLinks(name, t, m, o, nil); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return m, g, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func dedupString(s []string) []string {
|
|
||||||
if len(s) == 0 {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
var res []string
|
|
||||||
seen := make(map[string]struct{})
|
|
||||||
for _, val := range s {
|
|
||||||
if _, ok := seen[val]; !ok {
|
|
||||||
res = append(res, val)
|
|
||||||
seen[val] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error) {
|
func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error) {
|
||||||
@@ -191,9 +119,8 @@ func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error)
|
|||||||
|
|
||||||
if len(fs) > 0 {
|
if len(fs) > 0 {
|
||||||
if err := hclparser.Parse(hcl.MergeFiles(fs), hclparser.Opt{
|
if err := hclparser.Parse(hcl.MergeFiles(fs), hclparser.Opt{
|
||||||
LookupVar: os.LookupEnv,
|
LookupVar: os.LookupEnv,
|
||||||
Vars: defaults,
|
Vars: defaults,
|
||||||
ValidateLabel: validateTargetName,
|
|
||||||
}, &c); err.HasErrors() {
|
}, &c); err.HasErrors() {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -313,45 +240,10 @@ func (c Config) expandTargets(pattern string) ([]string, error) {
|
|||||||
return names, nil
|
return names, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Config) loadLinks(name string, t *Target, m map[string]*Target, o map[string]map[string]Override, visited []string) error {
|
func (c Config) newOverrides(v []string) (map[string]*Target, error) {
|
||||||
visited = append(visited, name)
|
m := map[string]*Target{}
|
||||||
for _, v := range t.Contexts {
|
|
||||||
if strings.HasPrefix(v, "target:") {
|
|
||||||
target := strings.TrimPrefix(v, "target:")
|
|
||||||
if target == t.Name {
|
|
||||||
return errors.Errorf("target %s cannot link to itself", target)
|
|
||||||
}
|
|
||||||
for _, v := range visited {
|
|
||||||
if v == target {
|
|
||||||
return errors.Errorf("infinite loop from %s to %s", name, target)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
t2, ok := m[target]
|
|
||||||
if !ok {
|
|
||||||
var err error
|
|
||||||
t2, err = c.ResolveTarget(target, o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
t2.Outputs = nil
|
|
||||||
m[target] = t2
|
|
||||||
}
|
|
||||||
if err := c.loadLinks(target, t2, m, o, visited); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if len(t.Platforms) > 1 && len(t2.Platforms) > 1 {
|
|
||||||
if !sliceEqual(t.Platforms, t2.Platforms) {
|
|
||||||
return errors.Errorf("target %s can't be used by %s because it is defined for different platforms %v and %v", target, name, t2.Platforms, t.Platforms)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c Config) newOverrides(v []string) (map[string]map[string]Override, error) {
|
|
||||||
m := map[string]map[string]Override{}
|
|
||||||
for _, v := range v {
|
for _, v := range v {
|
||||||
|
|
||||||
parts := strings.SplitN(v, "=", 2)
|
parts := strings.SplitN(v, "=", 2)
|
||||||
keys := strings.SplitN(parts[0], ".", 3)
|
keys := strings.SplitN(parts[0], ".", 3)
|
||||||
if len(keys) < 2 {
|
if len(keys) < 2 {
|
||||||
@@ -368,58 +260,85 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
kk := strings.SplitN(parts[0], ".", 2)
|
|
||||||
|
|
||||||
for _, name := range names {
|
for _, name := range names {
|
||||||
t, ok := m[name]
|
t, ok := m[name]
|
||||||
if !ok {
|
if !ok {
|
||||||
t = map[string]Override{}
|
t = &Target{}
|
||||||
m[name] = t
|
|
||||||
}
|
}
|
||||||
|
|
||||||
o := t[kk[1]]
|
|
||||||
|
|
||||||
switch keys[1] {
|
switch keys[1] {
|
||||||
case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh":
|
case "context":
|
||||||
if len(parts) == 2 {
|
t.Context = &parts[1]
|
||||||
o.ArrValue = append(o.ArrValue, parts[1])
|
case "dockerfile":
|
||||||
}
|
t.Dockerfile = &parts[1]
|
||||||
case "args":
|
case "args":
|
||||||
if len(keys) != 3 {
|
if len(keys) != 3 {
|
||||||
return nil, errors.Errorf("invalid key %s, args requires name", parts[0])
|
return nil, errors.Errorf("invalid key %s, args requires name", parts[0])
|
||||||
}
|
}
|
||||||
|
if t.Args == nil {
|
||||||
|
t.Args = map[string]string{}
|
||||||
|
}
|
||||||
if len(parts) < 2 {
|
if len(parts) < 2 {
|
||||||
v, ok := os.LookupEnv(keys[2])
|
v, ok := os.LookupEnv(keys[2])
|
||||||
if !ok {
|
if ok {
|
||||||
continue
|
t.Args[keys[2]] = v
|
||||||
}
|
}
|
||||||
o.Value = v
|
} else {
|
||||||
|
t.Args[keys[2]] = parts[1]
|
||||||
}
|
}
|
||||||
fallthrough
|
case "labels":
|
||||||
case "contexts":
|
|
||||||
if len(keys) != 3 {
|
if len(keys) != 3 {
|
||||||
return nil, errors.Errorf("invalid key %s, contexts requires name", parts[0])
|
return nil, errors.Errorf("invalid key %s, lanels requires name", parts[0])
|
||||||
}
|
}
|
||||||
fallthrough
|
if t.Labels == nil {
|
||||||
|
t.Labels = map[string]string{}
|
||||||
|
}
|
||||||
|
t.Labels[keys[2]] = parts[1]
|
||||||
|
case "tags":
|
||||||
|
t.Tags = append(t.Tags, parts[1])
|
||||||
|
case "cache-from":
|
||||||
|
t.CacheFrom = append(t.CacheFrom, parts[1])
|
||||||
|
case "cache-to":
|
||||||
|
t.CacheTo = append(t.CacheTo, parts[1])
|
||||||
|
case "target":
|
||||||
|
s := parts[1]
|
||||||
|
t.Target = &s
|
||||||
|
case "secrets":
|
||||||
|
t.Secrets = append(t.Secrets, parts[1])
|
||||||
|
case "ssh":
|
||||||
|
t.SSH = append(t.SSH, parts[1])
|
||||||
|
case "platform":
|
||||||
|
t.Platforms = append(t.Platforms, parts[1])
|
||||||
|
case "output":
|
||||||
|
t.Outputs = append(t.Outputs, parts[1])
|
||||||
|
case "no-cache":
|
||||||
|
noCache, err := strconv.ParseBool(parts[1])
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Errorf("invalid value %s for boolean key no-cache", parts[1])
|
||||||
|
}
|
||||||
|
t.NoCache = &noCache
|
||||||
|
case "pull":
|
||||||
|
pull, err := strconv.ParseBool(parts[1])
|
||||||
|
if err != nil {
|
||||||
|
return nil, errors.Errorf("invalid value %s for boolean key pull", parts[1])
|
||||||
|
}
|
||||||
|
t.Pull = &pull
|
||||||
default:
|
default:
|
||||||
if len(parts) == 2 {
|
return nil, errors.Errorf("unknown key: %s", keys[1])
|
||||||
o.Value = parts[1]
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
m[name] = t
|
||||||
t[kk[1]] = o
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return m, nil
|
return m, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Config) ResolveGroup(name string) []string {
|
func (c Config) ResolveGroup(name string) []string {
|
||||||
return dedupString(c.group(name, map[string][]string{}))
|
return c.group(name, map[string]struct{}{})
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Config) group(name string, visited map[string][]string) []string {
|
func (c Config) group(name string, visited map[string]struct{}) []string {
|
||||||
if _, ok := visited[name]; ok {
|
if _, ok := visited[name]; ok {
|
||||||
return visited[name]
|
return nil
|
||||||
}
|
}
|
||||||
var g *Group
|
var g *Group
|
||||||
for _, group := range c.Groups {
|
for _, group := range c.Groups {
|
||||||
@@ -431,26 +350,19 @@ func (c Config) group(name string, visited map[string][]string) []string {
|
|||||||
if g == nil {
|
if g == nil {
|
||||||
return []string{name}
|
return []string{name}
|
||||||
}
|
}
|
||||||
visited[name] = []string{}
|
visited[name] = struct{}{}
|
||||||
targets := make([]string, 0, len(g.Targets))
|
targets := make([]string, 0, len(g.Targets))
|
||||||
for _, t := range g.Targets {
|
for _, t := range g.Targets {
|
||||||
tgroup := c.group(t, visited)
|
targets = append(targets, c.group(t, visited)...)
|
||||||
if len(tgroup) > 0 {
|
|
||||||
targets = append(targets, tgroup...)
|
|
||||||
} else {
|
|
||||||
targets = append(targets, t)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
visited[name] = targets
|
|
||||||
return targets
|
return targets
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Config) ResolveTarget(name string, overrides map[string]map[string]Override) (*Target, error) {
|
func (c Config) ResolveTarget(name string, overrides map[string]*Target) (*Target, error) {
|
||||||
t, err := c.target(name, map[string]*Target{}, overrides)
|
t, err := c.target(name, map[string]struct{}{}, overrides)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
t.Inherits = nil
|
|
||||||
if t.Context == nil {
|
if t.Context == nil {
|
||||||
s := "."
|
s := "."
|
||||||
t.Context = &s
|
t.Context = &s
|
||||||
@@ -462,11 +374,11 @@ func (c Config) ResolveTarget(name string, overrides map[string]map[string]Overr
|
|||||||
return t, nil
|
return t, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Config) target(name string, visited map[string]*Target, overrides map[string]map[string]Override) (*Target, error) {
|
func (c Config) target(name string, visited map[string]struct{}, overrides map[string]*Target) (*Target, error) {
|
||||||
if t, ok := visited[name]; ok {
|
if _, ok := visited[name]; ok {
|
||||||
return t, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
visited[name] = nil
|
visited[name] = struct{}{}
|
||||||
var t *Target
|
var t *Target
|
||||||
for _, target := range c.Targets {
|
for _, target := range c.Targets {
|
||||||
if target.Name == name {
|
if target.Name == name {
|
||||||
@@ -487,15 +399,15 @@ func (c Config) target(name string, visited map[string]*Target, overrides map[st
|
|||||||
tt.Merge(t)
|
tt.Merge(t)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
t.Inherits = nil
|
||||||
m := defaultTarget()
|
m := defaultTarget()
|
||||||
m.Merge(tt)
|
m.Merge(tt)
|
||||||
m.Merge(t)
|
m.Merge(t)
|
||||||
tt = m
|
tt = m
|
||||||
if err := tt.AddOverrides(overrides[name]); err != nil {
|
if override, ok := overrides[name]; ok {
|
||||||
return nil, err
|
tt.Merge(override)
|
||||||
}
|
}
|
||||||
tt.normalize()
|
tt.normalize()
|
||||||
visited[name] = tt
|
|
||||||
return tt, nil
|
return tt, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -512,7 +424,6 @@ type Target struct {
|
|||||||
Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional"`
|
Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional"`
|
||||||
|
|
||||||
Context *string `json:"context,omitempty" hcl:"context,optional"`
|
Context *string `json:"context,omitempty" hcl:"context,optional"`
|
||||||
Contexts map[string]string `json:"contexts,omitempty" hcl:"contexts,optional"`
|
|
||||||
Dockerfile *string `json:"dockerfile,omitempty" hcl:"dockerfile,optional"`
|
Dockerfile *string `json:"dockerfile,omitempty" hcl:"dockerfile,optional"`
|
||||||
DockerfileInline *string `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional"`
|
DockerfileInline *string `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional"`
|
||||||
Args map[string]string `json:"args,omitempty" hcl:"args,optional"`
|
Args map[string]string `json:"args,omitempty" hcl:"args,optional"`
|
||||||
@@ -527,8 +438,7 @@ type Target struct {
|
|||||||
Outputs []string `json:"output,omitempty" hcl:"output,optional"`
|
Outputs []string `json:"output,omitempty" hcl:"output,optional"`
|
||||||
Pull *bool `json:"pull,omitempty" hcl:"pull,optional"`
|
Pull *bool `json:"pull,omitempty" hcl:"pull,optional"`
|
||||||
NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional"`
|
NoCache *bool `json:"no-cache,omitempty" hcl:"no-cache,optional"`
|
||||||
NetworkMode *string `json:"-" hcl:"-"`
|
|
||||||
NoCacheFilter []string `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional"`
|
|
||||||
// IMPORTANT: if you add more fields here, do not forget to update newOverrides and README.
|
// IMPORTANT: if you add more fields here, do not forget to update newOverrides and README.
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -540,16 +450,6 @@ func (t *Target) normalize() {
|
|||||||
t.CacheFrom = removeDupes(t.CacheFrom)
|
t.CacheFrom = removeDupes(t.CacheFrom)
|
||||||
t.CacheTo = removeDupes(t.CacheTo)
|
t.CacheTo = removeDupes(t.CacheTo)
|
||||||
t.Outputs = removeDupes(t.Outputs)
|
t.Outputs = removeDupes(t.Outputs)
|
||||||
t.NoCacheFilter = removeDupes(t.NoCacheFilter)
|
|
||||||
|
|
||||||
for k, v := range t.Contexts {
|
|
||||||
if v == "" {
|
|
||||||
delete(t.Contexts, k)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(t.Contexts) == 0 {
|
|
||||||
t.Contexts = nil
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *Target) Merge(t2 *Target) {
|
func (t *Target) Merge(t2 *Target) {
|
||||||
@@ -568,12 +468,6 @@ func (t *Target) Merge(t2 *Target) {
|
|||||||
}
|
}
|
||||||
t.Args[k] = v
|
t.Args[k] = v
|
||||||
}
|
}
|
||||||
for k, v := range t2.Contexts {
|
|
||||||
if t.Contexts == nil {
|
|
||||||
t.Contexts = map[string]string{}
|
|
||||||
}
|
|
||||||
t.Contexts[k] = v
|
|
||||||
}
|
|
||||||
for k, v := range t2.Labels {
|
for k, v := range t2.Labels {
|
||||||
if t.Labels == nil {
|
if t.Labels == nil {
|
||||||
t.Labels = map[string]string{}
|
t.Labels = map[string]string{}
|
||||||
@@ -610,99 +504,9 @@ func (t *Target) Merge(t2 *Target) {
|
|||||||
if t2.NoCache != nil {
|
if t2.NoCache != nil {
|
||||||
t.NoCache = t2.NoCache
|
t.NoCache = t2.NoCache
|
||||||
}
|
}
|
||||||
if t2.NetworkMode != nil {
|
|
||||||
t.NetworkMode = t2.NetworkMode
|
|
||||||
}
|
|
||||||
if t2.NoCacheFilter != nil { // merge
|
|
||||||
t.NoCacheFilter = append(t.NoCacheFilter, t2.NoCacheFilter...)
|
|
||||||
}
|
|
||||||
t.Inherits = append(t.Inherits, t2.Inherits...)
|
t.Inherits = append(t.Inherits, t2.Inherits...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *Target) AddOverrides(overrides map[string]Override) error {
|
|
||||||
for key, o := range overrides {
|
|
||||||
value := o.Value
|
|
||||||
keys := strings.SplitN(key, ".", 2)
|
|
||||||
switch keys[0] {
|
|
||||||
case "context":
|
|
||||||
t.Context = &value
|
|
||||||
case "dockerfile":
|
|
||||||
t.Dockerfile = &value
|
|
||||||
case "args":
|
|
||||||
if len(keys) != 2 {
|
|
||||||
return errors.Errorf("args require name")
|
|
||||||
}
|
|
||||||
if t.Args == nil {
|
|
||||||
t.Args = map[string]string{}
|
|
||||||
}
|
|
||||||
t.Args[keys[1]] = value
|
|
||||||
case "contexts":
|
|
||||||
if len(keys) != 2 {
|
|
||||||
return errors.Errorf("contexts require name")
|
|
||||||
}
|
|
||||||
if t.Contexts == nil {
|
|
||||||
t.Contexts = map[string]string{}
|
|
||||||
}
|
|
||||||
t.Contexts[keys[1]] = value
|
|
||||||
case "labels":
|
|
||||||
if len(keys) != 2 {
|
|
||||||
return errors.Errorf("labels require name")
|
|
||||||
}
|
|
||||||
if t.Labels == nil {
|
|
||||||
t.Labels = map[string]string{}
|
|
||||||
}
|
|
||||||
t.Labels[keys[1]] = value
|
|
||||||
case "tags":
|
|
||||||
t.Tags = o.ArrValue
|
|
||||||
case "cache-from":
|
|
||||||
t.CacheFrom = o.ArrValue
|
|
||||||
case "cache-to":
|
|
||||||
t.CacheTo = o.ArrValue
|
|
||||||
case "target":
|
|
||||||
t.Target = &value
|
|
||||||
case "secrets":
|
|
||||||
t.Secrets = o.ArrValue
|
|
||||||
case "ssh":
|
|
||||||
t.SSH = o.ArrValue
|
|
||||||
case "platform":
|
|
||||||
t.Platforms = o.ArrValue
|
|
||||||
case "output":
|
|
||||||
t.Outputs = o.ArrValue
|
|
||||||
case "no-cache":
|
|
||||||
noCache, err := strconv.ParseBool(value)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Errorf("invalid value %s for boolean key no-cache", value)
|
|
||||||
}
|
|
||||||
t.NoCache = &noCache
|
|
||||||
case "no-cache-filter":
|
|
||||||
t.NoCacheFilter = o.ArrValue
|
|
||||||
case "pull":
|
|
||||||
pull, err := strconv.ParseBool(value)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Errorf("invalid value %s for boolean key pull", value)
|
|
||||||
}
|
|
||||||
t.Pull = &pull
|
|
||||||
case "push":
|
|
||||||
_, err := strconv.ParseBool(value)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Errorf("invalid value %s for boolean key push", value)
|
|
||||||
}
|
|
||||||
if len(t.Outputs) == 0 {
|
|
||||||
t.Outputs = append(t.Outputs, "type=image,push=true")
|
|
||||||
} else {
|
|
||||||
for i, output := range t.Outputs {
|
|
||||||
if typ := parseOutputType(output); typ == "image" || typ == "registry" {
|
|
||||||
t.Outputs[i] = t.Outputs[i] + ",push=" + value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return errors.Errorf("unknown key: %s", keys[0])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func TargetsToBuildOpt(m map[string]*Target, inp *Input) (map[string]build.Options, error) {
|
func TargetsToBuildOpt(m map[string]*Target, inp *Input) (map[string]build.Options, error) {
|
||||||
m2 := make(map[string]build.Options, len(m))
|
m2 := make(map[string]build.Options, len(m))
|
||||||
for k, v := range m {
|
for k, v := range m {
|
||||||
@@ -719,21 +523,6 @@ func updateContext(t *build.Inputs, inp *Input) {
|
|||||||
if inp == nil || inp.State == nil {
|
if inp == nil || inp.State == nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
for k, v := range t.NamedContexts {
|
|
||||||
if v.Path == "." {
|
|
||||||
t.NamedContexts[k] = build.NamedContext{Path: inp.URL}
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(v.Path, "cwd://") || strings.HasPrefix(v.Path, "target:") || strings.HasPrefix(v.Path, "docker-image:") {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if IsRemoteURL(v.Path) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
st := llb.Scratch().File(llb.Copy(*inp.State, v.Path, "/"), llb.WithCustomNamef("set context %s to %s", k, v.Path))
|
|
||||||
t.NamedContexts[k] = build.NamedContext{State: &st}
|
|
||||||
}
|
|
||||||
|
|
||||||
if t.ContextPath == "." {
|
if t.ContextPath == "." {
|
||||||
t.ContextPath = inp.URL
|
t.ContextPath = inp.URL
|
||||||
return
|
return
|
||||||
@@ -748,59 +537,6 @@ func updateContext(t *build.Inputs, inp *Input) {
|
|||||||
t.ContextState = &st
|
t.ContextState = &st
|
||||||
}
|
}
|
||||||
|
|
||||||
// validateContextsEntitlements is a basic check to ensure contexts do not
|
|
||||||
// escape local directories when loaded from remote sources. This is to be
|
|
||||||
// replaced with proper entitlements support in the future.
|
|
||||||
func validateContextsEntitlements(t build.Inputs, inp *Input) error {
|
|
||||||
if inp == nil || inp.State == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if v, ok := os.LookupEnv("BAKE_ALLOW_REMOTE_FS_ACCESS"); ok {
|
|
||||||
if vv, _ := strconv.ParseBool(v); vv {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if t.ContextState == nil {
|
|
||||||
if err := checkPath(t.ContextPath); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, v := range t.NamedContexts {
|
|
||||||
if v.State != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err := checkPath(v.Path); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkPath(p string) error {
|
|
||||||
if IsRemoteURL(p) || strings.HasPrefix(p, "target:") || strings.HasPrefix(p, "docker-image:") {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
p, err := filepath.EvalSymlinks(p)
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
wd, err := os.Getwd()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
rel, err := filepath.Rel(wd, p)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
|
|
||||||
return errors.Errorf("path %s is outside of the working directory, please set BAKE_ALLOW_REMOTE_FS_ACCESS=1", p)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
||||||
if v := t.Context; v != nil && *v == "-" {
|
if v := t.Context; v != nil && *v == "-" {
|
||||||
return nil, errors.Errorf("context from stdin not allowed in bake")
|
return nil, errors.Errorf("context from stdin not allowed in bake")
|
||||||
@@ -833,15 +569,10 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
if t.Pull != nil {
|
if t.Pull != nil {
|
||||||
pull = *t.Pull
|
pull = *t.Pull
|
||||||
}
|
}
|
||||||
networkMode := ""
|
|
||||||
if t.NetworkMode != nil {
|
|
||||||
networkMode = *t.NetworkMode
|
|
||||||
}
|
|
||||||
|
|
||||||
bi := build.Inputs{
|
bi := build.Inputs{
|
||||||
ContextPath: contextPath,
|
ContextPath: contextPath,
|
||||||
DockerfilePath: dockerfilePath,
|
DockerfilePath: dockerfilePath,
|
||||||
NamedContexts: toNamedContexts(t.Contexts),
|
|
||||||
}
|
}
|
||||||
if t.DockerfileInline != nil {
|
if t.DockerfileInline != nil {
|
||||||
bi.DockerfileInline = *t.DockerfileInline
|
bi.DockerfileInline = *t.DockerfileInline
|
||||||
@@ -850,27 +581,16 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
if strings.HasPrefix(bi.ContextPath, "cwd://") {
|
if strings.HasPrefix(bi.ContextPath, "cwd://") {
|
||||||
bi.ContextPath = path.Clean(strings.TrimPrefix(bi.ContextPath, "cwd://"))
|
bi.ContextPath = path.Clean(strings.TrimPrefix(bi.ContextPath, "cwd://"))
|
||||||
}
|
}
|
||||||
for k, v := range bi.NamedContexts {
|
|
||||||
if strings.HasPrefix(v.Path, "cwd://") {
|
|
||||||
bi.NamedContexts[k] = build.NamedContext{Path: path.Clean(strings.TrimPrefix(v.Path, "cwd://"))}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := validateContextsEntitlements(bi, inp); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
t.Context = &bi.ContextPath
|
t.Context = &bi.ContextPath
|
||||||
|
|
||||||
bo := &build.Options{
|
bo := &build.Options{
|
||||||
Inputs: bi,
|
Inputs: bi,
|
||||||
Tags: t.Tags,
|
Tags: t.Tags,
|
||||||
BuildArgs: t.Args,
|
BuildArgs: t.Args,
|
||||||
Labels: t.Labels,
|
Labels: t.Labels,
|
||||||
NoCache: noCache,
|
NoCache: noCache,
|
||||||
NoCacheFilter: t.NoCacheFilter,
|
Pull: pull,
|
||||||
Pull: pull,
|
|
||||||
NetworkMode: networkMode,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
platforms, err := platformutil.Parse(t.Platforms)
|
platforms, err := platformutil.Parse(t.Platforms)
|
||||||
@@ -946,56 +666,3 @@ func removeDupes(s []string) []string {
|
|||||||
func isRemoteResource(str string) bool {
|
func isRemoteResource(str string) bool {
|
||||||
return urlutil.IsGitURL(str) || urlutil.IsURL(str)
|
return urlutil.IsGitURL(str) || urlutil.IsURL(str)
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseOutputType(str string) string {
|
|
||||||
csvReader := csv.NewReader(strings.NewReader(str))
|
|
||||||
fields, err := csvReader.Read()
|
|
||||||
if err != nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
for _, field := range fields {
|
|
||||||
parts := strings.SplitN(field, "=", 2)
|
|
||||||
if len(parts) == 2 {
|
|
||||||
if parts[0] == "type" {
|
|
||||||
return parts[1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateTargetName(name string) error {
|
|
||||||
if !targetNamePattern.MatchString(name) {
|
|
||||||
return errors.Errorf("only %q are allowed", validTargetNameChars)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateTargetNameCompose(name string) error {
|
|
||||||
if !targetNamePatternCompose.MatchString(name) {
|
|
||||||
return errors.Errorf("only %q are allowed", validTargetNameCharsCompose)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func sliceEqual(s1, s2 []string) bool {
|
|
||||||
if len(s1) != len(s2) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
sort.Strings(s1)
|
|
||||||
sort.Strings(s2)
|
|
||||||
for i := range s1 {
|
|
||||||
if s1[i] != s2[i] {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func toNamedContexts(m map[string]string) map[string]build.NamedContext {
|
|
||||||
m2 := make(map[string]build.NamedContext, len(m))
|
|
||||||
for k, v := range m {
|
|
||||||
m2[k] = build.NamedContext{Path: v}
|
|
||||||
}
|
|
||||||
return m2
|
|
||||||
}
|
|
||||||
|
|||||||
File diff suppressed because it is too large
Load Diff
104
bake/compose.go
104
bake/compose.go
@@ -8,7 +8,6 @@ import (
|
|||||||
|
|
||||||
"github.com/compose-spec/compose-go/loader"
|
"github.com/compose-spec/compose-go/loader"
|
||||||
compose "github.com/compose-spec/compose-go/types"
|
compose "github.com/compose-spec/compose-go/types"
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func parseCompose(dt []byte) (*compose.Project, error) {
|
func parseCompose(dt []byte) (*compose.Project, error) {
|
||||||
@@ -60,10 +59,6 @@ func ParseCompose(dt []byte) (*Config, error) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = validateTargetNameCompose(s.Name); err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "invalid service name %q", s.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
var contextPathP *string
|
var contextPathP *string
|
||||||
if s.Build.Context != "" {
|
if s.Build.Context != "" {
|
||||||
contextPath := s.Build.Context
|
contextPath := s.Build.Context
|
||||||
@@ -81,23 +76,17 @@ func ParseCompose(dt []byte) (*Config, error) {
|
|||||||
Dockerfile: dockerfilePathP,
|
Dockerfile: dockerfilePathP,
|
||||||
Labels: s.Build.Labels,
|
Labels: s.Build.Labels,
|
||||||
Args: flatten(s.Build.Args.Resolve(func(val string) (string, bool) {
|
Args: flatten(s.Build.Args.Resolve(func(val string) (string, bool) {
|
||||||
if val, ok := s.Environment[val]; ok && val != nil {
|
|
||||||
return *val, true
|
|
||||||
}
|
|
||||||
val, ok := cfg.Environment[val]
|
val, ok := cfg.Environment[val]
|
||||||
return val, ok
|
return val, ok
|
||||||
})),
|
})),
|
||||||
CacheFrom: s.Build.CacheFrom,
|
CacheFrom: s.Build.CacheFrom,
|
||||||
NetworkMode: &s.Build.Network,
|
// TODO: add platforms
|
||||||
}
|
|
||||||
if err = t.composeExtTarget(s.Build.Extensions); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
}
|
||||||
if s.Build.Target != "" {
|
if s.Build.Target != "" {
|
||||||
target := s.Build.Target
|
target := s.Build.Target
|
||||||
t.Target = &target
|
t.Target = &target
|
||||||
}
|
}
|
||||||
if len(t.Tags) == 0 && s.Image != "" {
|
if s.Image != "" {
|
||||||
t.Tags = []string{s.Image}
|
t.Tags = []string{s.Image}
|
||||||
}
|
}
|
||||||
c.Targets = append(c.Targets, t)
|
c.Targets = append(c.Targets, t)
|
||||||
@@ -122,90 +111,3 @@ func flatten(in compose.MappingWithEquals) compose.Mapping {
|
|||||||
}
|
}
|
||||||
return out
|
return out
|
||||||
}
|
}
|
||||||
|
|
||||||
// composeExtTarget converts Compose build extension x-bake to bake Target
|
|
||||||
// https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension
|
|
||||||
func (t *Target) composeExtTarget(exts map[string]interface{}) error {
|
|
||||||
if ext, ok := exts["x-bake"]; ok {
|
|
||||||
for key, val := range ext.(map[string]interface{}) {
|
|
||||||
switch key {
|
|
||||||
case "tags":
|
|
||||||
if res, k := val.(string); k {
|
|
||||||
t.Tags = append(t.Tags, res)
|
|
||||||
} else {
|
|
||||||
for _, res := range val.([]interface{}) {
|
|
||||||
t.Tags = append(t.Tags, res.(string))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "cache-from":
|
|
||||||
t.CacheFrom = []string{} // Needed to override the main field
|
|
||||||
if res, k := val.(string); k {
|
|
||||||
t.CacheFrom = append(t.CacheFrom, res)
|
|
||||||
} else {
|
|
||||||
for _, res := range val.([]interface{}) {
|
|
||||||
t.CacheFrom = append(t.CacheFrom, res.(string))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "cache-to":
|
|
||||||
if res, k := val.(string); k {
|
|
||||||
t.CacheTo = append(t.CacheTo, res)
|
|
||||||
} else {
|
|
||||||
for _, res := range val.([]interface{}) {
|
|
||||||
t.CacheTo = append(t.CacheTo, res.(string))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "secret":
|
|
||||||
if res, k := val.(string); k {
|
|
||||||
t.Secrets = append(t.Secrets, res)
|
|
||||||
} else {
|
|
||||||
for _, res := range val.([]interface{}) {
|
|
||||||
t.Secrets = append(t.Secrets, res.(string))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "ssh":
|
|
||||||
if res, k := val.(string); k {
|
|
||||||
t.SSH = append(t.SSH, res)
|
|
||||||
} else {
|
|
||||||
for _, res := range val.([]interface{}) {
|
|
||||||
t.SSH = append(t.SSH, res.(string))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "platforms":
|
|
||||||
if res, k := val.(string); k {
|
|
||||||
t.Platforms = append(t.Platforms, res)
|
|
||||||
} else {
|
|
||||||
for _, res := range val.([]interface{}) {
|
|
||||||
t.Platforms = append(t.Platforms, res.(string))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "output":
|
|
||||||
if res, k := val.(string); k {
|
|
||||||
t.Outputs = append(t.Outputs, res)
|
|
||||||
} else {
|
|
||||||
for _, res := range val.([]interface{}) {
|
|
||||||
t.Outputs = append(t.Outputs, res.(string))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
case "pull":
|
|
||||||
if res, ok := val.(bool); ok {
|
|
||||||
t.Pull = &res
|
|
||||||
}
|
|
||||||
case "no-cache":
|
|
||||||
if res, ok := val.(bool); ok {
|
|
||||||
t.NoCache = &res
|
|
||||||
}
|
|
||||||
case "no-cache-filter":
|
|
||||||
if res, k := val.(string); k {
|
|
||||||
t.NoCacheFilter = append(t.NoCacheFilter, res)
|
|
||||||
} else {
|
|
||||||
for _, res := range val.([]interface{}) {
|
|
||||||
t.NoCacheFilter = append(t.NoCacheFilter, res.(string))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("compose file invalid: unkwown %s field for x-bake", key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -19,8 +19,6 @@ services:
|
|||||||
build:
|
build:
|
||||||
context: ./dir
|
context: ./dir
|
||||||
dockerfile: Dockerfile-alternate
|
dockerfile: Dockerfile-alternate
|
||||||
network:
|
|
||||||
none
|
|
||||||
args:
|
args:
|
||||||
buildno: 123
|
buildno: 123
|
||||||
`)
|
`)
|
||||||
@@ -45,7 +43,6 @@ services:
|
|||||||
require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
|
require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
|
||||||
require.Equal(t, 1, len(c.Targets[1].Args))
|
require.Equal(t, 1, len(c.Targets[1].Args))
|
||||||
require.Equal(t, "123", c.Targets[1].Args["buildno"])
|
require.Equal(t, "123", c.Targets[1].Args["buildno"])
|
||||||
require.Equal(t, "none", *c.Targets[1].NetworkMode)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNoBuildOutOfTreeService(t *testing.T) {
|
func TestNoBuildOutOfTreeService(t *testing.T) {
|
||||||
@@ -217,177 +214,3 @@ networks:
|
|||||||
_, err := ParseCompose(dt)
|
_, err := ParseCompose(dt)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestComposeExt(t *testing.T) {
|
|
||||||
var dt = []byte(`
|
|
||||||
services:
|
|
||||||
addon:
|
|
||||||
image: ct-addon:bar
|
|
||||||
build:
|
|
||||||
context: .
|
|
||||||
dockerfile: ./Dockerfile
|
|
||||||
cache_from:
|
|
||||||
- user/app:cache
|
|
||||||
args:
|
|
||||||
CT_ECR: foo
|
|
||||||
CT_TAG: bar
|
|
||||||
x-bake:
|
|
||||||
tags:
|
|
||||||
- ct-addon:foo
|
|
||||||
- ct-addon:alp
|
|
||||||
platforms:
|
|
||||||
- linux/amd64
|
|
||||||
- linux/arm64
|
|
||||||
cache-from:
|
|
||||||
- type=local,src=path/to/cache
|
|
||||||
cache-to: local,dest=path/to/cache
|
|
||||||
pull: true
|
|
||||||
|
|
||||||
aws:
|
|
||||||
image: ct-fake-aws:bar
|
|
||||||
build:
|
|
||||||
dockerfile: ./aws.Dockerfile
|
|
||||||
args:
|
|
||||||
CT_ECR: foo
|
|
||||||
CT_TAG: bar
|
|
||||||
x-bake:
|
|
||||||
secret:
|
|
||||||
- id=mysecret,src=/local/secret
|
|
||||||
- id=mysecret2,src=/local/secret2
|
|
||||||
ssh: default
|
|
||||||
platforms: linux/arm64
|
|
||||||
output: type=docker
|
|
||||||
no-cache: true
|
|
||||||
`)
|
|
||||||
|
|
||||||
c, err := ParseCompose(dt)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 2, len(c.Targets))
|
|
||||||
sort.Slice(c.Targets, func(i, j int) bool {
|
|
||||||
return c.Targets[i].Name < c.Targets[j].Name
|
|
||||||
})
|
|
||||||
require.Equal(t, c.Targets[0].Args, map[string]string{"CT_ECR": "foo", "CT_TAG": "bar"})
|
|
||||||
require.Equal(t, c.Targets[0].Tags, []string{"ct-addon:foo", "ct-addon:alp"})
|
|
||||||
require.Equal(t, c.Targets[0].Platforms, []string{"linux/amd64", "linux/arm64"})
|
|
||||||
require.Equal(t, c.Targets[0].CacheFrom, []string{"type=local,src=path/to/cache"})
|
|
||||||
require.Equal(t, c.Targets[0].CacheTo, []string{"local,dest=path/to/cache"})
|
|
||||||
require.Equal(t, c.Targets[0].Pull, newBool(true))
|
|
||||||
require.Equal(t, c.Targets[1].Tags, []string{"ct-fake-aws:bar"})
|
|
||||||
require.Equal(t, c.Targets[1].Secrets, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"})
|
|
||||||
require.Equal(t, c.Targets[1].SSH, []string{"default"})
|
|
||||||
require.Equal(t, c.Targets[1].Platforms, []string{"linux/arm64"})
|
|
||||||
require.Equal(t, c.Targets[1].Outputs, []string{"type=docker"})
|
|
||||||
require.Equal(t, c.Targets[1].NoCache, newBool(true))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestEnv(t *testing.T) {
|
|
||||||
envf, err := os.CreateTemp("", "env")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer os.Remove(envf.Name())
|
|
||||||
|
|
||||||
_, err = envf.WriteString("FOO=bsdf -csdf\n")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
var dt = []byte(`
|
|
||||||
services:
|
|
||||||
scratch:
|
|
||||||
build:
|
|
||||||
context: .
|
|
||||||
args:
|
|
||||||
CT_ECR: foo
|
|
||||||
FOO:
|
|
||||||
NODE_ENV:
|
|
||||||
environment:
|
|
||||||
- NODE_ENV=test
|
|
||||||
- AWS_ACCESS_KEY_ID=dummy
|
|
||||||
- AWS_SECRET_ACCESS_KEY=dummy
|
|
||||||
env_file:
|
|
||||||
- ` + envf.Name() + `
|
|
||||||
`)
|
|
||||||
|
|
||||||
c, err := ParseCompose(dt)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, c.Targets[0].Args, map[string]string{"CT_ECR": "foo", "FOO": "bsdf -csdf", "NODE_ENV": "test"})
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestPorts(t *testing.T) {
|
|
||||||
var dt = []byte(`
|
|
||||||
services:
|
|
||||||
foo:
|
|
||||||
build:
|
|
||||||
context: .
|
|
||||||
ports:
|
|
||||||
- 3306:3306
|
|
||||||
bar:
|
|
||||||
build:
|
|
||||||
context: .
|
|
||||||
ports:
|
|
||||||
- mode: ingress
|
|
||||||
target: 3306
|
|
||||||
published: "3306"
|
|
||||||
protocol: tcp
|
|
||||||
`)
|
|
||||||
_, err := ParseCompose(dt)
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func newBool(val bool) *bool {
|
|
||||||
b := val
|
|
||||||
return &b
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestServiceName(t *testing.T) {
|
|
||||||
cases := []struct {
|
|
||||||
svc string
|
|
||||||
wantErr bool
|
|
||||||
}{
|
|
||||||
{
|
|
||||||
svc: "a",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
svc: "abc",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
svc: "a.b",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
svc: "a?b",
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
svc: "_a",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
svc: "a_b",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
svc: "AbC",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
svc: "AbC-0123",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
for _, tt := range cases {
|
|
||||||
tt := tt
|
|
||||||
t.Run(tt.svc, func(t *testing.T) {
|
|
||||||
_, err := ParseCompose([]byte(`
|
|
||||||
services:
|
|
||||||
` + tt.svc + `:
|
|
||||||
build:
|
|
||||||
context: .
|
|
||||||
`))
|
|
||||||
if tt.wantErr {
|
|
||||||
require.Error(t, err)
|
|
||||||
} else {
|
|
||||||
require.NoError(t, err)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ package bake
|
|||||||
import (
|
import (
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/hashicorp/hcl/v2"
|
hcl "github.com/hashicorp/hcl/v2"
|
||||||
"github.com/hashicorp/hcl/v2/hclparse"
|
"github.com/hashicorp/hcl/v2/hclparse"
|
||||||
"github.com/moby/buildkit/solver/errdefs"
|
"github.com/moby/buildkit/solver/errdefs"
|
||||||
"github.com/moby/buildkit/solver/pb"
|
"github.com/moby/buildkit/solver/pb"
|
||||||
|
|||||||
@@ -16,9 +16,8 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type Opt struct {
|
type Opt struct {
|
||||||
LookupVar func(string) (string, bool)
|
LookupVar func(string) (string, bool)
|
||||||
Vars map[string]string
|
Vars map[string]string
|
||||||
ValidateLabel func(string) error
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type variable struct {
|
type variable struct {
|
||||||
@@ -263,12 +262,6 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if opt.ValidateLabel == nil {
|
|
||||||
opt.ValidateLabel = func(string) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
p := &parser{
|
p := &parser{
|
||||||
opt: opt,
|
opt: opt,
|
||||||
|
|
||||||
@@ -453,17 +446,6 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := opt.ValidateLabel(b.Labels[0]); err != nil {
|
|
||||||
return hcl.Diagnostics{
|
|
||||||
&hcl.Diagnostic{
|
|
||||||
Severity: hcl.DiagError,
|
|
||||||
Summary: "Invalid name",
|
|
||||||
Detail: err.Error(),
|
|
||||||
Subject: &b.LabelRanges[0],
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
lblIndex := setLabel(vv, b.Labels[0])
|
lblIndex := setLabel(vv, b.Labels[0])
|
||||||
|
|
||||||
oldValue, exists := t.values[b.Labels[0]]
|
oldValue, exists := t.values[b.Labels[0]]
|
||||||
|
|||||||
@@ -21,10 +21,9 @@ type Input struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func ReadRemoteFiles(ctx context.Context, dis []build.DriverInfo, url string, names []string, pw progress.Writer) ([]File, *Input, error) {
|
func ReadRemoteFiles(ctx context.Context, dis []build.DriverInfo, url string, names []string, pw progress.Writer) ([]File, *Input, error) {
|
||||||
var filename string
|
st, filename, ok := detectHTTPContext(url)
|
||||||
st, ok := detectGitContext(url)
|
|
||||||
if !ok {
|
if !ok {
|
||||||
st, filename, ok = detectHTTPContext(url)
|
st, ok = detectGitContext(url)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, nil, errors.Errorf("not url context")
|
return nil, nil, errors.Errorf("not url context")
|
||||||
}
|
}
|
||||||
|
|||||||
434
build/build.go
434
build/build.go
@@ -3,8 +3,6 @@ package build
|
|||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"context"
|
"context"
|
||||||
"crypto/rand"
|
|
||||||
"encoding/hex"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
@@ -14,7 +12,6 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"syscall"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/containerd/containerd/images"
|
"github.com/containerd/containerd/images"
|
||||||
@@ -22,9 +19,7 @@ import (
|
|||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
"github.com/docker/buildx/util/imagetools"
|
"github.com/docker/buildx/util/imagetools"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/buildx/util/resolver"
|
clitypes "github.com/docker/cli/cli/config/types"
|
||||||
"github.com/docker/buildx/util/waitmap"
|
|
||||||
"github.com/docker/cli/opts"
|
|
||||||
"github.com/docker/distribution/reference"
|
"github.com/docker/distribution/reference"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
dockerclient "github.com/docker/docker/client"
|
dockerclient "github.com/docker/docker/client"
|
||||||
@@ -32,11 +27,9 @@ import (
|
|||||||
"github.com/docker/docker/pkg/urlutil"
|
"github.com/docker/docker/pkg/urlutil"
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/client/llb"
|
"github.com/moby/buildkit/client/llb"
|
||||||
"github.com/moby/buildkit/exporter/containerimage/exptypes"
|
|
||||||
gateway "github.com/moby/buildkit/frontend/gateway/client"
|
gateway "github.com/moby/buildkit/frontend/gateway/client"
|
||||||
"github.com/moby/buildkit/session"
|
"github.com/moby/buildkit/session"
|
||||||
"github.com/moby/buildkit/session/upload/uploadprovider"
|
"github.com/moby/buildkit/session/upload/uploadprovider"
|
||||||
"github.com/moby/buildkit/solver/pb"
|
|
||||||
"github.com/moby/buildkit/util/apicaps"
|
"github.com/moby/buildkit/util/apicaps"
|
||||||
"github.com/moby/buildkit/util/entitlements"
|
"github.com/moby/buildkit/util/entitlements"
|
||||||
"github.com/moby/buildkit/util/progress/progresswriter"
|
"github.com/moby/buildkit/util/progress/progresswriter"
|
||||||
@@ -55,27 +48,26 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type Options struct {
|
type Options struct {
|
||||||
Inputs Inputs
|
Inputs Inputs
|
||||||
|
Tags []string
|
||||||
|
Labels map[string]string
|
||||||
|
BuildArgs map[string]string
|
||||||
|
Pull bool
|
||||||
|
ImageIDFile string
|
||||||
|
ExtraHosts []string
|
||||||
|
NetworkMode string
|
||||||
|
|
||||||
Allow []entitlements.Entitlement
|
NoCache bool
|
||||||
BuildArgs map[string]string
|
Target string
|
||||||
CacheFrom []client.CacheOptionsEntry
|
Platforms []specs.Platform
|
||||||
CacheTo []client.CacheOptionsEntry
|
Exports []client.ExportEntry
|
||||||
CgroupParent string
|
Session []session.Attachable
|
||||||
Exports []client.ExportEntry
|
|
||||||
ExtraHosts []string
|
CacheFrom []client.CacheOptionsEntry
|
||||||
ImageIDFile string
|
CacheTo []client.CacheOptionsEntry
|
||||||
Labels map[string]string
|
|
||||||
NetworkMode string
|
Allow []entitlements.Entitlement
|
||||||
NoCache bool
|
// DockerTarget
|
||||||
NoCacheFilter []string
|
|
||||||
Platforms []specs.Platform
|
|
||||||
Pull bool
|
|
||||||
Session []session.Attachable
|
|
||||||
ShmSize opts.MemBytes
|
|
||||||
Tags []string
|
|
||||||
Target string
|
|
||||||
Ulimits *opts.UlimitOpt
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type Inputs struct {
|
type Inputs struct {
|
||||||
@@ -84,21 +76,17 @@ type Inputs struct {
|
|||||||
InStream io.Reader
|
InStream io.Reader
|
||||||
ContextState *llb.State
|
ContextState *llb.State
|
||||||
DockerfileInline string
|
DockerfileInline string
|
||||||
NamedContexts map[string]NamedContext
|
|
||||||
}
|
|
||||||
|
|
||||||
type NamedContext struct {
|
|
||||||
Path string
|
|
||||||
State *llb.State
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type DriverInfo struct {
|
type DriverInfo struct {
|
||||||
Driver driver.Driver
|
Driver driver.Driver
|
||||||
Name string
|
Name string
|
||||||
Platform []specs.Platform
|
Platform []specs.Platform
|
||||||
Err error
|
Err error
|
||||||
ImageOpt imagetools.Opt
|
}
|
||||||
ProxyConfig map[string]string
|
|
||||||
|
type Auth interface {
|
||||||
|
GetAuthConfig(registryHostname string) (clitypes.AuthConfig, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
type DockerAPI interface {
|
type DockerAPI interface {
|
||||||
@@ -189,10 +177,6 @@ func splitToDriverPairs(availablePlatforms map[string]int, opt map[string]Option
|
|||||||
pp = append(pp, p)
|
pp = append(pp, p)
|
||||||
mm[idx] = pp
|
mm[idx] = pp
|
||||||
}
|
}
|
||||||
// if no platform is specified, use first driver
|
|
||||||
if len(mm) == 0 {
|
|
||||||
mm[0] = nil
|
|
||||||
}
|
|
||||||
dps := make([]driverPair, 0, 2)
|
dps := make([]driverPair, 0, 2)
|
||||||
for idx, pp := range mm {
|
for idx, pp := range mm {
|
||||||
dps = append(dps, driverPair{driverIndex: idx, platforms: pp})
|
dps = append(dps, driverPair{driverIndex: idx, platforms: pp})
|
||||||
@@ -202,8 +186,8 @@ func splitToDriverPairs(availablePlatforms map[string]int, opt map[string]Option
|
|||||||
return m
|
return m
|
||||||
}
|
}
|
||||||
|
|
||||||
func resolveDrivers(ctx context.Context, drivers []DriverInfo, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
|
func resolveDrivers(ctx context.Context, drivers []DriverInfo, auth Auth, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
|
||||||
dps, clients, err := resolveDriversBase(ctx, drivers, opt, pw)
|
dps, clients, err := resolveDriversBase(ctx, drivers, auth, opt, pw)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
@@ -230,7 +214,8 @@ func resolveDrivers(ctx context.Context, drivers []DriverInfo, opt map[string]Op
|
|||||||
}
|
}
|
||||||
|
|
||||||
err = eg.Wait()
|
err = eg.Wait()
|
||||||
tracing.FinishWithError(span, err)
|
span.RecordError(err)
|
||||||
|
span.End()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
@@ -243,7 +228,7 @@ func resolveDrivers(ctx context.Context, drivers []DriverInfo, opt map[string]Op
|
|||||||
return dps, clients, nil
|
return dps, clients, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func resolveDriversBase(ctx context.Context, drivers []DriverInfo, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
|
func resolveDriversBase(ctx context.Context, drivers []DriverInfo, auth Auth, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
|
||||||
availablePlatforms := map[string]int{}
|
availablePlatforms := map[string]int{}
|
||||||
for i, d := range drivers {
|
for i, d := range drivers {
|
||||||
for _, p := range d.Platform {
|
for _, p := range d.Platform {
|
||||||
@@ -349,8 +334,7 @@ func toRepoOnly(in string) (string, error) {
|
|||||||
return strings.Join(out, ","), nil
|
return strings.Join(out, ","), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, dl dockerLoadCallback) (solveOpt *client.SolveOpt, release func(), err error) {
|
func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Options, bopts gateway.BuildOpts, pw progress.Writer, dl dockerLoadCallback) (solveOpt *client.SolveOpt, release func(), err error) {
|
||||||
d := di.Driver
|
|
||||||
defers := make([]func(), 0, 2)
|
defers := make([]func(), 0, 2)
|
||||||
releaseF := func() {
|
releaseF := func() {
|
||||||
for _, f := range defers {
|
for _, f := range defers {
|
||||||
@@ -416,10 +400,6 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
|
|||||||
AllowedEntitlements: opt.Allow,
|
AllowedEntitlements: opt.Allow,
|
||||||
}
|
}
|
||||||
|
|
||||||
if opt.CgroupParent != "" {
|
|
||||||
so.FrontendAttrs["cgroup-parent"] = opt.CgroupParent
|
|
||||||
}
|
|
||||||
|
|
||||||
if v, ok := opt.BuildArgs["BUILDKIT_MULTI_PLATFORM"]; ok {
|
if v, ok := opt.BuildArgs["BUILDKIT_MULTI_PLATFORM"]; ok {
|
||||||
if v, _ := strconv.ParseBool(v); v {
|
if v, _ := strconv.ParseBool(v); v {
|
||||||
so.FrontendAttrs["multi-platform"] = "true"
|
so.FrontendAttrs["multi-platform"] = "true"
|
||||||
@@ -516,12 +496,6 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if e.Type == "docker" || e.Type == "image" || e.Type == "oci" {
|
|
||||||
// inline buildinfo attrs from build arg
|
|
||||||
if v, ok := opt.BuildArgs["BUILDKIT_INLINE_BUILDINFO_ATTRS"]; ok {
|
|
||||||
e.Attrs["buildinfo-attrs"] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
so.Exports = opt.Exports
|
so.Exports = opt.Exports
|
||||||
@@ -533,22 +507,12 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
|
|||||||
}
|
}
|
||||||
defers = append(defers, releaseLoad)
|
defers = append(defers, releaseLoad)
|
||||||
|
|
||||||
if sharedKey := so.LocalDirs["context"]; sharedKey != "" {
|
|
||||||
if p, err := filepath.Abs(sharedKey); err == nil {
|
|
||||||
sharedKey = filepath.Base(p)
|
|
||||||
}
|
|
||||||
so.SharedKey = sharedKey + ":" + tryNodeIdentifier(configDir)
|
|
||||||
}
|
|
||||||
|
|
||||||
if opt.Pull {
|
if opt.Pull {
|
||||||
so.FrontendAttrs["image-resolve-mode"] = "pull"
|
so.FrontendAttrs["image-resolve-mode"] = "pull"
|
||||||
}
|
}
|
||||||
if opt.Target != "" {
|
if opt.Target != "" {
|
||||||
so.FrontendAttrs["target"] = opt.Target
|
so.FrontendAttrs["target"] = opt.Target
|
||||||
}
|
}
|
||||||
if len(opt.NoCacheFilter) > 0 {
|
|
||||||
so.FrontendAttrs["no-cache"] = strings.Join(opt.NoCacheFilter, ",")
|
|
||||||
}
|
|
||||||
if opt.NoCache {
|
if opt.NoCache {
|
||||||
so.FrontendAttrs["no-cache"] = ""
|
so.FrontendAttrs["no-cache"] = ""
|
||||||
}
|
}
|
||||||
@@ -559,12 +523,6 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
|
|||||||
so.FrontendAttrs["label:"+k] = v
|
so.FrontendAttrs["label:"+k] = v
|
||||||
}
|
}
|
||||||
|
|
||||||
for k, v := range di.ProxyConfig {
|
|
||||||
if _, ok := opt.BuildArgs[k]; !ok {
|
|
||||||
so.FrontendAttrs["build-arg:"+k] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// set platforms
|
// set platforms
|
||||||
if len(opt.Platforms) != 0 {
|
if len(opt.Platforms) != 0 {
|
||||||
pp := make([]string, len(opt.Platforms))
|
pp := make([]string, len(opt.Platforms))
|
||||||
@@ -586,7 +544,7 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
|
|||||||
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
|
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
|
||||||
case "", "default":
|
case "", "default":
|
||||||
default:
|
default:
|
||||||
return nil, nil, errors.Errorf("network mode %q not supported by buildkit. You can define a custom network for your builder using the network driver-opt in buildx create.", opt.NetworkMode)
|
return nil, nil, errors.Errorf("network mode %q not supported by buildkit", opt.NetworkMode)
|
||||||
}
|
}
|
||||||
|
|
||||||
// setup extrahosts
|
// setup extrahosts
|
||||||
@@ -596,23 +554,10 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
|
|||||||
}
|
}
|
||||||
so.FrontendAttrs["add-hosts"] = extraHosts
|
so.FrontendAttrs["add-hosts"] = extraHosts
|
||||||
|
|
||||||
// setup shm size
|
|
||||||
if opt.ShmSize.Value() > 0 {
|
|
||||||
so.FrontendAttrs["shm-size"] = strconv.FormatInt(opt.ShmSize.Value(), 10)
|
|
||||||
}
|
|
||||||
|
|
||||||
// setup ulimits
|
|
||||||
ulimits, err := toBuildkitUlimits(opt.Ulimits)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
} else if len(ulimits) > 0 {
|
|
||||||
so.FrontendAttrs["ulimit"] = ulimits
|
|
||||||
}
|
|
||||||
|
|
||||||
return &so, releaseF, nil
|
return &so, releaseF, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, docker DockerAPI, configDir string, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
|
func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, docker DockerAPI, auth Auth, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
|
||||||
if len(drivers) == 0 {
|
if len(drivers) == 0 {
|
||||||
return nil, errors.Errorf("driver required for build")
|
return nil, errors.Errorf("driver required for build")
|
||||||
}
|
}
|
||||||
@@ -639,7 +584,7 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
m, clients, err := resolveDrivers(ctx, drivers, opt, w)
|
m, clients, err := resolveDrivers(ctx, drivers, auth, opt, w)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -659,12 +604,12 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
|||||||
multiDriver := len(m[k]) > 1
|
multiDriver := len(m[k]) > 1
|
||||||
hasMobyDriver := false
|
hasMobyDriver := false
|
||||||
for i, dp := range m[k] {
|
for i, dp := range m[k] {
|
||||||
di := drivers[dp.driverIndex]
|
d := drivers[dp.driverIndex].Driver
|
||||||
if di.Driver.IsMobyDriver() {
|
if d.IsMobyDriver() {
|
||||||
hasMobyDriver = true
|
hasMobyDriver = true
|
||||||
}
|
}
|
||||||
opt.Platforms = dp.platforms
|
opt.Platforms = dp.platforms
|
||||||
so, release, err := toSolveOpt(ctx, di, multiDriver, opt, dp.bopts, configDir, w, func(name string) (io.WriteCloser, func(), error) {
|
so, release, err := toSolveOpt(ctx, d, multiDriver, opt, dp.bopts, w, func(name string) (io.WriteCloser, func(), error) {
|
||||||
return newDockerLoader(ctx, docker, name, w)
|
return newDockerLoader(ctx, docker, name, w)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -697,35 +642,8 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// validate that all links between targets use same drivers
|
|
||||||
for name := range opt {
|
|
||||||
dps := m[name]
|
|
||||||
for _, dp := range dps {
|
|
||||||
for k, v := range dp.so.FrontendAttrs {
|
|
||||||
if strings.HasPrefix(k, "context:") && strings.HasPrefix(v, "target:") {
|
|
||||||
k2 := strings.TrimPrefix(v, "target:")
|
|
||||||
dps2, ok := m[k2]
|
|
||||||
if !ok {
|
|
||||||
return nil, errors.Errorf("failed to find target %s for context %s", k2, strings.TrimPrefix(k, "context:")) // should be validated before already
|
|
||||||
}
|
|
||||||
var found bool
|
|
||||||
for _, dp2 := range dps2 {
|
|
||||||
if dp2.driverIndex == dp.driverIndex {
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !found {
|
|
||||||
return nil, errors.Errorf("failed to use %s as context %s for %s because targets build with different drivers", k2, strings.TrimPrefix(k, "context:"), name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
resp = map[string]*client.SolveResponse{}
|
resp = map[string]*client.SolveResponse{}
|
||||||
var respMu sync.Mutex
|
var respMu sync.Mutex
|
||||||
results := waitmap.New()
|
|
||||||
|
|
||||||
multiTarget := len(opt) > 1
|
multiTarget := len(opt) > 1
|
||||||
|
|
||||||
@@ -746,12 +664,12 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
|||||||
wg.Add(len(dps))
|
wg.Add(len(dps))
|
||||||
|
|
||||||
var pushNames string
|
var pushNames string
|
||||||
var insecurePush bool
|
|
||||||
|
|
||||||
eg.Go(func() (err error) {
|
eg.Go(func() (err error) {
|
||||||
defer func() {
|
defer func() {
|
||||||
if span != nil {
|
if span != nil {
|
||||||
tracing.FinishWithError(span, err)
|
span.RecordError(err)
|
||||||
|
span.End()
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
pw := progress.WithPrefix(w, "default", false)
|
pw := progress.WithPrefix(w, "default", false)
|
||||||
@@ -766,12 +684,8 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
|||||||
resp[k] = res[0]
|
resp[k] = res[0]
|
||||||
respMu.Unlock()
|
respMu.Unlock()
|
||||||
if len(res) == 1 {
|
if len(res) == 1 {
|
||||||
dgst := res[0].ExporterResponse[exptypes.ExporterImageDigestKey]
|
|
||||||
if v, ok := res[0].ExporterResponse[exptypes.ExporterImageConfigDigestKey]; ok {
|
|
||||||
dgst = v
|
|
||||||
}
|
|
||||||
if opt.ImageIDFile != "" {
|
if opt.ImageIDFile != "" {
|
||||||
return ioutil.WriteFile(opt.ImageIDFile, []byte(dgst), 0644)
|
return ioutil.WriteFile(opt.ImageIDFile, []byte(res[0].ExporterResponse["containerimage.digest"]), 0644)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -781,7 +695,7 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
|||||||
descs := make([]specs.Descriptor, 0, len(res))
|
descs := make([]specs.Descriptor, 0, len(res))
|
||||||
|
|
||||||
for _, r := range res {
|
for _, r := range res {
|
||||||
s, ok := r.ExporterResponse[exptypes.ExporterImageDigestKey]
|
s, ok := r.ExporterResponse["containerimage.digest"]
|
||||||
if ok {
|
if ok {
|
||||||
descs = append(descs, specs.Descriptor{
|
descs = append(descs, specs.Descriptor{
|
||||||
Digest: digest.Digest(s),
|
Digest: digest.Digest(s),
|
||||||
@@ -791,41 +705,22 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(descs) > 0 {
|
if len(descs) > 0 {
|
||||||
var imageopt imagetools.Opt
|
itpull := imagetools.New(imagetools.Opt{
|
||||||
for _, dp := range dps {
|
Auth: auth,
|
||||||
imageopt = drivers[dp.driverIndex].ImageOpt
|
})
|
||||||
break
|
|
||||||
}
|
|
||||||
names := strings.Split(pushNames, ",")
|
names := strings.Split(pushNames, ",")
|
||||||
|
|
||||||
if insecurePush {
|
|
||||||
insecureTrue := true
|
|
||||||
httpTrue := true
|
|
||||||
nn, err := reference.ParseNormalizedNamed(names[0])
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
imageopt.RegistryConfig = map[string]resolver.RegistryConfig{
|
|
||||||
reference.Domain(nn): {
|
|
||||||
Insecure: &insecureTrue,
|
|
||||||
PlainHTTP: &httpTrue,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
itpull := imagetools.New(imageopt)
|
|
||||||
|
|
||||||
dt, desc, err := itpull.Combine(ctx, names[0], descs)
|
dt, desc, err := itpull.Combine(ctx, names[0], descs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if opt.ImageIDFile != "" {
|
if opt.ImageIDFile != "" {
|
||||||
if err := ioutil.WriteFile(opt.ImageIDFile, []byte(desc.Digest), 0644); err != nil {
|
return ioutil.WriteFile(opt.ImageIDFile, []byte(desc.Digest), 0644)
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
itpush := imagetools.New(imageopt)
|
itpush := imagetools.New(imagetools.Opt{
|
||||||
|
Auth: auth,
|
||||||
|
})
|
||||||
|
|
||||||
for _, n := range names {
|
for _, n := range names {
|
||||||
nn, err := reference.ParseNormalizedNamed(n)
|
nn, err := reference.ParseNormalizedNamed(n)
|
||||||
@@ -853,6 +748,7 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
|||||||
|
|
||||||
for i, dp := range dps {
|
for i, dp := range dps {
|
||||||
so := *dp.so
|
so := *dp.so
|
||||||
|
|
||||||
if multiDriver {
|
if multiDriver {
|
||||||
for i, e := range so.Exports {
|
for i, e := range so.Exports {
|
||||||
switch e.Type {
|
switch e.Type {
|
||||||
@@ -869,9 +765,6 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if ok, _ := strconv.ParseBool(e.Attrs["registry.insecure"]); ok {
|
|
||||||
insecurePush = true
|
|
||||||
}
|
|
||||||
e.Attrs["name"] = names
|
e.Attrs["name"] = names
|
||||||
e.Attrs["push-by-digest"] = "true"
|
e.Attrs["push-by-digest"] = "true"
|
||||||
so.Exports[i].Attrs = e.Attrs
|
so.Exports[i].Attrs = e.Attrs
|
||||||
@@ -885,43 +778,14 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
|||||||
pw := progress.WithPrefix(w, k, multiTarget)
|
pw := progress.WithPrefix(w, k, multiTarget)
|
||||||
|
|
||||||
c := clients[dp.driverIndex]
|
c := clients[dp.driverIndex]
|
||||||
|
|
||||||
|
pw = progress.ResetTime(pw)
|
||||||
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
pw = progress.ResetTime(pw)
|
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
|
|
||||||
if err := waitContextDeps(ctx, dp.driverIndex, results, &so); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
frontendInputs := make(map[string]*pb.Definition)
|
|
||||||
for key, st := range so.FrontendInputs {
|
|
||||||
def, err := st.Marshal(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
frontendInputs[key] = def.ToPB()
|
|
||||||
}
|
|
||||||
|
|
||||||
req := gateway.SolveRequest{
|
|
||||||
Frontend: so.Frontend,
|
|
||||||
FrontendOpt: so.FrontendAttrs,
|
|
||||||
FrontendInputs: frontendInputs,
|
|
||||||
}
|
|
||||||
so.Frontend = ""
|
|
||||||
so.FrontendAttrs = nil
|
|
||||||
so.FrontendInputs = nil
|
|
||||||
|
|
||||||
ch, done := progress.NewChannel(pw)
|
ch, done := progress.NewChannel(pw)
|
||||||
defer func() { <-done }()
|
defer func() { <-done }()
|
||||||
|
rr, err := c.Solve(ctx, nil, so, ch)
|
||||||
rr, err := c.Build(ctx, so, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
|
|
||||||
res, err := c.Solve(ctx, req)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
results.Set(resultKey(dp.driverIndex, k), res)
|
|
||||||
return res, nil
|
|
||||||
}, ch)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -937,27 +801,13 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
|||||||
return errors.Errorf("tag is needed when pushing to registry")
|
return errors.Errorf("tag is needed when pushing to registry")
|
||||||
}
|
}
|
||||||
pw := progress.ResetTime(pw)
|
pw := progress.ResetTime(pw)
|
||||||
pushList := strings.Split(pushNames, ",")
|
for _, name := range strings.Split(pushNames, ",") {
|
||||||
for _, name := range pushList {
|
|
||||||
if err := progress.Wrap(fmt.Sprintf("pushing %s with docker", name), pw.Write, func(l progress.SubLogger) error {
|
if err := progress.Wrap(fmt.Sprintf("pushing %s with docker", name), pw.Write, func(l progress.SubLogger) error {
|
||||||
return pushWithMoby(ctx, d, name, l)
|
return pushWithMoby(ctx, d, name, l)
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
remoteDigest, err := remoteDigestWithMoby(ctx, d, pushList[0])
|
|
||||||
if err == nil && remoteDigest != "" {
|
|
||||||
// old daemons might not have containerimage.config.digest set
|
|
||||||
// in response so use containerimage.digest value for it if available
|
|
||||||
if _, ok := rr.ExporterResponse[exptypes.ExporterImageConfigDigestKey]; !ok {
|
|
||||||
if v, ok := rr.ExporterResponse[exptypes.ExporterImageDigestKey]; ok {
|
|
||||||
rr.ExporterResponse[exptypes.ExporterImageConfigDigestKey] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
rr.ExporterResponse[exptypes.ExporterImageDigestKey] = remoteDigest
|
|
||||||
} else if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1062,29 +912,6 @@ func pushWithMoby(ctx context.Context, d driver.Driver, name string, l progress.
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func remoteDigestWithMoby(ctx context.Context, d driver.Driver, name string) (string, error) {
|
|
||||||
api := d.Config().DockerAPI
|
|
||||||
if api == nil {
|
|
||||||
return "", errors.Errorf("invalid empty Docker API reference") // should never happen
|
|
||||||
}
|
|
||||||
creds, err := imagetools.RegistryAuthForRef(name, d.Config().Auth)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
image, _, err := api.ImageInspectWithRaw(ctx, name)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if len(image.RepoDigests) == 0 {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
remoteImage, err := api.DistributionInspect(ctx, name, creds)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return remoteImage.Descriptor.Digest.String(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func createTempDockerfile(r io.Reader) (string, error) {
|
func createTempDockerfile(r io.Reader) (string, error) {
|
||||||
dir, err := ioutil.TempDir("", "dockerfile")
|
dir, err := ioutil.TempDir("", "dockerfile")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -1207,36 +1034,6 @@ func LoadInputs(ctx context.Context, d driver.Driver, inp Inputs, pw progress.Wr
|
|||||||
|
|
||||||
target.FrontendAttrs["filename"] = dockerfileName
|
target.FrontendAttrs["filename"] = dockerfileName
|
||||||
|
|
||||||
for k, v := range inp.NamedContexts {
|
|
||||||
target.FrontendAttrs["frontend.caps"] = "moby.buildkit.frontend.contexts+forward"
|
|
||||||
if v.State != nil {
|
|
||||||
target.FrontendAttrs["context:"+k] = "input:" + k
|
|
||||||
if target.FrontendInputs == nil {
|
|
||||||
target.FrontendInputs = make(map[string]llb.State)
|
|
||||||
}
|
|
||||||
target.FrontendInputs[k] = *v.State
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if urlutil.IsGitURL(v.Path) || urlutil.IsURL(v.Path) || strings.HasPrefix(v.Path, "docker-image://") || strings.HasPrefix(v.Path, "target:") {
|
|
||||||
target.FrontendAttrs["context:"+k] = v.Path
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
st, err := os.Stat(v.Path)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "failed to get build context %v", k)
|
|
||||||
}
|
|
||||||
if !st.IsDir() {
|
|
||||||
return nil, errors.Wrapf(syscall.ENOTDIR, "failed to get build context path %v", v)
|
|
||||||
}
|
|
||||||
localName := k
|
|
||||||
if k == "context" || k == "dockerfile" {
|
|
||||||
localName = "_" + k // underscore to avoid collisions
|
|
||||||
}
|
|
||||||
target.LocalDirs[localName] = v.Path
|
|
||||||
target.FrontendAttrs["context:"+k] = "local:" + localName
|
|
||||||
}
|
|
||||||
|
|
||||||
release := func() {
|
release := func() {
|
||||||
for _, dir := range toRemove {
|
for _, dir := range toRemove {
|
||||||
os.RemoveAll(dir)
|
os.RemoveAll(dir)
|
||||||
@@ -1245,96 +1042,6 @@ func LoadInputs(ctx context.Context, d driver.Driver, inp Inputs, pw progress.Wr
|
|||||||
return release, nil
|
return release, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func resultKey(index int, name string) string {
|
|
||||||
return fmt.Sprintf("%d-%s", index, name)
|
|
||||||
}
|
|
||||||
|
|
||||||
func waitContextDeps(ctx context.Context, index int, results *waitmap.Map, so *client.SolveOpt) error {
|
|
||||||
m := map[string]string{}
|
|
||||||
for k, v := range so.FrontendAttrs {
|
|
||||||
if strings.HasPrefix(k, "context:") && strings.HasPrefix(v, "target:") {
|
|
||||||
target := resultKey(index, strings.TrimPrefix(v, "target:"))
|
|
||||||
m[target] = k
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(m) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
keys := make([]string, 0, len(m))
|
|
||||||
for k := range m {
|
|
||||||
keys = append(keys, k)
|
|
||||||
}
|
|
||||||
res, err := results.Get(ctx, keys...)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for k, v := range m {
|
|
||||||
r, ok := res[k]
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
rr, ok := r.(*gateway.Result)
|
|
||||||
if !ok {
|
|
||||||
return errors.Errorf("invalid result type %T", rr)
|
|
||||||
}
|
|
||||||
if so.FrontendAttrs == nil {
|
|
||||||
so.FrontendAttrs = map[string]string{}
|
|
||||||
}
|
|
||||||
if so.FrontendInputs == nil {
|
|
||||||
so.FrontendInputs = map[string]llb.State{}
|
|
||||||
}
|
|
||||||
if len(rr.Refs) > 0 {
|
|
||||||
for platform, r := range rr.Refs {
|
|
||||||
st, err := r.ToState()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
so.FrontendInputs[k+"::"+platform] = st
|
|
||||||
so.FrontendAttrs[v+"::"+platform] = "input:" + k + "::" + platform
|
|
||||||
metadata := make(map[string][]byte)
|
|
||||||
if dt, ok := rr.Metadata[exptypes.ExporterImageConfigKey+"/"+platform]; ok {
|
|
||||||
metadata[exptypes.ExporterImageConfigKey] = dt
|
|
||||||
}
|
|
||||||
if dt, ok := rr.Metadata[exptypes.ExporterBuildInfo+"/"+platform]; ok {
|
|
||||||
metadata[exptypes.ExporterBuildInfo] = dt
|
|
||||||
}
|
|
||||||
if len(metadata) > 0 {
|
|
||||||
dt, err := json.Marshal(metadata)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
so.FrontendAttrs["input-metadata:"+k+"::"+platform] = string(dt)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
delete(so.FrontendAttrs, v)
|
|
||||||
}
|
|
||||||
if rr.Ref != nil {
|
|
||||||
st, err := rr.Ref.ToState()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
so.FrontendInputs[k] = st
|
|
||||||
so.FrontendAttrs[v] = "input:" + k
|
|
||||||
metadata := make(map[string][]byte)
|
|
||||||
if dt, ok := rr.Metadata[exptypes.ExporterImageConfigKey]; ok {
|
|
||||||
metadata[exptypes.ExporterImageConfigKey] = dt
|
|
||||||
}
|
|
||||||
if dt, ok := rr.Metadata[exptypes.ExporterBuildInfo]; ok {
|
|
||||||
metadata[exptypes.ExporterBuildInfo] = dt
|
|
||||||
}
|
|
||||||
if len(metadata) > 0 {
|
|
||||||
dt, err := json.Marshal(metadata)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
so.FrontendAttrs["input-metadata:"+k] = string(dt)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func notSupported(d driver.Driver, f driver.Feature) error {
|
func notSupported(d driver.Driver, f driver.Feature) error {
|
||||||
return errors.Errorf("%s feature is currently not supported for %s driver. Please switch to a different driver (eg. \"docker buildx create --use\")", f, d.Factory().Name())
|
return errors.Errorf("%s feature is currently not supported for %s driver. Please switch to a different driver (eg. \"docker buildx create --use\")", f, d.Factory().Name())
|
||||||
}
|
}
|
||||||
@@ -1451,28 +1158,3 @@ func wrapWriteCloser(wc io.WriteCloser) func(map[string]string) (io.WriteCloser,
|
|||||||
return wc, nil
|
return wc, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var nodeIdentifierMu sync.Mutex
|
|
||||||
|
|
||||||
func tryNodeIdentifier(configDir string) (out string) {
|
|
||||||
nodeIdentifierMu.Lock()
|
|
||||||
defer nodeIdentifierMu.Unlock()
|
|
||||||
sessionFile := filepath.Join(configDir, ".buildNodeID")
|
|
||||||
if _, err := os.Lstat(sessionFile); err != nil {
|
|
||||||
if os.IsNotExist(err) { // create a new file with stored randomness
|
|
||||||
b := make([]byte, 8)
|
|
||||||
if _, err := rand.Read(b); err != nil {
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
if err := ioutil.WriteFile(sessionFile, []byte(hex.EncodeToString(b)), 0600); err != nil {
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
dt, err := ioutil.ReadFile(sessionFile)
|
|
||||||
if err == nil {
|
|
||||||
return string(dt)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -7,7 +7,6 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/cli/opts"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -54,15 +53,3 @@ func toBuildkitExtraHosts(inp []string) (string, error) {
|
|||||||
}
|
}
|
||||||
return strings.Join(hosts, ","), nil
|
return strings.Join(hosts, ","), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// toBuildkitUlimits converts ulimits from docker type=soft:hard format to buildkit's csv format
|
|
||||||
func toBuildkitUlimits(inp *opts.UlimitOpt) (string, error) {
|
|
||||||
if inp == nil || len(inp.GetList()) == 0 {
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
ulimits := make([]string, 0, len(inp.GetList()))
|
|
||||||
for _, ulimit := range inp.GetList() {
|
|
||||||
ulimits = append(ulimits, ulimit.String())
|
|
||||||
}
|
|
||||||
return strings.Join(ulimits, ","), nil
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -43,19 +43,21 @@ func init() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
if plugin.RunningStandalone() {
|
if os.Getenv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND") == "" {
|
||||||
dockerCli, err := command.NewDockerCli()
|
if len(os.Args) < 2 || os.Args[1] != manager.MetadataSubcommandName {
|
||||||
if err != nil {
|
dockerCli, err := command.NewDockerCli()
|
||||||
fmt.Fprintln(os.Stderr, err)
|
if err != nil {
|
||||||
os.Exit(1)
|
fmt.Fprintln(os.Stderr, err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
opts := cliflags.NewClientOptions()
|
||||||
|
dockerCli.Initialize(opts)
|
||||||
|
rootCmd := commands.NewRootCmd(os.Args[0], false, dockerCli)
|
||||||
|
if err := rootCmd.Execute(); err != nil {
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
os.Exit(0)
|
||||||
}
|
}
|
||||||
opts := cliflags.NewClientOptions()
|
|
||||||
dockerCli.Initialize(opts)
|
|
||||||
rootCmd := commands.NewRootCmd(os.Args[0], false, dockerCli)
|
|
||||||
if err := rootCmd.Execute(); err != nil {
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
os.Exit(0)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
dockerCli, err := command.NewDockerCli()
|
dockerCli, err := command.NewDockerCli()
|
||||||
|
|||||||
@@ -6,13 +6,12 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/containerd/containerd/platforms"
|
|
||||||
"github.com/docker/buildx/bake"
|
"github.com/docker/buildx/bake"
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/build"
|
||||||
"github.com/docker/buildx/util/confutil"
|
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/buildx/util/tracing"
|
"github.com/docker/buildx/util/tracing"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/docker/docker/pkg/ioutils"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
@@ -20,8 +19,8 @@ import (
|
|||||||
|
|
||||||
type bakeOptions struct {
|
type bakeOptions struct {
|
||||||
files []string
|
files []string
|
||||||
overrides []string
|
|
||||||
printOnly bool
|
printOnly bool
|
||||||
|
overrides []string
|
||||||
commonOptions
|
commonOptions
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -47,6 +46,7 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
|
|||||||
if bake.IsRemoteURL(targets[0]) {
|
if bake.IsRemoteURL(targets[0]) {
|
||||||
cmdContext = targets[0]
|
cmdContext = targets[0]
|
||||||
targets = targets[1:]
|
targets = targets[1:]
|
||||||
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -61,7 +61,7 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
|
|||||||
if in.exportLoad {
|
if in.exportLoad {
|
||||||
return errors.Errorf("push and load may not be set together at the moment")
|
return errors.Errorf("push and load may not be set together at the moment")
|
||||||
}
|
}
|
||||||
overrides = append(overrides, "*.push=true")
|
overrides = append(overrides, "*.output=type=registry")
|
||||||
} else if in.exportLoad {
|
} else if in.exportLoad {
|
||||||
overrides = append(overrides, "*.output=type=docker")
|
overrides = append(overrides, "*.output=type=docker")
|
||||||
}
|
}
|
||||||
@@ -75,7 +75,7 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
|
|||||||
|
|
||||||
ctx2, cancel := context.WithCancel(context.TODO())
|
ctx2, cancel := context.WithCancel(context.TODO())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
printer := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, in.progress)
|
printer := progress.NewPrinter(ctx2, os.Stderr, in.progress)
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
if printer != nil {
|
if printer != nil {
|
||||||
@@ -103,36 +103,21 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
tgts, grps, err := bake.ReadTargets(ctx, files, targets, overrides, map[string]string{
|
m, err := bake.ReadTargets(ctx, files, targets, overrides, map[string]string{
|
||||||
// Don't forget to update documentation if you add a new
|
"BAKE_CMD_CONTEXT": cmdContext,
|
||||||
// built-in variable: docs/reference/buildx_bake.md#built-in-variables
|
|
||||||
"BAKE_CMD_CONTEXT": cmdContext,
|
|
||||||
"BAKE_LOCAL_PLATFORM": platforms.DefaultString(),
|
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// this function can update target context string from the input so call before printOnly check
|
// this function can update target context string from the input so call before printOnly check
|
||||||
bo, err := bake.TargetsToBuildOpt(tgts, inp)
|
bo, err := bake.TargetsToBuildOpt(m, inp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if in.printOnly {
|
if in.printOnly {
|
||||||
var defg map[string]*bake.Group
|
dt, err := json.MarshalIndent(map[string]map[string]*bake.Target{"target": m}, "", " ")
|
||||||
if len(grps) == 1 {
|
|
||||||
defg = map[string]*bake.Group{
|
|
||||||
"default": grps[0],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
dt, err := json.MarshalIndent(struct {
|
|
||||||
Group map[string]*bake.Group `json:"group,omitempty"`
|
|
||||||
Target map[string]*bake.Target `json:"target"`
|
|
||||||
}{
|
|
||||||
defg,
|
|
||||||
tgts,
|
|
||||||
}, "", " ")
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -145,17 +130,21 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
resp, err := build.Build(ctx, dis, bo, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer)
|
resp, err := build.Build(ctx, dis, bo, dockerAPI(dockerCli), dockerCli.ConfigFile(), printer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return wrapBuildError(err, true)
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(in.metadataFile) > 0 {
|
if len(in.metadataFile) > 0 && resp != nil {
|
||||||
dt := make(map[string]interface{})
|
mdata := map[string]map[string]string{}
|
||||||
for t, r := range resp {
|
for k, r := range resp {
|
||||||
dt[t] = decodeExporterResponse(r.ExporterResponse)
|
mdata[k] = r.ExporterResponse
|
||||||
}
|
}
|
||||||
if err := writeMetadataFile(in.metadataFile, dt); err != nil {
|
mdatab, err := json.MarshalIndent(mdata, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := ioutils.AtomicWriteFile(in.metadataFile, mdatab, 0644); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -186,10 +175,10 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
|
|
||||||
flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
|
flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
|
||||||
flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--set=*.output=type=docker"`)
|
|
||||||
flags.BoolVar(&options.printOnly, "print", false, "Print the options without building")
|
flags.BoolVar(&options.printOnly, "print", false, "Print the options without building")
|
||||||
flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--set=*.output=type=registry"`)
|
flags.StringArrayVar(&options.overrides, "set", nil, "Override target value (eg: targetpattern.key=value)")
|
||||||
flags.StringArrayVar(&options.overrides, "set", nil, `Override target value (e.g., "targetpattern.key=value")`)
|
flags.BoolVar(&options.exportPush, "push", false, "Shorthand for --set=*.output=type=registry")
|
||||||
|
flags.BoolVar(&options.exportLoad, "load", false, "Shorthand for --set=*.output=type=docker")
|
||||||
|
|
||||||
commonBuildFlags(&options.commonOptions, flags)
|
commonBuildFlags(&options.commonOptions, flags)
|
||||||
|
|
||||||
|
|||||||
@@ -1,78 +1,79 @@
|
|||||||
package commands
|
package commands
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"context"
|
"context"
|
||||||
"encoding/base64"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/build"
|
||||||
"github.com/docker/buildx/util/buildflags"
|
"github.com/docker/buildx/util/buildflags"
|
||||||
"github.com/docker/buildx/util/confutil"
|
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/buildx/util/tracing"
|
"github.com/docker/buildx/util/tracing"
|
||||||
"github.com/docker/cli-docs-tool/annotation"
|
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
dockeropts "github.com/docker/cli/opts"
|
|
||||||
"github.com/docker/distribution/reference"
|
|
||||||
"github.com/docker/docker/pkg/ioutils"
|
"github.com/docker/docker/pkg/ioutils"
|
||||||
"github.com/docker/go-units"
|
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/session/auth/authprovider"
|
"github.com/moby/buildkit/session/auth/authprovider"
|
||||||
"github.com/moby/buildkit/solver/errdefs"
|
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
"github.com/moby/buildkit/util/grpcerrors"
|
|
||||||
"github.com/morikuni/aec"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const defaultTargetName = "default"
|
const defaultTargetName = "default"
|
||||||
|
|
||||||
type buildOptions struct {
|
type buildOptions struct {
|
||||||
|
commonOptions
|
||||||
contextPath string
|
contextPath string
|
||||||
dockerfileName string
|
dockerfileName string
|
||||||
|
tags []string
|
||||||
|
labels []string
|
||||||
|
buildArgs []string
|
||||||
|
|
||||||
allow []string
|
cacheFrom []string
|
||||||
buildArgs []string
|
cacheTo []string
|
||||||
cacheFrom []string
|
target string
|
||||||
cacheTo []string
|
platforms []string
|
||||||
cgroupParent string
|
secrets []string
|
||||||
contexts []string
|
ssh []string
|
||||||
extraHosts []string
|
outputs []string
|
||||||
imageIDFile string
|
imageIDFile string
|
||||||
labels []string
|
extraHosts []string
|
||||||
networkMode string
|
networkMode string
|
||||||
noCacheFilter []string
|
|
||||||
outputs []string
|
// unimplemented
|
||||||
platforms []string
|
squash bool
|
||||||
quiet bool
|
quiet bool
|
||||||
secrets []string
|
|
||||||
shmSize dockeropts.MemBytes
|
allow []string
|
||||||
ssh []string
|
|
||||||
tags []string
|
// hidden
|
||||||
target string
|
// untrusted bool
|
||||||
ulimits *dockeropts.UlimitOpt
|
// ulimits *opts.UlimitOpt
|
||||||
commonOptions
|
// memory opts.MemBytes
|
||||||
|
// memorySwap opts.MemSwapBytes
|
||||||
|
// shmSize opts.MemBytes
|
||||||
|
// cpuShares int64
|
||||||
|
// cpuPeriod int64
|
||||||
|
// cpuQuota int64
|
||||||
|
// cpuSetCpus string
|
||||||
|
// cpuSetMems string
|
||||||
|
// cgroupParent string
|
||||||
|
// isolation string
|
||||||
|
// compress bool
|
||||||
|
// securityOpt []string
|
||||||
}
|
}
|
||||||
|
|
||||||
type commonOptions struct {
|
type commonOptions struct {
|
||||||
builder string
|
builder string
|
||||||
metadataFile string
|
|
||||||
noCache *bool
|
noCache *bool
|
||||||
progress string
|
progress string
|
||||||
pull *bool
|
pull *bool
|
||||||
|
metadataFile string
|
||||||
// golangci-lint#826
|
// golangci-lint#826
|
||||||
// nolint:structcheck
|
// nolint:structcheck
|
||||||
exportPush bool
|
exportPush bool
|
||||||
@@ -81,6 +82,13 @@ type commonOptions struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
|
func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
|
||||||
|
if in.squash {
|
||||||
|
return errors.Errorf("squash currently not implemented")
|
||||||
|
}
|
||||||
|
if in.quiet {
|
||||||
|
logrus.Warnf("quiet currently not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
ctx, end, err := tracing.TraceCurrentCommand(ctx, "build")
|
ctx, end, err := tracing.TraceCurrentCommand(ctx, "build")
|
||||||
@@ -100,40 +108,21 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
|
|||||||
pull = *in.pull
|
pull = *in.pull
|
||||||
}
|
}
|
||||||
|
|
||||||
if noCache && len(in.noCacheFilter) > 0 {
|
|
||||||
return errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
|
|
||||||
}
|
|
||||||
|
|
||||||
if in.quiet && in.progress != "auto" && in.progress != "quiet" {
|
|
||||||
return errors.Errorf("progress=%s and quiet cannot be used together", in.progress)
|
|
||||||
} else if in.quiet {
|
|
||||||
in.progress = "quiet"
|
|
||||||
}
|
|
||||||
|
|
||||||
contexts, err := parseContextNames(in.contexts)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
opts := build.Options{
|
opts := build.Options{
|
||||||
Inputs: build.Inputs{
|
Inputs: build.Inputs{
|
||||||
ContextPath: in.contextPath,
|
ContextPath: in.contextPath,
|
||||||
DockerfilePath: in.dockerfileName,
|
DockerfilePath: in.dockerfileName,
|
||||||
InStream: os.Stdin,
|
InStream: os.Stdin,
|
||||||
NamedContexts: contexts,
|
|
||||||
},
|
},
|
||||||
BuildArgs: listToMap(in.buildArgs, true),
|
Tags: in.tags,
|
||||||
ExtraHosts: in.extraHosts,
|
Labels: listToMap(in.labels, false),
|
||||||
ImageIDFile: in.imageIDFile,
|
BuildArgs: listToMap(in.buildArgs, true),
|
||||||
Labels: listToMap(in.labels, false),
|
Pull: pull,
|
||||||
NetworkMode: in.networkMode,
|
NoCache: noCache,
|
||||||
NoCache: noCache,
|
Target: in.target,
|
||||||
NoCacheFilter: in.noCacheFilter,
|
ImageIDFile: in.imageIDFile,
|
||||||
Pull: pull,
|
ExtraHosts: in.extraHosts,
|
||||||
ShmSize: in.shmSize,
|
NetworkMode: in.networkMode,
|
||||||
Tags: in.tags,
|
|
||||||
Target: in.target,
|
|
||||||
Ulimits: in.ulimits,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
platforms, err := platformutil.Parse(in.platforms)
|
platforms, err := platformutil.Parse(in.platforms)
|
||||||
@@ -225,98 +214,43 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
|
|||||||
contextPathHash = in.contextPath
|
contextPathHash = in.contextPath
|
||||||
}
|
}
|
||||||
|
|
||||||
imageID, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile)
|
return buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile)
|
||||||
err = wrapBuildError(err, false)
|
}
|
||||||
|
|
||||||
|
func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]build.Options, progressMode, contextPathHash, instance string, metadataFile string) error {
|
||||||
|
dis, err := getInstanceOrDefault(ctx, dockerCli, instance, contextPathHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if in.quiet {
|
|
||||||
fmt.Println(imageID)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]build.Options, progressMode, contextPathHash, instance string, metadataFile string) (imageID string, err error) {
|
|
||||||
dis, err := getInstanceOrDefault(ctx, dockerCli, instance, contextPathHash)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx2, cancel := context.WithCancel(context.TODO())
|
ctx2, cancel := context.WithCancel(context.TODO())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
printer := progress.NewPrinter(ctx2, os.Stderr, progressMode)
|
||||||
|
|
||||||
printer := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, progressMode)
|
resp, err := build.Build(ctx, dis, opts, dockerAPI(dockerCli), dockerCli.ConfigFile(), printer)
|
||||||
|
|
||||||
resp, err := build.Build(ctx, dis, opts, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer)
|
|
||||||
err1 := printer.Wait()
|
err1 := printer.Wait()
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err = err1
|
err = err1
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(metadataFile) > 0 && resp != nil {
|
if len(metadataFile) > 0 && resp != nil {
|
||||||
if err := writeMetadataFile(metadataFile, decodeExporterResponse(resp[defaultTargetName].ExporterResponse)); err != nil {
|
mdatab, err := json.MarshalIndent(resp[defaultTargetName].ExporterResponse, "", " ")
|
||||||
return "", err
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := ioutils.AtomicWriteFile(metadataFile, mdatab, 0644); err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
printWarnings(os.Stderr, printer.Warnings(), progressMode)
|
return err
|
||||||
|
|
||||||
return resp[defaultTargetName].ExporterResponse["containerimage.digest"], err
|
|
||||||
}
|
|
||||||
|
|
||||||
func printWarnings(w io.Writer, warnings []client.VertexWarning, mode string) {
|
|
||||||
if len(warnings) == 0 || mode == progress.PrinterModeQuiet {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
fmt.Fprintf(w, "\n ")
|
|
||||||
sb := &bytes.Buffer{}
|
|
||||||
if len(warnings) == 1 {
|
|
||||||
fmt.Fprintf(sb, "1 warning found")
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(sb, "%d warnings found", len(warnings))
|
|
||||||
}
|
|
||||||
if logrus.GetLevel() < logrus.DebugLevel {
|
|
||||||
fmt.Fprintf(sb, " (use --debug to expand)")
|
|
||||||
}
|
|
||||||
fmt.Fprintf(sb, ":\n")
|
|
||||||
fmt.Fprint(w, aec.Apply(sb.String(), aec.YellowF))
|
|
||||||
|
|
||||||
for _, warn := range warnings {
|
|
||||||
fmt.Fprintf(w, " - %s\n", warn.Short)
|
|
||||||
if logrus.GetLevel() < logrus.DebugLevel {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for _, d := range warn.Detail {
|
|
||||||
fmt.Fprintf(w, "%s\n", d)
|
|
||||||
}
|
|
||||||
if warn.URL != "" {
|
|
||||||
fmt.Fprintf(w, "More info: %s\n", warn.URL)
|
|
||||||
}
|
|
||||||
if warn.SourceInfo != nil && warn.Range != nil {
|
|
||||||
src := errdefs.Source{
|
|
||||||
Info: warn.SourceInfo,
|
|
||||||
Ranges: warn.Range,
|
|
||||||
}
|
|
||||||
src.Print(w)
|
|
||||||
}
|
|
||||||
fmt.Fprintf(w, "\n")
|
|
||||||
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newBuildOptions() buildOptions {
|
|
||||||
ulimits := make(map[string]*units.Ulimit)
|
|
||||||
return buildOptions{
|
|
||||||
ulimits: dockeropts.NewUlimitOpt(&ulimits),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||||
options := newBuildOptions()
|
var options buildOptions
|
||||||
|
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Use: "build [OPTIONS] PATH | URL | -",
|
Use: "build [OPTIONS] PATH | URL | -",
|
||||||
@@ -326,140 +260,103 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
options.contextPath = args[0]
|
options.contextPath = args[0]
|
||||||
options.builder = rootOpts.builder
|
options.builder = rootOpts.builder
|
||||||
cmd.Flags().VisitAll(checkWarnedFlags)
|
|
||||||
return runBuild(dockerCli, options)
|
return runBuild(dockerCli, options)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
var platformsDefault []string
|
|
||||||
if v := os.Getenv("DOCKER_DEFAULT_PLATFORM"); v != "" {
|
|
||||||
platformsDefault = []string{v}
|
|
||||||
}
|
|
||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
|
|
||||||
flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, `Add a custom host-to-IP mapping (format: "host:ip")`)
|
flags.BoolVar(&options.exportPush, "push", false, "Shorthand for --output=type=registry")
|
||||||
flags.SetAnnotation("add-host", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host"})
|
flags.BoolVar(&options.exportLoad, "load", false, "Shorthand for --output=type=docker")
|
||||||
|
|
||||||
flags.StringSliceVar(&options.allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)
|
|
||||||
|
|
||||||
|
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Name and optionally a tag in the 'name:tag' format")
|
||||||
|
flags.SetAnnotation("tag", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t"})
|
||||||
flags.StringArrayVar(&options.buildArgs, "build-arg", []string{}, "Set build-time variables")
|
flags.StringArrayVar(&options.buildArgs, "build-arg", []string{}, "Set build-time variables")
|
||||||
|
flags.SetAnnotation("build-arg", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg"})
|
||||||
|
|
||||||
flags.StringArrayVar(&options.cacheFrom, "cache-from", []string{}, `External cache sources (e.g., "user/app:cache", "type=local,src=path/to/dir")`)
|
flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
|
||||||
|
flags.SetAnnotation("file", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f"})
|
||||||
flags.StringArrayVar(&options.cacheTo, "cache-to", []string{}, `Cache export destinations (e.g., "user/app:cache", "type=local,dest=path/to/dir")`)
|
|
||||||
|
|
||||||
flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container")
|
|
||||||
flags.SetAnnotation("cgroup-parent", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent"})
|
|
||||||
|
|
||||||
flags.StringArrayVar(&options.contexts, "build-context", []string{}, "Additional build contexts (e.g., name=path)")
|
|
||||||
|
|
||||||
flags.StringVarP(&options.dockerfileName, "file", "f", "", `Name of the Dockerfile (default: "PATH/Dockerfile")`)
|
|
||||||
flags.SetAnnotation("file", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f"})
|
|
||||||
|
|
||||||
flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")
|
|
||||||
|
|
||||||
flags.StringArrayVar(&options.labels, "label", []string{}, "Set metadata for an image")
|
flags.StringArrayVar(&options.labels, "label", []string{}, "Set metadata for an image")
|
||||||
|
|
||||||
flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--output=type=docker"`)
|
flags.StringArrayVar(&options.cacheFrom, "cache-from", []string{}, "External cache sources (eg. user/app:cache, type=local,src=path/to/dir)")
|
||||||
|
flags.StringArrayVar(&options.cacheTo, "cache-to", []string{}, "Cache export destinations (eg. user/app:cache, type=local,dest=path/to/dir)")
|
||||||
|
|
||||||
flags.StringVar(&options.networkMode, "network", "default", `Set the networking mode for the "RUN" instructions during build`)
|
flags.StringVar(&options.target, "target", "", "Set the target build stage to build.")
|
||||||
|
flags.SetAnnotation("target", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target"})
|
||||||
|
|
||||||
flags.StringArrayVar(&options.noCacheFilter, "no-cache-filter", []string{}, "Do not cache specified stages")
|
flags.StringSliceVar(&options.allow, "allow", []string{}, "Allow extra privileged entitlement, e.g. network.host, security.insecure")
|
||||||
|
|
||||||
flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, `Output destination (format: "type=local,dest=path")`)
|
|
||||||
|
|
||||||
flags.StringArrayVar(&options.platforms, "platform", platformsDefault, "Set target platform for build")
|
|
||||||
|
|
||||||
flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--output=type=registry"`)
|
|
||||||
|
|
||||||
|
// not implemented
|
||||||
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
|
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
|
||||||
|
flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")
|
||||||
flags.StringArrayVar(&options.secrets, "secret", []string{}, `Secret to expose to the build (format: "id=mysecret[,src=/local/secret]")`)
|
flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, "Add a custom host-to-IP mapping (host:ip)")
|
||||||
|
flags.SetAnnotation("add-host", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host"})
|
||||||
flags.Var(&options.shmSize, "shm-size", `Size of "/dev/shm"`)
|
flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")
|
||||||
|
flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer")
|
||||||
flags.StringArrayVar(&options.ssh, "ssh", []string{}, `SSH agent socket or keys to expose to the build (format: "default|<id>[=<socket>|<key>[,<key>]]")`)
|
flags.MarkHidden("quiet")
|
||||||
|
flags.MarkHidden("squash")
|
||||||
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, `Name and optionally a tag (format: "name:tag")`)
|
|
||||||
flags.SetAnnotation("tag", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t"})
|
|
||||||
|
|
||||||
flags.StringVar(&options.target, "target", "", "Set the target build stage to build")
|
|
||||||
flags.SetAnnotation("target", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target"})
|
|
||||||
|
|
||||||
flags.Var(options.ulimits, "ulimit", "Ulimit options")
|
|
||||||
|
|
||||||
// hidden flags
|
// hidden flags
|
||||||
var ignore string
|
var ignore string
|
||||||
var ignoreSlice []string
|
var ignoreSlice []string
|
||||||
var ignoreBool bool
|
var ignoreBool bool
|
||||||
var ignoreInt int64
|
var ignoreInt int64
|
||||||
|
flags.StringVar(&ignore, "ulimit", "", "Ulimit options")
|
||||||
flags.BoolVar(&ignoreBool, "compress", false, "Compress the build context using gzip")
|
flags.MarkHidden("ulimit")
|
||||||
flags.MarkHidden("compress")
|
|
||||||
|
|
||||||
flags.StringVar(&ignore, "isolation", "", "Container isolation technology")
|
|
||||||
flags.MarkHidden("isolation")
|
|
||||||
flags.SetAnnotation("isolation", "flag-warn", []string{"isolation flag is deprecated with BuildKit."})
|
|
||||||
|
|
||||||
flags.StringSliceVar(&ignoreSlice, "security-opt", []string{}, "Security options")
|
flags.StringSliceVar(&ignoreSlice, "security-opt", []string{}, "Security options")
|
||||||
flags.MarkHidden("security-opt")
|
flags.MarkHidden("security-opt")
|
||||||
flags.SetAnnotation("security-opt", "flag-warn", []string{`security-opt flag is deprecated. "RUN --security=insecure" should be used with BuildKit.`})
|
flags.BoolVar(&ignoreBool, "compress", false, "Compress the build context using gzip")
|
||||||
|
flags.MarkHidden("compress")
|
||||||
flags.BoolVar(&ignoreBool, "squash", false, "Squash newly built layers into a single new layer")
|
|
||||||
flags.MarkHidden("squash")
|
|
||||||
flags.SetAnnotation("squash", "flag-warn", []string{"experimental flag squash is removed with BuildKit. You should squash inside build using a multi-stage Dockerfile for efficiency."})
|
|
||||||
|
|
||||||
flags.StringVarP(&ignore, "memory", "m", "", "Memory limit")
|
flags.StringVarP(&ignore, "memory", "m", "", "Memory limit")
|
||||||
flags.MarkHidden("memory")
|
flags.MarkHidden("memory")
|
||||||
|
flags.StringVar(&ignore, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap")
|
||||||
flags.StringVar(&ignore, "memory-swap", "", `Swap limit equal to memory plus swap: "-1" to enable unlimited swap`)
|
|
||||||
flags.MarkHidden("memory-swap")
|
flags.MarkHidden("memory-swap")
|
||||||
|
flags.StringVar(&ignore, "shm-size", "", "Size of /dev/shm")
|
||||||
|
flags.MarkHidden("shm-size")
|
||||||
flags.Int64VarP(&ignoreInt, "cpu-shares", "c", 0, "CPU shares (relative weight)")
|
flags.Int64VarP(&ignoreInt, "cpu-shares", "c", 0, "CPU shares (relative weight)")
|
||||||
flags.MarkHidden("cpu-shares")
|
flags.MarkHidden("cpu-shares")
|
||||||
|
|
||||||
flags.Int64Var(&ignoreInt, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
|
flags.Int64Var(&ignoreInt, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
|
||||||
flags.MarkHidden("cpu-period")
|
flags.MarkHidden("cpu-period")
|
||||||
|
|
||||||
flags.Int64Var(&ignoreInt, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
|
flags.Int64Var(&ignoreInt, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
|
||||||
flags.MarkHidden("cpu-quota")
|
flags.MarkHidden("cpu-quota")
|
||||||
|
flags.StringVar(&ignore, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
|
||||||
flags.StringVar(&ignore, "cpuset-cpus", "", `CPUs in which to allow execution ("0-3", "0,1")`)
|
|
||||||
flags.MarkHidden("cpuset-cpus")
|
flags.MarkHidden("cpuset-cpus")
|
||||||
|
flags.StringVar(&ignore, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)")
|
||||||
flags.StringVar(&ignore, "cpuset-mems", "", `MEMs in which to allow execution ("0-3", "0,1")`)
|
|
||||||
flags.MarkHidden("cpuset-mems")
|
flags.MarkHidden("cpuset-mems")
|
||||||
|
flags.StringVar(&ignore, "cgroup-parent", "", "Optional parent cgroup for the container")
|
||||||
|
flags.MarkHidden("cgroup-parent")
|
||||||
|
flags.StringVar(&ignore, "isolation", "", "Container isolation technology")
|
||||||
|
flags.MarkHidden("isolation")
|
||||||
flags.BoolVar(&ignoreBool, "rm", true, "Remove intermediate containers after a successful build")
|
flags.BoolVar(&ignoreBool, "rm", true, "Remove intermediate containers after a successful build")
|
||||||
flags.MarkHidden("rm")
|
flags.MarkHidden("rm")
|
||||||
|
|
||||||
flags.BoolVar(&ignoreBool, "force-rm", false, "Always remove intermediate containers")
|
flags.BoolVar(&ignoreBool, "force-rm", false, "Always remove intermediate containers")
|
||||||
flags.MarkHidden("force-rm")
|
flags.MarkHidden("force-rm")
|
||||||
|
|
||||||
|
platformsDefault := []string{}
|
||||||
|
if v := os.Getenv("DOCKER_DEFAULT_PLATFORM"); v != "" {
|
||||||
|
platformsDefault = []string{v}
|
||||||
|
}
|
||||||
|
flags.StringArrayVar(&options.platforms, "platform", platformsDefault, "Set target platform for build")
|
||||||
|
|
||||||
|
flags.StringArrayVar(&options.secrets, "secret", []string{}, "Secret file to expose to the build: id=mysecret,src=/local/secret")
|
||||||
|
|
||||||
|
flags.StringArrayVar(&options.ssh, "ssh", []string{}, "SSH agent socket or keys to expose to the build (format: default|<id>[=<socket>|<key>[,<key>]])")
|
||||||
|
|
||||||
|
flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, "Output destination (format: type=local,dest=path)")
|
||||||
|
|
||||||
commonBuildFlags(&options.commonOptions, flags)
|
commonBuildFlags(&options.commonOptions, flags)
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
func commonBuildFlags(options *commonOptions, flags *pflag.FlagSet) {
|
func commonBuildFlags(options *commonOptions, flags *pflag.FlagSet) {
|
||||||
options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
|
options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
|
||||||
flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
|
flags.StringVar(&options.progress, "progress", "auto", "Set type of progress output (auto, plain, tty). Use plain to show container output")
|
||||||
options.pull = flags.Bool("pull", false, "Always attempt to pull all referenced images")
|
options.pull = flags.Bool("pull", false, "Always attempt to pull a newer version of the image")
|
||||||
flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to the file")
|
flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to the file")
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkWarnedFlags(f *pflag.Flag) {
|
|
||||||
if !f.Changed {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for t, m := range f.Annotations {
|
|
||||||
switch t {
|
|
||||||
case "flag-warn":
|
|
||||||
logrus.Warn(m[0])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func listToMap(values []string, defaultEnv bool) map[string]string {
|
func listToMap(values []string, defaultEnv bool) map[string]string {
|
||||||
result := make(map[string]string, len(values))
|
result := make(map[string]string, len(values))
|
||||||
for _, value := range values {
|
for _, value := range values {
|
||||||
@@ -479,80 +376,3 @@ func listToMap(values []string, defaultEnv bool) map[string]string {
|
|||||||
}
|
}
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseContextNames(values []string) (map[string]build.NamedContext, error) {
|
|
||||||
if len(values) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
result := make(map[string]build.NamedContext, len(values))
|
|
||||||
for _, value := range values {
|
|
||||||
kv := strings.SplitN(value, "=", 2)
|
|
||||||
if len(kv) != 2 {
|
|
||||||
return nil, errors.Errorf("invalid context value: %s, expected key=value", value)
|
|
||||||
}
|
|
||||||
named, err := reference.ParseNormalizedNamed(kv[0])
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "invalid context name %s", kv[0])
|
|
||||||
}
|
|
||||||
name := strings.TrimSuffix(reference.FamiliarString(named), ":latest")
|
|
||||||
result[name] = build.NamedContext{Path: kv[1]}
|
|
||||||
}
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func writeMetadataFile(filename string, dt interface{}) error {
|
|
||||||
b, err := json.MarshalIndent(dt, "", " ")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return ioutils.AtomicWriteFile(filename, b, 0644)
|
|
||||||
}
|
|
||||||
|
|
||||||
func decodeExporterResponse(exporterResponse map[string]string) map[string]interface{} {
|
|
||||||
out := make(map[string]interface{})
|
|
||||||
for k, v := range exporterResponse {
|
|
||||||
dt, err := base64.StdEncoding.DecodeString(v)
|
|
||||||
if err != nil {
|
|
||||||
out[k] = v
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
var raw map[string]interface{}
|
|
||||||
if err = json.Unmarshal(dt, &raw); err != nil || len(raw) == 0 {
|
|
||||||
out[k] = v
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
out[k] = json.RawMessage(dt)
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
func wrapBuildError(err error, bake bool) error {
|
|
||||||
if err == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
st, ok := grpcerrors.AsGRPCStatus(err)
|
|
||||||
if ok {
|
|
||||||
if st.Code() == codes.Unimplemented && strings.Contains(st.Message(), "unsupported frontend capability moby.buildkit.frontend.contexts") {
|
|
||||||
msg := "current frontend does not support --build-context."
|
|
||||||
if bake {
|
|
||||||
msg = "current frontend does not support defining additional contexts for targets."
|
|
||||||
}
|
|
||||||
msg += " Named contexts are supported since Dockerfile v1.4. Use #syntax directive in Dockerfile or update to latest BuildKit."
|
|
||||||
return &wrapped{err, msg}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
type wrapped struct {
|
|
||||||
err error
|
|
||||||
msg string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *wrapped) Error() string {
|
|
||||||
return w.msg
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *wrapped) Unwrap() error {
|
|
||||||
return w.err
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -1,19 +1,14 @@
|
|||||||
package commands
|
package commands
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/csv"
|
"encoding/csv"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
|
||||||
"github.com/docker/buildx/util/cobrautil"
|
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/google/shlex"
|
"github.com/google/shlex"
|
||||||
@@ -34,7 +29,6 @@ type createOptions struct {
|
|||||||
flags string
|
flags string
|
||||||
configFile string
|
configFile string
|
||||||
driverOpts []string
|
driverOpts []string
|
||||||
bootstrap bool
|
|
||||||
// upgrade bool // perform upgrade of the driver
|
// upgrade bool // perform upgrade of the driver
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -76,7 +70,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
return errors.Errorf("failed to find driver %q", in.driver)
|
return errors.Errorf("failed to find driver %q", in.driver)
|
||||||
}
|
}
|
||||||
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
txn, release, err := getStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -144,7 +138,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
return errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`")
|
return errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`")
|
||||||
}
|
}
|
||||||
|
|
||||||
ep, err = storeutil.GetCurrentEndpoint(dockerCli)
|
ep, err = getCurrentEndpoint(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -176,7 +170,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if in.use && ep != "" {
|
if in.use && ep != "" {
|
||||||
current, err := storeutil.GetCurrentEndpoint(dockerCli)
|
current, err := getCurrentEndpoint(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -185,21 +179,6 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ngi := &nginfo{ng: ng}
|
|
||||||
|
|
||||||
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
if err = loadNodeGroupData(timeoutCtx, dockerCli, ngi); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if in.bootstrap {
|
|
||||||
if _, err = boot(ctx, ngi); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fmt.Printf("%s\n", ng.Name)
|
fmt.Printf("%s\n", ng.Name)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -207,12 +186,9 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
func createCmd(dockerCli command.Cli) *cobra.Command {
|
func createCmd(dockerCli command.Cli) *cobra.Command {
|
||||||
var options createOptions
|
var options createOptions
|
||||||
|
|
||||||
var drivers bytes.Buffer
|
var drivers []string
|
||||||
for _, d := range driver.GetFactories() {
|
for s := range driver.GetFactories() {
|
||||||
if len(drivers.String()) > 0 {
|
drivers = append(drivers, s)
|
||||||
drivers.WriteString(", ")
|
|
||||||
}
|
|
||||||
drivers.WriteString(fmt.Sprintf(`"%s"`, d.Name()))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
@@ -227,20 +203,18 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
|
|
||||||
flags.StringVar(&options.name, "name", "", "Builder instance name")
|
flags.StringVar(&options.name, "name", "", "Builder instance name")
|
||||||
flags.StringVar(&options.driver, "driver", "", fmt.Sprintf("Driver to use (available: %s)", drivers.String()))
|
flags.StringVar(&options.driver, "driver", "", fmt.Sprintf("Driver to use (available: %v)", drivers))
|
||||||
flags.StringVar(&options.nodeName, "node", "", "Create/modify node with given name")
|
flags.StringVar(&options.nodeName, "node", "", "Create/modify node with given name")
|
||||||
flags.StringVar(&options.flags, "buildkitd-flags", "", "Flags for buildkitd daemon")
|
flags.StringVar(&options.flags, "buildkitd-flags", "", "Flags for buildkitd daemon")
|
||||||
flags.StringVar(&options.configFile, "config", "", "BuildKit config file")
|
flags.StringVar(&options.configFile, "config", "", "BuildKit config file")
|
||||||
flags.StringArrayVar(&options.platform, "platform", []string{}, "Fixed platforms for current node")
|
flags.StringArrayVar(&options.platform, "platform", []string{}, "Fixed platforms for current node")
|
||||||
flags.StringArrayVar(&options.driverOpts, "driver-opt", []string{}, "Options for the driver")
|
flags.StringArrayVar(&options.driverOpts, "driver-opt", []string{}, "Options for the driver")
|
||||||
flags.BoolVar(&options.bootstrap, "bootstrap", false, "Boot builder after creation")
|
|
||||||
|
|
||||||
flags.BoolVar(&options.actionAppend, "append", false, "Append a node to builder instead of changing it")
|
flags.BoolVar(&options.actionAppend, "append", false, "Append a node to builder instead of changing it")
|
||||||
flags.BoolVar(&options.actionLeave, "leave", false, "Remove a node from builder instead of changing it")
|
flags.BoolVar(&options.actionLeave, "leave", false, "Remove a node from builder instead of changing it")
|
||||||
flags.BoolVar(&options.use, "use", false, "Set the current builder instance")
|
flags.BoolVar(&options.use, "use", false, "Set the current builder instance")
|
||||||
|
|
||||||
// hide builder persistent flag for this command
|
_ = flags
|
||||||
cobrautil.HideInheritedFlags(cmd, "builder")
|
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,18 +4,16 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
|
||||||
"text/tabwriter"
|
"text/tabwriter"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/build"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/opts"
|
"github.com/docker/cli/opts"
|
||||||
"github.com/docker/go-units"
|
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
"github.com/tonistiigi/units"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -127,20 +125,20 @@ func printKV(w io.Writer, k string, v interface{}) {
|
|||||||
func printVerbose(tw *tabwriter.Writer, du []*client.UsageInfo) {
|
func printVerbose(tw *tabwriter.Writer, du []*client.UsageInfo) {
|
||||||
for _, di := range du {
|
for _, di := range du {
|
||||||
printKV(tw, "ID", di.ID)
|
printKV(tw, "ID", di.ID)
|
||||||
if len(di.Parents) != 0 {
|
if di.Parent != "" {
|
||||||
printKV(tw, "Parent", strings.Join(di.Parents, ","))
|
printKV(tw, "Parent", di.Parent)
|
||||||
}
|
}
|
||||||
printKV(tw, "Created at", di.CreatedAt)
|
printKV(tw, "Created at", di.CreatedAt)
|
||||||
printKV(tw, "Mutable", di.Mutable)
|
printKV(tw, "Mutable", di.Mutable)
|
||||||
printKV(tw, "Reclaimable", !di.InUse)
|
printKV(tw, "Reclaimable", !di.InUse)
|
||||||
printKV(tw, "Shared", di.Shared)
|
printKV(tw, "Shared", di.Shared)
|
||||||
printKV(tw, "Size", units.HumanSize(float64(di.Size)))
|
printKV(tw, "Size", fmt.Sprintf("%.2f", units.Bytes(di.Size)))
|
||||||
if di.Description != "" {
|
if di.Description != "" {
|
||||||
printKV(tw, "Description", di.Description)
|
printKV(tw, "Description", di.Description)
|
||||||
}
|
}
|
||||||
printKV(tw, "Usage count", di.UsageCount)
|
printKV(tw, "Usage count", di.UsageCount)
|
||||||
if di.LastUsedAt != nil {
|
if di.LastUsedAt != nil {
|
||||||
printKV(tw, "Last used", units.HumanDuration(time.Since(*di.LastUsedAt))+" ago")
|
printKV(tw, "Last used", di.LastUsedAt)
|
||||||
}
|
}
|
||||||
if di.RecordType != "" {
|
if di.RecordType != "" {
|
||||||
printKV(tw, "Type", di.RecordType)
|
printKV(tw, "Type", di.RecordType)
|
||||||
@@ -161,15 +159,11 @@ func printTableRow(tw *tabwriter.Writer, di *client.UsageInfo) {
|
|||||||
if di.Mutable {
|
if di.Mutable {
|
||||||
id += "*"
|
id += "*"
|
||||||
}
|
}
|
||||||
size := units.HumanSize(float64(di.Size))
|
size := fmt.Sprintf("%.2f", units.Bytes(di.Size))
|
||||||
if di.Shared {
|
if di.Shared {
|
||||||
size += "*"
|
size += "*"
|
||||||
}
|
}
|
||||||
lastAccessed := ""
|
fmt.Fprintf(tw, "%-71s\t%-11v\t%s\t\n", id, !di.InUse, size)
|
||||||
if di.LastUsedAt != nil {
|
|
||||||
lastAccessed = units.HumanDuration(time.Since(*di.LastUsedAt)) + " ago"
|
|
||||||
}
|
|
||||||
fmt.Fprintf(tw, "%-40s\t%-5v\t%-10s\t%s\n", id, !di.InUse, size, lastAccessed)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func printSummary(tw *tabwriter.Writer, dus [][]*client.UsageInfo) {
|
func printSummary(tw *tabwriter.Writer, dus [][]*client.UsageInfo) {
|
||||||
@@ -192,11 +186,11 @@ func printSummary(tw *tabwriter.Writer, dus [][]*client.UsageInfo) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if shared > 0 {
|
if shared > 0 {
|
||||||
fmt.Fprintf(tw, "Shared:\t%s\n", units.HumanSize(float64(shared)))
|
fmt.Fprintf(tw, "Shared:\t%.2f\n", units.Bytes(shared))
|
||||||
fmt.Fprintf(tw, "Private:\t%s\n", units.HumanSize(float64(total-shared)))
|
fmt.Fprintf(tw, "Private:\t%.2f\n", units.Bytes(total-shared))
|
||||||
}
|
}
|
||||||
|
|
||||||
fmt.Fprintf(tw, "Reclaimable:\t%s\n", units.HumanSize(float64(reclaimable)))
|
fmt.Fprintf(tw, "Reclaimable:\t%.2f\n", units.Bytes(reclaimable))
|
||||||
fmt.Fprintf(tw, "Total:\t%s\n", units.HumanSize(float64(total)))
|
fmt.Fprintf(tw, "Total:\t%.2f\n", units.Bytes(total))
|
||||||
tw.Flush()
|
tw.Flush()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,8 +6,6 @@ import (
|
|||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/buildx/store"
|
|
||||||
"github.com/docker/buildx/store/storeutil"
|
|
||||||
"github.com/docker/buildx/util/imagetools"
|
"github.com/docker/buildx/util/imagetools"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/distribution/reference"
|
"github.com/docker/distribution/reference"
|
||||||
@@ -20,7 +18,6 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type createOptions struct {
|
type createOptions struct {
|
||||||
builder string
|
|
||||||
files []string
|
files []string
|
||||||
tags []string
|
tags []string
|
||||||
dryrun bool
|
dryrun bool
|
||||||
@@ -104,32 +101,9 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
|
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
r := imagetools.New(imagetools.Opt{
|
||||||
if err != nil {
|
Auth: dockerCli.ConfigFile(),
|
||||||
return err
|
})
|
||||||
}
|
|
||||||
defer release()
|
|
||||||
|
|
||||||
var ng *store.NodeGroup
|
|
||||||
|
|
||||||
if in.builder != "" {
|
|
||||||
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
r := imagetools.New(imageopt)
|
|
||||||
|
|
||||||
if sourceRefs {
|
if sourceRefs {
|
||||||
eg, ctx2 := errgroup.WithContext(ctx)
|
eg, ctx2 := errgroup.WithContext(ctx)
|
||||||
@@ -178,7 +152,9 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// new resolver cause need new auth
|
// new resolver cause need new auth
|
||||||
r = imagetools.New(imageopt)
|
r = imagetools.New(imagetools.Opt{
|
||||||
|
Auth: dockerCli.ConfigFile(),
|
||||||
|
})
|
||||||
|
|
||||||
for _, t := range tags {
|
for _, t := range tags {
|
||||||
if err := r.Push(ctx, t, desc, dt); err != nil {
|
if err := r.Push(ctx, t, desc, dt); err != nil {
|
||||||
@@ -248,24 +224,26 @@ func parseSource(in string) (*src, error) {
|
|||||||
return &s, nil
|
return &s, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func createCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
|
func createCmd(dockerCli command.Cli) *cobra.Command {
|
||||||
var options createOptions
|
var options createOptions
|
||||||
|
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Use: "create [OPTIONS] [SOURCE] [SOURCE...]",
|
Use: "create [OPTIONS] [SOURCE] [SOURCE...]",
|
||||||
Short: "Create a new image based on source images",
|
Short: "Create a new image based on source images",
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
options.builder = opts.Builder
|
|
||||||
return runCreate(dockerCli, options, args)
|
return runCreate(dockerCli, options, args)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
|
|
||||||
flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Read source descriptor from file")
|
flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Read source descriptor from file")
|
||||||
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Set reference for new image")
|
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Set reference for new image")
|
||||||
flags.BoolVar(&options.dryrun, "dry-run", false, "Show final image instead of pushing")
|
flags.BoolVar(&options.dryrun, "dry-run", false, "Show final image instead of pushing")
|
||||||
flags.BoolVar(&options.actionAppend, "append", false, "Append to existing manifest")
|
flags.BoolVar(&options.actionAppend, "append", false, "Append to existing manifest")
|
||||||
|
|
||||||
|
_ = flags
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,82 +1,68 @@
|
|||||||
package commands
|
package commands
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/docker/buildx/store"
|
"fmt"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
"os"
|
||||||
|
|
||||||
|
"github.com/containerd/containerd/images"
|
||||||
"github.com/docker/buildx/util/imagetools"
|
"github.com/docker/buildx/util/imagetools"
|
||||||
"github.com/docker/cli-docs-tool/annotation"
|
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
"github.com/pkg/errors"
|
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
type inspectOptions struct {
|
type inspectOptions struct {
|
||||||
builder string
|
raw bool
|
||||||
format string
|
|
||||||
raw bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
|
func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
if in.format != "" && in.raw {
|
r := imagetools.New(imagetools.Opt{
|
||||||
return errors.Errorf("format and raw cannot be used together")
|
Auth: dockerCli.ConfigFile(),
|
||||||
}
|
})
|
||||||
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
dt, desc, err := r.Get(ctx, name)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer release()
|
|
||||||
|
|
||||||
var ng *store.NodeGroup
|
|
||||||
|
|
||||||
if in.builder != "" {
|
|
||||||
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
p, err := imagetools.NewPrinter(ctx, imageopt, name, in.format)
|
if in.raw {
|
||||||
if err != nil {
|
fmt.Printf("%s", dt) // avoid newline to keep digest
|
||||||
return err
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
return p.Print(in.raw, dockerCli.Out())
|
switch desc.MediaType {
|
||||||
|
// case images.MediaTypeDockerSchema2Manifest, specs.MediaTypeImageManifest:
|
||||||
|
// TODO: handle distribution manifest and schema1
|
||||||
|
case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
|
||||||
|
return imagetools.PrintManifestList(dt, desc, name, os.Stdout)
|
||||||
|
default:
|
||||||
|
fmt.Printf("%s\n", dt)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
func inspectCmd(dockerCli command.Cli) *cobra.Command {
|
||||||
var options inspectOptions
|
var options inspectOptions
|
||||||
|
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Use: "inspect [OPTIONS] NAME",
|
Use: "inspect [OPTIONS] NAME",
|
||||||
Short: "Show details of an image in the registry",
|
Short: "Show details of image in the registry",
|
||||||
Args: cli.ExactArgs(1),
|
Args: cli.ExactArgs(1),
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
options.builder = rootOpts.Builder
|
|
||||||
return runInspect(dockerCli, options, args[0])
|
return runInspect(dockerCli, options, args[0])
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
|
|
||||||
flags.StringVar(&options.format, "format", "", "Format the output using the given Go template")
|
flags.BoolVar(&options.raw, "raw", false, "Show original JSON manifest")
|
||||||
flags.SetAnnotation("format", annotation.DefaultValue, []string{`"{{.Manifest}}"`})
|
|
||||||
|
|
||||||
flags.BoolVar(&options.raw, "raw", false, "Show original, unformatted JSON manifest")
|
_ = flags
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,19 +5,15 @@ import (
|
|||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
type RootOptions struct {
|
func RootCmd(dockerCli command.Cli) *cobra.Command {
|
||||||
Builder string
|
|
||||||
}
|
|
||||||
|
|
||||||
func RootCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
|
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Use: "imagetools",
|
Use: "imagetools",
|
||||||
Short: "Commands to work on images in registry",
|
Short: "Commands to work on images in registry",
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd.AddCommand(
|
cmd.AddCommand(
|
||||||
createCmd(dockerCli, opts),
|
inspectCmd(dockerCli),
|
||||||
inspectCmd(dockerCli, opts),
|
createCmd(dockerCli),
|
||||||
)
|
)
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
|
|||||||
@@ -8,13 +8,17 @@ import (
|
|||||||
"text/tabwriter"
|
"text/tabwriter"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/build"
|
||||||
|
"github.com/docker/buildx/driver"
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
|
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
|
|
||||||
type inspectOptions struct {
|
type inspectOptions struct {
|
||||||
@@ -22,10 +26,23 @@ type inspectOptions struct {
|
|||||||
builder string
|
builder string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type dinfo struct {
|
||||||
|
di *build.DriverInfo
|
||||||
|
info *driver.Info
|
||||||
|
platforms []specs.Platform
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
type nginfo struct {
|
||||||
|
ng *store.NodeGroup
|
||||||
|
drivers []dinfo
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
func runInspect(dockerCli command.Cli, in inspectOptions) error {
|
func runInspect(dockerCli command.Cli, in inspectOptions) error {
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
txn, release, err := getStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -34,12 +51,12 @@ func runInspect(dockerCli command.Cli, in inspectOptions) error {
|
|||||||
var ng *store.NodeGroup
|
var ng *store.NodeGroup
|
||||||
|
|
||||||
if in.builder != "" {
|
if in.builder != "" {
|
||||||
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
|
ng, err = getNodeGroup(txn, dockerCli, in.builder)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
|
ng, err = getCurrentInstance(txn, dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -65,7 +82,7 @@ func runInspect(dockerCli command.Cli, in inspectOptions) error {
|
|||||||
var bootNgi *nginfo
|
var bootNgi *nginfo
|
||||||
if in.bootstrap {
|
if in.bootstrap {
|
||||||
var ok bool
|
var ok bool
|
||||||
ok, err = boot(ctx, ngi)
|
ok, err = boot(ctx, ngi, dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -132,7 +149,50 @@ func inspectCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
}
|
}
|
||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
|
|
||||||
flags.BoolVar(&options.bootstrap, "bootstrap", false, "Ensure builder has booted before inspecting")
|
flags.BoolVar(&options.bootstrap, "bootstrap", false, "Ensure builder has booted before inspecting")
|
||||||
|
|
||||||
|
_ = flags
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func boot(ctx context.Context, ngi *nginfo, dockerCli command.Cli) (bool, error) {
|
||||||
|
toBoot := make([]int, 0, len(ngi.drivers))
|
||||||
|
for i, d := range ngi.drivers {
|
||||||
|
if d.err != nil || d.di.Err != nil || d.di.Driver == nil || d.info == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if d.info.Status != driver.Running {
|
||||||
|
toBoot = append(toBoot, i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(toBoot) == 0 {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
printer := progress.NewPrinter(context.TODO(), os.Stderr, "auto")
|
||||||
|
|
||||||
|
baseCtx := ctx
|
||||||
|
eg, _ := errgroup.WithContext(ctx)
|
||||||
|
for _, idx := range toBoot {
|
||||||
|
func(idx int) {
|
||||||
|
eg.Go(func() error {
|
||||||
|
pw := progress.WithPrefix(printer, ngi.ng.Nodes[idx].Name, len(toBoot) > 1)
|
||||||
|
_, err := driver.Boot(ctx, baseCtx, ngi.drivers[idx].di.Driver, pw)
|
||||||
|
if err != nil {
|
||||||
|
ngi.drivers[idx].err = err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}(idx)
|
||||||
|
}
|
||||||
|
|
||||||
|
err := eg.Wait()
|
||||||
|
err1 := printer.Wait()
|
||||||
|
if err == nil {
|
||||||
|
err = err1
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, err
|
||||||
|
}
|
||||||
|
|||||||
@@ -3,7 +3,6 @@ package commands
|
|||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/docker/buildx/util/cobrautil"
|
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/cli/config"
|
"github.com/docker/cli/cli/config"
|
||||||
@@ -49,8 +48,5 @@ func installCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
Hidden: true,
|
Hidden: true,
|
||||||
}
|
}
|
||||||
|
|
||||||
// hide builder persistent flag for this command
|
|
||||||
cobrautil.HideInheritedFlags(cmd, "builder")
|
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -10,8 +10,6 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
|
||||||
"github.com/docker/buildx/util/cobrautil"
|
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
@@ -26,7 +24,7 @@ type lsOptions struct {
|
|||||||
func runLs(dockerCli command.Cli, in lsOptions) error {
|
func runLs(dockerCli command.Cli, in lsOptions) error {
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
txn, release, err := getStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -81,7 +79,7 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
currentName := "default"
|
currentName := "default"
|
||||||
current, err := storeutil.GetCurrentInstance(txn, dockerCli)
|
current, err := getCurrentInstance(txn, dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -149,8 +147,5 @@ func lsCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// hide builder persistent flag for this command
|
|
||||||
cobrautil.HideInheritedFlags(cmd, "builder")
|
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -12,11 +12,11 @@ import (
|
|||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/opts"
|
"github.com/docker/cli/opts"
|
||||||
"github.com/docker/docker/api/types/filters"
|
"github.com/docker/docker/api/types/filters"
|
||||||
"github.com/docker/go-units"
|
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
"github.com/tonistiigi/units"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -119,7 +119,7 @@ func runPrune(dockerCli command.Cli, opts pruneOptions) error {
|
|||||||
<-printed
|
<-printed
|
||||||
|
|
||||||
tw = tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
|
tw = tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
|
||||||
fmt.Fprintf(tw, "Total:\t%s\n", units.HumanSize(float64(total)))
|
fmt.Fprintf(tw, "Total:\t%.2f\n", units.Bytes(total))
|
||||||
tw.Flush()
|
tw.Flush()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -139,7 +139,7 @@ func pruneCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones")
|
flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones")
|
||||||
flags.Var(&options.filter, "filter", `Provide filter values (e.g., "until=24h")`)
|
flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'until=24h')")
|
||||||
flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache")
|
flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache")
|
||||||
flags.BoolVar(&options.verbose, "verbose", false, "Provide a more verbose output")
|
flags.BoolVar(&options.verbose, "verbose", false, "Provide a more verbose output")
|
||||||
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
|
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
|
||||||
|
|||||||
@@ -2,65 +2,46 @@ package commands
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type rmOptions struct {
|
type rmOptions struct {
|
||||||
builder string
|
builder string
|
||||||
keepState bool
|
keepState bool
|
||||||
keepDaemon bool
|
|
||||||
allInactive bool
|
|
||||||
force bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
|
||||||
rmInactiveWarning = `WARNING! This will remove all builders that are not in running state. Are you sure you want to continue?`
|
|
||||||
)
|
|
||||||
|
|
||||||
func runRm(dockerCli command.Cli, in rmOptions) error {
|
func runRm(dockerCli command.Cli, in rmOptions) error {
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
if in.allInactive && !in.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), rmInactiveWarning) {
|
txn, release, err := getStore(dockerCli)
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer release()
|
defer release()
|
||||||
|
|
||||||
if in.allInactive {
|
|
||||||
return rmAllInactive(ctx, txn, dockerCli, in)
|
|
||||||
}
|
|
||||||
|
|
||||||
if in.builder != "" {
|
if in.builder != "" {
|
||||||
ng, err := storeutil.GetNodeGroup(txn, dockerCli, in.builder)
|
ng, err := getNodeGroup(txn, dockerCli, in.builder)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err1 := rm(ctx, dockerCli, in, ng)
|
err1 := rm(ctx, dockerCli, ng, in.keepState)
|
||||||
if err := txn.Remove(ng.Name); err != nil {
|
if err := txn.Remove(ng.Name); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return err1
|
return err1
|
||||||
}
|
}
|
||||||
|
|
||||||
ng, err := storeutil.GetCurrentInstance(txn, dockerCli)
|
ng, err := getCurrentInstance(txn, dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if ng != nil {
|
if ng != nil {
|
||||||
err1 := rm(ctx, dockerCli, in, ng)
|
err1 := rm(ctx, dockerCli, ng, in.keepState)
|
||||||
if err := txn.Remove(ng.Name); err != nil {
|
if err := txn.Remove(ng.Name); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -80,9 +61,6 @@ func rmCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
options.builder = rootOpts.builder
|
options.builder = rootOpts.builder
|
||||||
if len(args) > 0 {
|
if len(args) > 0 {
|
||||||
if options.allInactive {
|
|
||||||
return errors.New("cannot specify builder name when --all-inactive is set")
|
|
||||||
}
|
|
||||||
options.builder = args[0]
|
options.builder = args[0]
|
||||||
}
|
}
|
||||||
return runRm(dockerCli, options)
|
return runRm(dockerCli, options)
|
||||||
@@ -91,30 +69,23 @@ func rmCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
flags.BoolVar(&options.keepState, "keep-state", false, "Keep BuildKit state")
|
flags.BoolVar(&options.keepState, "keep-state", false, "Keep BuildKit state")
|
||||||
flags.BoolVar(&options.keepDaemon, "keep-daemon", false, "Keep the buildkitd daemon running")
|
|
||||||
flags.BoolVar(&options.allInactive, "all-inactive", false, "Remove all inactive builders")
|
|
||||||
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
|
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
func rm(ctx context.Context, dockerCli command.Cli, in rmOptions, ng *store.NodeGroup) error {
|
func rm(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, keepState bool) error {
|
||||||
dis, err := driversForNodeGroup(ctx, dockerCli, ng, "")
|
dis, err := driversForNodeGroup(ctx, dockerCli, ng, "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
for _, di := range dis {
|
for _, di := range dis {
|
||||||
if di.Driver == nil {
|
if di.Driver != nil {
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Do not stop the buildkitd daemon when --keep-daemon is provided
|
|
||||||
if !in.keepDaemon {
|
|
||||||
if err := di.Driver.Stop(ctx, true); err != nil {
|
if err := di.Driver.Stop(ctx, true); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
if err := di.Driver.Rm(ctx, true, !keepState); err != nil {
|
||||||
if err := di.Driver.Rm(ctx, true, !in.keepState, !in.keepDaemon); err != nil {
|
return err
|
||||||
return err
|
}
|
||||||
}
|
}
|
||||||
if di.Err != nil {
|
if di.Err != nil {
|
||||||
err = di.Err
|
err = di.Err
|
||||||
@@ -122,42 +93,3 @@ func rm(ctx context.Context, dockerCli command.Cli, in rmOptions, ng *store.Node
|
|||||||
}
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func rmAllInactive(ctx context.Context, txn *store.Txn, dockerCli command.Cli, in rmOptions) error {
|
|
||||||
ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
ll, err := txn.List()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
builders := make([]*nginfo, len(ll))
|
|
||||||
for i, ng := range ll {
|
|
||||||
builders[i] = &nginfo{ng: ng}
|
|
||||||
}
|
|
||||||
|
|
||||||
eg, _ := errgroup.WithContext(ctx)
|
|
||||||
for _, b := range builders {
|
|
||||||
func(b *nginfo) {
|
|
||||||
eg.Go(func() error {
|
|
||||||
if err := loadNodeGroupData(ctx, dockerCli, b); err != nil {
|
|
||||||
return errors.Wrapf(err, "cannot load %s", b.ng.Name)
|
|
||||||
}
|
|
||||||
if b.ng.Dynamic {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if b.inactive() {
|
|
||||||
rmerr := rm(ctx, dockerCli, in, b.ng)
|
|
||||||
if err := txn.Remove(b.ng.Name); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return rmerr
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
return eg.Wait()
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -4,23 +4,16 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
|
|
||||||
imagetoolscmd "github.com/docker/buildx/commands/imagetools"
|
imagetoolscmd "github.com/docker/buildx/commands/imagetools"
|
||||||
"github.com/docker/buildx/util/logutil"
|
|
||||||
"github.com/docker/cli-docs-tool/annotation"
|
|
||||||
"github.com/docker/cli/cli-plugins/plugin"
|
"github.com/docker/cli/cli-plugins/plugin"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
)
|
)
|
||||||
|
|
||||||
func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Command {
|
func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Command {
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Short: "Docker Buildx",
|
Short: "Build with BuildKit",
|
||||||
Long: `Extended build capabilities with BuildKit`,
|
|
||||||
Use: name,
|
Use: name,
|
||||||
Annotations: map[string]string{
|
|
||||||
annotation.CodeDelimiter: `"`,
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
if isPlugin {
|
if isPlugin {
|
||||||
cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
|
cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
|
||||||
@@ -28,26 +21,6 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
logrus.SetFormatter(&logutil.Formatter{})
|
|
||||||
|
|
||||||
logrus.AddHook(logutil.NewFilter([]logrus.Level{
|
|
||||||
logrus.DebugLevel,
|
|
||||||
},
|
|
||||||
"serving grpc connection",
|
|
||||||
"stopping session",
|
|
||||||
"using default config store",
|
|
||||||
))
|
|
||||||
|
|
||||||
// filter out useless commandConn.CloseWrite warning message that can occur
|
|
||||||
// when listing builder instances with "buildx ls" for those that are
|
|
||||||
// unreachable: "commandConn.CloseWrite: commandconn: failed to wait: signal: killed"
|
|
||||||
// https://github.com/docker/cli/blob/3fb4fb83dfb5db0c0753a8316f21aea54dab32c5/cli/connhelper/commandconn/commandconn.go#L203-L214
|
|
||||||
logrus.AddHook(logutil.NewFilter([]logrus.Level{
|
|
||||||
logrus.WarnLevel,
|
|
||||||
},
|
|
||||||
"commandConn.CloseWrite:",
|
|
||||||
))
|
|
||||||
|
|
||||||
addCommands(cmd, dockerCli)
|
addCommands(cmd, dockerCli)
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
@@ -74,7 +47,7 @@ func addCommands(cmd *cobra.Command, dockerCli command.Cli) {
|
|||||||
versionCmd(dockerCli),
|
versionCmd(dockerCli),
|
||||||
pruneCmd(dockerCli, opts),
|
pruneCmd(dockerCli, opts),
|
||||||
duCmd(dockerCli, opts),
|
duCmd(dockerCli, opts),
|
||||||
imagetoolscmd.RootCmd(dockerCli, imagetoolscmd.RootOptions{Builder: opts.builder}),
|
imagetoolscmd.RootCmd(dockerCli),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -4,7 +4,6 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
@@ -18,14 +17,14 @@ type stopOptions struct {
|
|||||||
func runStop(dockerCli command.Cli, in stopOptions) error {
|
func runStop(dockerCli command.Cli, in stopOptions) error {
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
txn, release, err := getStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer release()
|
defer release()
|
||||||
|
|
||||||
if in.builder != "" {
|
if in.builder != "" {
|
||||||
ng, err := storeutil.GetNodeGroup(txn, dockerCli, in.builder)
|
ng, err := getNodeGroup(txn, dockerCli, in.builder)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -35,7 +34,7 @@ func runStop(dockerCli command.Cli, in stopOptions) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
ng, err := storeutil.GetCurrentInstance(txn, dockerCli)
|
ng, err := getCurrentInstance(txn, dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -62,6 +61,12 @@ func stopCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
flags := cmd.Flags()
|
||||||
|
|
||||||
|
// flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, "Output destination (format: type=local,dest=path)")
|
||||||
|
|
||||||
|
_ = flags
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -3,7 +3,6 @@ package commands
|
|||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/docker/buildx/util/cobrautil"
|
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/cli/config"
|
"github.com/docker/cli/cli/config"
|
||||||
@@ -55,8 +54,5 @@ func uninstallCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
Hidden: true,
|
Hidden: true,
|
||||||
}
|
}
|
||||||
|
|
||||||
// hide builder persistent flag for this command
|
|
||||||
cobrautil.HideInheritedFlags(cmd, "builder")
|
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,7 +3,6 @@ package commands
|
|||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/docker/buildx/store/storeutil"
|
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
@@ -17,7 +16,7 @@ type useOptions struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func runUse(dockerCli command.Cli, in useOptions) error {
|
func runUse(dockerCli command.Cli, in useOptions) error {
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
txn, release, err := getStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -29,7 +28,7 @@ func runUse(dockerCli command.Cli, in useOptions) error {
|
|||||||
return errors.Errorf("run `docker context use default` to switch to default context")
|
return errors.Errorf("run `docker context use default` to switch to default context")
|
||||||
}
|
}
|
||||||
if in.builder == "default" || in.builder == dockerCli.CurrentContext() {
|
if in.builder == "default" || in.builder == dockerCli.CurrentContext() {
|
||||||
ep, err := storeutil.GetCurrentEndpoint(dockerCli)
|
ep, err := getCurrentEndpoint(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -52,7 +51,7 @@ func runUse(dockerCli command.Cli, in useOptions) error {
|
|||||||
return errors.Wrapf(err, "failed to find instance %q", in.builder)
|
return errors.Wrapf(err, "failed to find instance %q", in.builder)
|
||||||
}
|
}
|
||||||
|
|
||||||
ep, err := storeutil.GetCurrentEndpoint(dockerCli)
|
ep, err := getCurrentEndpoint(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -80,8 +79,11 @@ func useCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
}
|
}
|
||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
|
|
||||||
flags.BoolVar(&options.isGlobal, "global", false, "Builder persists context changes")
|
flags.BoolVar(&options.isGlobal, "global", false, "Builder persists context changes")
|
||||||
flags.BoolVar(&options.isDefault, "default", false, "Set builder as default for current context")
|
flags.BoolVar(&options.isDefault, "default", false, "Set builder as default for current context")
|
||||||
|
|
||||||
|
_ = flags
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|||||||
234
commands/util.go
234
commands/util.go
@@ -4,30 +4,86 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/build"
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
ctxkube "github.com/docker/buildx/driver/kubernetes/context"
|
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
"github.com/docker/buildx/store/storeutil"
|
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
"github.com/docker/buildx/util/progress"
|
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/cli/context/docker"
|
"github.com/docker/cli/cli/context/docker"
|
||||||
|
"github.com/docker/cli/cli/context/kubernetes"
|
||||||
ctxstore "github.com/docker/cli/cli/context/store"
|
ctxstore "github.com/docker/cli/cli/context/store"
|
||||||
dopts "github.com/docker/cli/opts"
|
dopts "github.com/docker/cli/opts"
|
||||||
dockerclient "github.com/docker/docker/client"
|
dockerclient "github.com/docker/docker/client"
|
||||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
"k8s.io/client-go/tools/clientcmd"
|
"k8s.io/client-go/tools/clientcmd"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// getStore returns current builder instance store
|
||||||
|
func getStore(dockerCli command.Cli) (*store.Txn, func(), error) {
|
||||||
|
s, err := store.New(getConfigStorePath(dockerCli))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return s.Txn()
|
||||||
|
}
|
||||||
|
|
||||||
|
// getConfigStorePath will look for correct configuration store path;
|
||||||
|
// if `$BUILDX_CONFIG` is set - use it, otherwise use parent directory
|
||||||
|
// of Docker config file (i.e. `${DOCKER_CONFIG}/buildx`)
|
||||||
|
func getConfigStorePath(dockerCli command.Cli) string {
|
||||||
|
if buildxConfig := os.Getenv("BUILDX_CONFIG"); buildxConfig != "" {
|
||||||
|
logrus.Debugf("using config store %q based in \"$BUILDX_CONFIG\" environment variable", buildxConfig)
|
||||||
|
return buildxConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
buildxConfig := filepath.Join(filepath.Dir(dockerCli.ConfigFile().Filename), "buildx")
|
||||||
|
logrus.Debugf("using default config store %q", buildxConfig)
|
||||||
|
return buildxConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
// getCurrentEndpoint returns the current default endpoint value
|
||||||
|
func getCurrentEndpoint(dockerCli command.Cli) (string, error) {
|
||||||
|
name := dockerCli.CurrentContext()
|
||||||
|
if name != "default" {
|
||||||
|
return name, nil
|
||||||
|
}
|
||||||
|
de, err := getDockerEndpoint(dockerCli, name)
|
||||||
|
if err != nil {
|
||||||
|
return "", errors.Errorf("docker endpoint for %q not found", name)
|
||||||
|
}
|
||||||
|
return de, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getDockerEndpoint returns docker endpoint string for given context
|
||||||
|
func getDockerEndpoint(dockerCli command.Cli, name string) (string, error) {
|
||||||
|
list, err := dockerCli.ContextStore().List()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
for _, l := range list {
|
||||||
|
if l.Name == name {
|
||||||
|
ep, ok := l.Endpoints["docker"]
|
||||||
|
if !ok {
|
||||||
|
return "", errors.Errorf("context %q does not have a Docker endpoint", name)
|
||||||
|
}
|
||||||
|
typed, ok := ep.(docker.EndpointMeta)
|
||||||
|
if !ok {
|
||||||
|
return "", errors.Errorf("endpoint %q is not of type EndpointMeta, %T", ep, ep)
|
||||||
|
}
|
||||||
|
return typed.Host, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
// validateEndpoint validates that endpoint is either a context or a docker host
|
// validateEndpoint validates that endpoint is either a context or a docker host
|
||||||
func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
|
func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
|
||||||
de, err := storeutil.GetDockerEndpoint(dockerCli, ep)
|
de, err := getDockerEndpoint(dockerCli, ep)
|
||||||
if err == nil && de != "" {
|
if err == nil && de != "" {
|
||||||
if ep == "default" {
|
if ep == "default" {
|
||||||
return de, nil
|
return de, nil
|
||||||
@@ -41,6 +97,60 @@ func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
|
|||||||
return h, nil
|
return h, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// getCurrentInstance finds the current builder instance
|
||||||
|
func getCurrentInstance(txn *store.Txn, dockerCli command.Cli) (*store.NodeGroup, error) {
|
||||||
|
ep, err := getCurrentEndpoint(dockerCli)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ng, err := txn.Current(ep)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if ng == nil {
|
||||||
|
ng, _ = getNodeGroup(txn, dockerCli, dockerCli.CurrentContext())
|
||||||
|
}
|
||||||
|
|
||||||
|
return ng, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getNodeGroup returns nodegroup based on the name
|
||||||
|
func getNodeGroup(txn *store.Txn, dockerCli command.Cli, name string) (*store.NodeGroup, error) {
|
||||||
|
ng, err := txn.NodeGroupByName(name)
|
||||||
|
if err != nil {
|
||||||
|
if !os.IsNotExist(errors.Cause(err)) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ng != nil {
|
||||||
|
return ng, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if name == "default" {
|
||||||
|
name = dockerCli.CurrentContext()
|
||||||
|
}
|
||||||
|
|
||||||
|
list, err := dockerCli.ContextStore().List()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for _, l := range list {
|
||||||
|
if l.Name == name {
|
||||||
|
return &store.NodeGroup{
|
||||||
|
Name: "default",
|
||||||
|
Nodes: []store.Node{
|
||||||
|
{
|
||||||
|
Name: "default",
|
||||||
|
Endpoint: name,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, errors.Errorf("no builder %q found", name)
|
||||||
|
}
|
||||||
|
|
||||||
// driversForNodeGroup returns drivers for a nodegroup instance
|
// driversForNodeGroup returns drivers for a nodegroup instance
|
||||||
func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, contextPathHash string) ([]build.DriverInfo, error) {
|
func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, contextPathHash string) ([]build.DriverInfo, error) {
|
||||||
eg, _ := errgroup.WithContext(ctx)
|
eg, _ := errgroup.WithContext(ctx)
|
||||||
@@ -64,18 +174,13 @@ func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.N
|
|||||||
}
|
}
|
||||||
ng.Driver = f.Name()
|
ng.Driver = f.Name()
|
||||||
}
|
}
|
||||||
imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for i, n := range ng.Nodes {
|
for i, n := range ng.Nodes {
|
||||||
func(i int, n store.Node) {
|
func(i int, n store.Node) {
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
di := build.DriverInfo{
|
di := build.DriverInfo{
|
||||||
Name: n.Name,
|
Name: n.Name,
|
||||||
Platform: n.Platforms,
|
Platform: n.Platforms,
|
||||||
ProxyConfig: storeutil.GetProxyConfig(dockerCli),
|
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
dis[i] = di
|
dis[i] = di
|
||||||
@@ -118,13 +223,12 @@ func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.N
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, f, dockerapi, imageopt.Auth, kcc, n.Flags, n.Files, n.DriverOpts, n.Platforms, contextPathHash)
|
d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, f, dockerapi, dockerCli.ConfigFile(), kcc, n.Flags, n.ConfigFile, n.DriverOpts, n.Platforms, contextPathHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
di.Err = err
|
di.Err = err
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
di.Driver = d
|
di.Driver = d
|
||||||
di.ImageOpt = imageopt
|
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
}(i, n)
|
}(i, n)
|
||||||
@@ -140,17 +244,16 @@ func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.N
|
|||||||
func configFromContext(endpointName string, s ctxstore.Reader) (clientcmd.ClientConfig, error) {
|
func configFromContext(endpointName string, s ctxstore.Reader) (clientcmd.ClientConfig, error) {
|
||||||
if strings.HasPrefix(endpointName, "kubernetes://") {
|
if strings.HasPrefix(endpointName, "kubernetes://") {
|
||||||
u, _ := url.Parse(endpointName)
|
u, _ := url.Parse(endpointName)
|
||||||
|
|
||||||
if kubeconfig := u.Query().Get("kubeconfig"); kubeconfig != "" {
|
if kubeconfig := u.Query().Get("kubeconfig"); kubeconfig != "" {
|
||||||
_ = os.Setenv(clientcmd.RecommendedConfigPathEnvVar, kubeconfig)
|
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
|
||||||
|
&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig},
|
||||||
|
&clientcmd.ConfigOverrides{},
|
||||||
|
)
|
||||||
|
return clientConfig, nil
|
||||||
}
|
}
|
||||||
rules := clientcmd.NewDefaultClientConfigLoadingRules()
|
|
||||||
apiConfig, err := rules.Load()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return clientcmd.NewDefaultClientConfig(*apiConfig, &clientcmd.ConfigOverrides{}), nil
|
|
||||||
}
|
}
|
||||||
return ctxkube.ConfigFromContext(endpointName, s)
|
return kubernetes.ConfigFromContext(endpointName, s)
|
||||||
}
|
}
|
||||||
|
|
||||||
// clientForEndpoint returns a docker client for an endpoint
|
// clientForEndpoint returns a docker client for an endpoint
|
||||||
@@ -222,7 +325,7 @@ func getInstanceOrDefault(ctx context.Context, dockerCli command.Cli, instance,
|
|||||||
}
|
}
|
||||||
|
|
||||||
func getInstanceByName(ctx context.Context, dockerCli command.Cli, instance, contextPathHash string) ([]build.DriverInfo, error) {
|
func getInstanceByName(ctx context.Context, dockerCli command.Cli, instance, contextPathHash string) ([]build.DriverInfo, error) {
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
txn, release, err := getStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -237,14 +340,14 @@ func getInstanceByName(ctx context.Context, dockerCli command.Cli, instance, con
|
|||||||
|
|
||||||
// getDefaultDrivers returns drivers based on current cli config
|
// getDefaultDrivers returns drivers based on current cli config
|
||||||
func getDefaultDrivers(ctx context.Context, dockerCli command.Cli, defaultOnly bool, contextPathHash string) ([]build.DriverInfo, error) {
|
func getDefaultDrivers(ctx context.Context, dockerCli command.Cli, defaultOnly bool, contextPathHash string) ([]build.DriverInfo, error) {
|
||||||
txn, release, err := storeutil.GetStore(dockerCli)
|
txn, release, err := getStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer release()
|
defer release()
|
||||||
|
|
||||||
if !defaultOnly {
|
if !defaultOnly {
|
||||||
ng, err := storeutil.GetCurrentInstance(txn, dockerCli)
|
ng, err := getCurrentInstance(txn, dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -254,21 +357,14 @@ func getDefaultDrivers(ctx context.Context, dockerCli command.Cli, defaultOnly b
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
imageopt, err := storeutil.GetImageConfig(dockerCli, nil)
|
d, err := driver.GetDriver(ctx, "buildx_buildkit_default", nil, dockerCli.Client(), dockerCli.ConfigFile(), nil, nil, "", nil, nil, contextPathHash)
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
d, err := driver.GetDriver(ctx, "buildx_buildkit_default", nil, dockerCli.Client(), imageopt.Auth, nil, nil, nil, nil, nil, contextPathHash)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return []build.DriverInfo{
|
return []build.DriverInfo{
|
||||||
{
|
{
|
||||||
Name: "default",
|
Name: "default",
|
||||||
Driver: d,
|
Driver: d,
|
||||||
ImageOpt: imageopt,
|
|
||||||
ProxyConfig: storeutil.GetProxyConfig(dockerCli),
|
|
||||||
},
|
},
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
@@ -292,7 +388,9 @@ func loadInfoData(ctx context.Context, d *dinfo) error {
|
|||||||
return errors.Wrap(err, "listing workers")
|
return errors.Wrap(err, "listing workers")
|
||||||
}
|
}
|
||||||
for _, w := range workers {
|
for _, w := range workers {
|
||||||
d.platforms = append(d.platforms, w.Platforms...)
|
for _, p := range w.Platforms {
|
||||||
|
d.platforms = append(d.platforms, p)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
d.platforms = platformutil.Dedupe(d.platforms)
|
d.platforms = platformutil.Dedupe(d.platforms)
|
||||||
}
|
}
|
||||||
@@ -377,67 +475,3 @@ func (a *api) DockerAPI(name string) (dockerclient.APIClient, error) {
|
|||||||
}
|
}
|
||||||
return clientForEndpoint(a.dockerCli, name)
|
return clientForEndpoint(a.dockerCli, name)
|
||||||
}
|
}
|
||||||
|
|
||||||
type dinfo struct {
|
|
||||||
di *build.DriverInfo
|
|
||||||
info *driver.Info
|
|
||||||
platforms []specs.Platform
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
type nginfo struct {
|
|
||||||
ng *store.NodeGroup
|
|
||||||
drivers []dinfo
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
// inactive checks if all nodes are inactive for this builder
|
|
||||||
func (n *nginfo) inactive() bool {
|
|
||||||
for idx := range n.ng.Nodes {
|
|
||||||
d := n.drivers[idx]
|
|
||||||
if d.info != nil && d.info.Status == driver.Running {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func boot(ctx context.Context, ngi *nginfo) (bool, error) {
|
|
||||||
toBoot := make([]int, 0, len(ngi.drivers))
|
|
||||||
for i, d := range ngi.drivers {
|
|
||||||
if d.err != nil || d.di.Err != nil || d.di.Driver == nil || d.info == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if d.info.Status != driver.Running {
|
|
||||||
toBoot = append(toBoot, i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(toBoot) == 0 {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
printer := progress.NewPrinter(context.TODO(), os.Stderr, os.Stderr, "auto")
|
|
||||||
|
|
||||||
baseCtx := ctx
|
|
||||||
eg, _ := errgroup.WithContext(ctx)
|
|
||||||
for _, idx := range toBoot {
|
|
||||||
func(idx int) {
|
|
||||||
eg.Go(func() error {
|
|
||||||
pw := progress.WithPrefix(printer, ngi.ng.Nodes[idx].Name, len(toBoot) > 1)
|
|
||||||
_, err := driver.Boot(ctx, baseCtx, ngi.drivers[idx].di.Driver, pw)
|
|
||||||
if err != nil {
|
|
||||||
ngi.drivers[idx].err = err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}(idx)
|
|
||||||
}
|
|
||||||
|
|
||||||
err := eg.Wait()
|
|
||||||
err1 := printer.Wait()
|
|
||||||
if err == nil {
|
|
||||||
err = err1
|
|
||||||
}
|
|
||||||
|
|
||||||
return true, err
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -3,7 +3,6 @@ package commands
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/docker/buildx/util/cobrautil"
|
|
||||||
"github.com/docker/buildx/version"
|
"github.com/docker/buildx/version"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
@@ -24,9 +23,5 @@ func versionCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
return runVersion(dockerCli)
|
return runVersion(dockerCli)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
// hide builder persistent flag for this command
|
|
||||||
cobrautil.HideInheritedFlags(cmd, "builder")
|
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|||||||
149
docker-bake.hcl
149
docker-bake.hcl
@@ -1,149 +0,0 @@
|
|||||||
variable "GO_VERSION" {
|
|
||||||
default = "1.17"
|
|
||||||
}
|
|
||||||
variable "BIN_OUT" {
|
|
||||||
default = "./bin"
|
|
||||||
}
|
|
||||||
variable "RELEASE_OUT" {
|
|
||||||
default = "./release-out"
|
|
||||||
}
|
|
||||||
variable "DOCS_FORMATS" {
|
|
||||||
default = "md"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Special target: https://github.com/docker/metadata-action#bake-definition
|
|
||||||
target "meta-helper" {
|
|
||||||
tags = ["docker/buildx-bin:local"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "_common" {
|
|
||||||
args = {
|
|
||||||
GO_VERSION = GO_VERSION
|
|
||||||
BUILDKIT_CONTEXT_KEEP_GIT_DIR = 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
group "default" {
|
|
||||||
targets = ["binaries"]
|
|
||||||
}
|
|
||||||
|
|
||||||
group "validate" {
|
|
||||||
targets = ["lint", "validate-vendor", "validate-docs"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "lint" {
|
|
||||||
inherits = ["_common"]
|
|
||||||
dockerfile = "./hack/dockerfiles/lint.Dockerfile"
|
|
||||||
output = ["type=cacheonly"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "validate-vendor" {
|
|
||||||
inherits = ["_common"]
|
|
||||||
dockerfile = "./hack/dockerfiles/vendor.Dockerfile"
|
|
||||||
target = "validate"
|
|
||||||
output = ["type=cacheonly"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "validate-docs" {
|
|
||||||
inherits = ["_common"]
|
|
||||||
args = {
|
|
||||||
FORMATS = DOCS_FORMATS
|
|
||||||
}
|
|
||||||
dockerfile = "./hack/dockerfiles/docs.Dockerfile"
|
|
||||||
target = "validate"
|
|
||||||
output = ["type=cacheonly"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "validate-authors" {
|
|
||||||
inherits = ["_common"]
|
|
||||||
dockerfile = "./hack/dockerfiles/authors.Dockerfile"
|
|
||||||
target = "validate"
|
|
||||||
output = ["type=cacheonly"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "update-vendor" {
|
|
||||||
inherits = ["_common"]
|
|
||||||
dockerfile = "./hack/dockerfiles/vendor.Dockerfile"
|
|
||||||
target = "update"
|
|
||||||
output = ["."]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "update-docs" {
|
|
||||||
inherits = ["_common"]
|
|
||||||
args = {
|
|
||||||
FORMATS = DOCS_FORMATS
|
|
||||||
}
|
|
||||||
dockerfile = "./hack/dockerfiles/docs.Dockerfile"
|
|
||||||
target = "update"
|
|
||||||
output = ["./docs/reference"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "update-authors" {
|
|
||||||
inherits = ["_common"]
|
|
||||||
dockerfile = "./hack/dockerfiles/authors.Dockerfile"
|
|
||||||
target = "update"
|
|
||||||
output = ["."]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "mod-outdated" {
|
|
||||||
inherits = ["_common"]
|
|
||||||
dockerfile = "./hack/dockerfiles/vendor.Dockerfile"
|
|
||||||
target = "outdated"
|
|
||||||
args = {
|
|
||||||
// used to invalidate cache for outdated run stage
|
|
||||||
// can be dropped when https://github.com/moby/buildkit/issues/1213 fixed
|
|
||||||
_RANDOM = uuidv4()
|
|
||||||
}
|
|
||||||
output = ["type=cacheonly"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "test" {
|
|
||||||
inherits = ["_common"]
|
|
||||||
target = "test-coverage"
|
|
||||||
output = ["./coverage"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "binaries" {
|
|
||||||
inherits = ["_common"]
|
|
||||||
target = "binaries"
|
|
||||||
output = [BIN_OUT]
|
|
||||||
platforms = ["local"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "binaries-cross" {
|
|
||||||
inherits = ["binaries"]
|
|
||||||
platforms = [
|
|
||||||
"darwin/amd64",
|
|
||||||
"darwin/arm64",
|
|
||||||
"linux/amd64",
|
|
||||||
"linux/arm/v6",
|
|
||||||
"linux/arm/v7",
|
|
||||||
"linux/arm64",
|
|
||||||
"linux/ppc64le",
|
|
||||||
"linux/riscv64",
|
|
||||||
"linux/s390x",
|
|
||||||
"windows/amd64",
|
|
||||||
"windows/arm64"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "release" {
|
|
||||||
inherits = ["binaries-cross"]
|
|
||||||
target = "release"
|
|
||||||
output = [RELEASE_OUT]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "image" {
|
|
||||||
inherits = ["meta-helper", "binaries"]
|
|
||||||
output = ["type=image"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "image-cross" {
|
|
||||||
inherits = ["meta-helper", "binaries-cross"]
|
|
||||||
output = ["type=image"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "image-local" {
|
|
||||||
inherits = ["image"]
|
|
||||||
output = ["type=docker"]
|
|
||||||
}
|
|
||||||
198
docs/docsgen/generate.go
Normal file
198
docs/docsgen/generate.go
Normal file
@@ -0,0 +1,198 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/commands"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"github.com/spf13/pflag"
|
||||||
|
)
|
||||||
|
|
||||||
|
const descriptionSourcePath = "docs/reference/"
|
||||||
|
|
||||||
|
func generateDocs(opts *options) error {
|
||||||
|
dockerCLI, err := command.NewDockerCli()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "docker [OPTIONS] COMMAND [ARG...]",
|
||||||
|
Short: "The base command for the Docker CLI.",
|
||||||
|
}
|
||||||
|
cmd.AddCommand(commands.NewRootCmd("buildx", true, dockerCLI))
|
||||||
|
return genCmd(cmd, opts.target)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getMDFilename(cmd *cobra.Command) string {
|
||||||
|
name := cmd.CommandPath()
|
||||||
|
if i := strings.Index(name, " "); i >= 0 {
|
||||||
|
name = name[i+1:]
|
||||||
|
}
|
||||||
|
return strings.ReplaceAll(name, " ", "_") + ".md"
|
||||||
|
}
|
||||||
|
|
||||||
|
func genCmd(cmd *cobra.Command, dir string) error {
|
||||||
|
for _, c := range cmd.Commands() {
|
||||||
|
if err := genCmd(c, dir); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !cmd.HasParent() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
mdFile := getMDFilename(cmd)
|
||||||
|
fullPath := filepath.Join(dir, mdFile)
|
||||||
|
|
||||||
|
content, err := ioutil.ReadFile(fullPath)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, os.ErrNotExist) {
|
||||||
|
return errors.Wrapf(err, "%s does not exist", mdFile)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
cs := string(content)
|
||||||
|
|
||||||
|
markerStart := "<!---MARKER_GEN_START-->"
|
||||||
|
markerEnd := "<!---MARKER_GEN_END-->"
|
||||||
|
|
||||||
|
start := strings.Index(cs, markerStart)
|
||||||
|
end := strings.Index(cs, markerEnd)
|
||||||
|
|
||||||
|
if start == -1 {
|
||||||
|
return errors.Errorf("no start marker in %s", mdFile)
|
||||||
|
}
|
||||||
|
if end == -1 {
|
||||||
|
return errors.Errorf("no end marker in %s", mdFile)
|
||||||
|
}
|
||||||
|
|
||||||
|
out, err := cmdOutput(cmd, cs)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
cont := cs[:start] + markerStart + "\n" + out + "\n" + cs[end:]
|
||||||
|
|
||||||
|
fi, err := os.Stat(fullPath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := ioutil.WriteFile(fullPath, []byte(cont), fi.Mode()); err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to write %s", fullPath)
|
||||||
|
}
|
||||||
|
log.Printf("updated %s", fullPath)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func makeLink(txt, link string, f *pflag.Flag, isAnchor bool) string {
|
||||||
|
link = "#" + link
|
||||||
|
annotations, ok := f.Annotations["docs.external.url"]
|
||||||
|
if ok && len(annotations) > 0 {
|
||||||
|
link = annotations[0]
|
||||||
|
} else {
|
||||||
|
if !isAnchor {
|
||||||
|
return txt
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return "[" + txt + "](" + link + ")"
|
||||||
|
}
|
||||||
|
|
||||||
|
func cmdOutput(cmd *cobra.Command, old string) (string, error) {
|
||||||
|
b := &strings.Builder{}
|
||||||
|
|
||||||
|
desc := cmd.Short
|
||||||
|
if cmd.Long != "" {
|
||||||
|
desc = cmd.Long
|
||||||
|
}
|
||||||
|
if desc != "" {
|
||||||
|
fmt.Fprintf(b, "%s\n\n", desc)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(cmd.Aliases) != 0 {
|
||||||
|
fmt.Fprintf(b, "### Aliases\n\n`%s`", cmd.Name())
|
||||||
|
for _, a := range cmd.Aliases {
|
||||||
|
fmt.Fprintf(b, ", `%s`", a)
|
||||||
|
}
|
||||||
|
fmt.Fprint(b, "\n\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(cmd.Commands()) != 0 {
|
||||||
|
fmt.Fprint(b, "### Subcommands\n\n")
|
||||||
|
fmt.Fprint(b, "| Name | Description |\n")
|
||||||
|
fmt.Fprint(b, "| --- | --- |\n")
|
||||||
|
for _, c := range cmd.Commands() {
|
||||||
|
fmt.Fprintf(b, "| [`%s`](%s) | %s |\n", c.Name(), getMDFilename(c), c.Short)
|
||||||
|
}
|
||||||
|
fmt.Fprint(b, "\n\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
hasFlags := cmd.Flags().HasAvailableFlags()
|
||||||
|
|
||||||
|
cmd.Flags().AddFlagSet(cmd.InheritedFlags())
|
||||||
|
|
||||||
|
if hasFlags {
|
||||||
|
fmt.Fprint(b, "### Options\n\n")
|
||||||
|
fmt.Fprint(b, "| Name | Description |\n")
|
||||||
|
fmt.Fprint(b, "| --- | --- |\n")
|
||||||
|
|
||||||
|
cmd.Flags().VisitAll(func(f *pflag.Flag) {
|
||||||
|
if f.Hidden {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
isLink := strings.Contains(old, "<a name=\""+f.Name+"\"></a>")
|
||||||
|
fmt.Fprint(b, "| ")
|
||||||
|
if f.Shorthand != "" {
|
||||||
|
name := "`-" + f.Shorthand + "`"
|
||||||
|
name = makeLink(name, f.Name, f, isLink)
|
||||||
|
fmt.Fprintf(b, "%s, ", name)
|
||||||
|
}
|
||||||
|
name := "`--" + f.Name
|
||||||
|
if f.Value.Type() != "bool" {
|
||||||
|
name += " " + f.Value.Type()
|
||||||
|
}
|
||||||
|
name += "`"
|
||||||
|
name = makeLink(name, f.Name, f, isLink)
|
||||||
|
fmt.Fprintf(b, "%s | %s |\n", name, f.Usage)
|
||||||
|
})
|
||||||
|
fmt.Fprintln(b, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
return b.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type options struct {
|
||||||
|
target string
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseArgs() (*options, error) {
|
||||||
|
opts := &options{}
|
||||||
|
flags := pflag.NewFlagSet(os.Args[0], pflag.ContinueOnError)
|
||||||
|
flags.StringVar(&opts.target, "target", descriptionSourcePath, "Docs directory")
|
||||||
|
err := flags.Parse(os.Args[1:])
|
||||||
|
return opts, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
if err := run(); err != nil {
|
||||||
|
log.Printf("error: %+v", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func run() error {
|
||||||
|
opts, err := parseArgs()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := generateDocs(opts); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
@@ -1,89 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/docker/buildx/commands"
|
|
||||||
clidocstool "github.com/docker/cli-docs-tool"
|
|
||||||
"github.com/docker/cli/cli/command"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"github.com/spf13/pflag"
|
|
||||||
|
|
||||||
// import drivers otherwise factories are empty
|
|
||||||
// for --driver output flag usage
|
|
||||||
_ "github.com/docker/buildx/driver/docker"
|
|
||||||
_ "github.com/docker/buildx/driver/docker-container"
|
|
||||||
_ "github.com/docker/buildx/driver/kubernetes"
|
|
||||||
)
|
|
||||||
|
|
||||||
const defaultSourcePath = "docs/reference/"
|
|
||||||
|
|
||||||
type options struct {
|
|
||||||
source string
|
|
||||||
formats []string
|
|
||||||
}
|
|
||||||
|
|
||||||
func gen(opts *options) error {
|
|
||||||
log.SetFlags(0)
|
|
||||||
|
|
||||||
dockerCLI, err := command.NewDockerCli()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
cmd := &cobra.Command{
|
|
||||||
Use: "docker [OPTIONS] COMMAND [ARG...]",
|
|
||||||
Short: "The base command for the Docker CLI.",
|
|
||||||
DisableAutoGenTag: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
cmd.AddCommand(commands.NewRootCmd("buildx", true, dockerCLI))
|
|
||||||
|
|
||||||
c, err := clidocstool.New(clidocstool.Options{
|
|
||||||
Root: cmd,
|
|
||||||
SourceDir: opts.source,
|
|
||||||
Plugin: true,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, format := range opts.formats {
|
|
||||||
switch format {
|
|
||||||
case "md":
|
|
||||||
if err = c.GenMarkdownTree(cmd); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case "yaml":
|
|
||||||
if err = c.GenYamlTree(cmd); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return errors.Errorf("unknown format %q", format)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func run() error {
|
|
||||||
opts := &options{}
|
|
||||||
flags := pflag.NewFlagSet(os.Args[0], pflag.ContinueOnError)
|
|
||||||
flags.StringVar(&opts.source, "source", defaultSourcePath, "Docs source folder")
|
|
||||||
flags.StringSliceVar(&opts.formats, "formats", []string{}, "Format (md, yaml)")
|
|
||||||
if err := flags.Parse(os.Args[1:]); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if len(opts.formats) == 0 {
|
|
||||||
return errors.New("Docs format required")
|
|
||||||
}
|
|
||||||
return gen(opts)
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
if err := run(); err != nil {
|
|
||||||
log.Printf("ERROR: %+v", err)
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,48 +0,0 @@
|
|||||||
# CI/CD
|
|
||||||
|
|
||||||
## GitHub Actions
|
|
||||||
|
|
||||||
Docker provides a [GitHub Action that will build and push your image](https://github.com/docker/build-push-action/#about)
|
|
||||||
using Buildx. Here is a simple workflow:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
name: ci
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- 'main'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
docker:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v1
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v1
|
|
||||||
-
|
|
||||||
name: Login to DockerHub
|
|
||||||
uses: docker/login-action@v1
|
|
||||||
with:
|
|
||||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
|
||||||
-
|
|
||||||
name: Build and push
|
|
||||||
uses: docker/build-push-action@v2
|
|
||||||
with:
|
|
||||||
push: true
|
|
||||||
tags: user/app:latest
|
|
||||||
```
|
|
||||||
|
|
||||||
In this example we are also using 3 other actions:
|
|
||||||
|
|
||||||
* [`setup-buildx`](https://github.com/docker/setup-buildx-action) action will create and boot a builder using by
|
|
||||||
default the `docker-container` [builder driver](../reference/buildx_create.md#driver).
|
|
||||||
This is **not required but recommended** using it to be able to build multi-platform images, export cache, etc.
|
|
||||||
* [`setup-qemu`](https://github.com/docker/setup-qemu-action) action can be useful if you want
|
|
||||||
to add emulation support with QEMU to be able to build against more platforms.
|
|
||||||
* [`login`](https://github.com/docker/login-action) action will take care to log
|
|
||||||
in against a Docker registry.
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
# CNI networking
|
|
||||||
|
|
||||||
It can be useful to use a bridge network for your builder if for example you
|
|
||||||
encounter a network port contention during multiple builds. If you're using
|
|
||||||
the BuildKit image, CNI is not yet available in it, but you can create
|
|
||||||
[a custom BuildKit image with CNI support](https://github.com/moby/buildkit/blob/master/docs/cni-networking.md).
|
|
||||||
|
|
||||||
Now build this image:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --tag buildkit-cni:local --load .
|
|
||||||
```
|
|
||||||
|
|
||||||
Then [create a `docker-container` builder](../reference/buildx_create.md) that
|
|
||||||
will use this image:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create --use \
|
|
||||||
--name mybuilder \
|
|
||||||
--driver docker-container \
|
|
||||||
--driver-opt "image=buildkit-cni:local" \
|
|
||||||
--buildkitd-flags "--oci-worker-net=cni"
|
|
||||||
```
|
|
||||||
@@ -1,48 +0,0 @@
|
|||||||
# Using a custom network
|
|
||||||
|
|
||||||
[Create a network](https://docs.docker.com/engine/reference/commandline/network_create/)
|
|
||||||
named `foonet`:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker network create foonet
|
|
||||||
```
|
|
||||||
|
|
||||||
[Create a `docker-container` builder](../reference/buildx_create.md) named
|
|
||||||
`mybuilder` that will use this network:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create --use \
|
|
||||||
--name mybuilder \
|
|
||||||
--driver docker-container \
|
|
||||||
--driver-opt "network=foonet"
|
|
||||||
```
|
|
||||||
|
|
||||||
Boot and [inspect `mybuilder`](../reference/buildx_inspect.md):
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx inspect --bootstrap
|
|
||||||
```
|
|
||||||
|
|
||||||
[Inspect the builder container](https://docs.docker.com/engine/reference/commandline/inspect/)
|
|
||||||
and see what network is being used:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker inspect buildx_buildkit_mybuilder0 --format={{.NetworkSettings.Networks}}
|
|
||||||
map[foonet:0xc00018c0c0]
|
|
||||||
```
|
|
||||||
|
|
||||||
## What's `buildx_buildkit_mybuilder0`?
|
|
||||||
|
|
||||||
`buildx_buildkit_mybuilder0` is the container name. It can be broken down like this:
|
|
||||||
|
|
||||||
* `buildx_buildkit_` is a hardcoded prefix
|
|
||||||
* `mybuilder0` is the name of the node (defaults to builder name + position in the list of nodes)
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx ls
|
|
||||||
NAME/NODE DRIVER/ENDPOINT STATUS PLATFORMS
|
|
||||||
mybuilder * docker-container
|
|
||||||
mybuilder0 unix:///var/run/docker.sock running linux/amd64, linux/arm64, linux/riscv64, linux/ppc64le, linux/s390x, linux/386, linux/mips64le, linux/mips64, linux/arm/v7, linux/arm/v6
|
|
||||||
default docker
|
|
||||||
default default running linux/amd64, linux/arm64, linux/riscv64, linux/ppc64le, linux/s390x, linux/386, linux/arm/v7, linux/arm/v6
|
|
||||||
```
|
|
||||||
@@ -1,63 +0,0 @@
|
|||||||
# Using a custom registry configuration
|
|
||||||
|
|
||||||
If you [create a `docker-container` or `kubernetes` builder](../reference/buildx_create.md) and
|
|
||||||
have specified certificates for registries in the [BuildKit daemon configuration](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md),
|
|
||||||
the files will be copied into the container under `/etc/buildkit/certs` and
|
|
||||||
configuration will be updated to reflect that.
|
|
||||||
|
|
||||||
Take the following `buildkitd.toml` configuration that will be used for
|
|
||||||
pushing an image to this registry using self-signed certificates:
|
|
||||||
|
|
||||||
```toml"
|
|
||||||
debug = true
|
|
||||||
[registry."myregistry.com"]
|
|
||||||
ca=["/etc/certs/myregistry.pem"]
|
|
||||||
[[registry."myregistry.com".keypair]]
|
|
||||||
key="/etc/certs/myregistry_key.pem"
|
|
||||||
cert="/etc/certs/myregistry_cert.pem"
|
|
||||||
```
|
|
||||||
> `/etc/buildkitd.toml`
|
|
||||||
|
|
||||||
Here we have configured a self-signed certificate for `myregistry.com` registry.
|
|
||||||
|
|
||||||
Now [create a `docker-container` builder](../reference/buildx_create.md)
|
|
||||||
that will use this BuildKit configuration:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create --use \
|
|
||||||
--name mybuilder \
|
|
||||||
--driver docker-container \
|
|
||||||
--config /etc/buildkitd.toml
|
|
||||||
```
|
|
||||||
|
|
||||||
Inspecting the builder container, you can see that buildkitd configuration
|
|
||||||
has changed:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker exec -it buildx_buildkit_mybuilder0 cat /etc/buildkit/buildkitd.toml
|
|
||||||
```
|
|
||||||
```toml
|
|
||||||
debug = true
|
|
||||||
|
|
||||||
[registry]
|
|
||||||
|
|
||||||
[registry."myregistry.com"]
|
|
||||||
ca = ["/etc/buildkit/certs/myregistry.com/myregistry.pem"]
|
|
||||||
|
|
||||||
[[registry."myregistry.com".keypair]]
|
|
||||||
cert = "/etc/buildkit/certs/myregistry.com/myregistry_cert.pem"
|
|
||||||
key = "/etc/buildkit/certs/myregistry.com/myregistry_key.pem"
|
|
||||||
```
|
|
||||||
|
|
||||||
And certificates copied inside the container:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker exec -it buildx_buildkit_mybuilder0 ls /etc/buildkit/certs/myregistry.com/
|
|
||||||
myregistry.pem myregistry_cert.pem myregistry_key.pem
|
|
||||||
```
|
|
||||||
|
|
||||||
Now you should be able to push to the registry with this builder:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --push --tag myregistry.com/myimage:latest .
|
|
||||||
```
|
|
||||||
@@ -1,31 +0,0 @@
|
|||||||
# OpenTelemetry support
|
|
||||||
|
|
||||||
To capture the trace to [Jaeger](https://github.com/jaegertracing/jaeger), set
|
|
||||||
`JAEGER_TRACE` environment variable to the collection address using a `driver-opt`.
|
|
||||||
|
|
||||||
First create a Jaeger container:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker run -d --name jaeger -p "6831:6831/udp" -p "16686:16686" jaegertracing/all-in-one
|
|
||||||
```
|
|
||||||
|
|
||||||
Then [create a `docker-container` builder](../reference/buildx_create.md)
|
|
||||||
that will use the Jaeger instance via the `JAEGER_TRACE` env var:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create --use \
|
|
||||||
--name mybuilder \
|
|
||||||
--driver docker-container \
|
|
||||||
--driver-opt "network=host" \
|
|
||||||
--driver-opt "env.JAEGER_TRACE=localhost:6831"
|
|
||||||
```
|
|
||||||
|
|
||||||
Boot and [inspect `mybuilder`](../reference/buildx_inspect.md):
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx inspect --bootstrap
|
|
||||||
```
|
|
||||||
|
|
||||||
Buildx commands should be traced at `http://127.0.0.1:16686/`:
|
|
||||||
|
|
||||||

|
|
||||||
@@ -1,60 +0,0 @@
|
|||||||
# Registry mirror
|
|
||||||
|
|
||||||
You can define a registry mirror to use for your builds by providing a [BuildKit daemon configuration](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md)
|
|
||||||
while creating a builder with the [`--config` flags](../reference/buildx_create.md#config).
|
|
||||||
|
|
||||||
```toml
|
|
||||||
debug = true
|
|
||||||
[registry."docker.io"]
|
|
||||||
mirrors = ["mirror.gcr.io"]
|
|
||||||
```
|
|
||||||
> `/etc/buildkitd.toml`
|
|
||||||
|
|
||||||
> :information_source: `debug = true` has been added to be able to debug requests
|
|
||||||
in the BuildKit daemon and see if the mirror is effectively used.
|
|
||||||
|
|
||||||
Then [create a `docker-container` builder](../reference/buildx_create.md)
|
|
||||||
that will use this BuildKit configuration:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create --use \
|
|
||||||
--name mybuilder \
|
|
||||||
--driver docker-container \
|
|
||||||
--config /etc/buildkitd.toml
|
|
||||||
```
|
|
||||||
|
|
||||||
Boot and [inspect `mybuilder`](../reference/buildx_inspect.md):
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx inspect --bootstrap
|
|
||||||
```
|
|
||||||
|
|
||||||
Build an image:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --load . -f-<<EOF
|
|
||||||
FROM alpine
|
|
||||||
RUN echo "hello world"
|
|
||||||
EOF
|
|
||||||
```
|
|
||||||
|
|
||||||
Now let's check the BuildKit logs in the builder container:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker logs buildx_buildkit_mybuilder0
|
|
||||||
```
|
|
||||||
```text
|
|
||||||
...
|
|
||||||
time="2022-02-06T17:47:48Z" level=debug msg="do request" request.header.accept="application/vnd.docker.container.image.v1+json, */*" request.header.user-agent=containerd/1.5.8+unknown request.method=GET spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
|
||||||
time="2022-02-06T17:47:48Z" level=debug msg="fetch response received" response.header.accept-ranges=bytes response.header.age=1356 response.header.alt-svc="h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"" response.header.cache-control="public, max-age=3600" response.header.content-length=1469 response.header.content-type=application/octet-stream response.header.date="Sun, 06 Feb 2022 17:25:17 GMT" response.header.etag="\"774380abda8f4eae9a149e5d5d3efc83\"" response.header.expires="Sun, 06 Feb 2022 18:25:17 GMT" response.header.last-modified="Wed, 24 Nov 2021 21:07:57 GMT" response.header.server=UploadServer response.header.x-goog-generation=1637788077652182 response.header.x-goog-hash="crc32c=V3DSrg==" response.header.x-goog-hash.1="md5=d0OAq9qPTq6aFJ5dXT78gw==" response.header.x-goog-metageneration=1 response.header.x-goog-storage-class=STANDARD response.header.x-goog-stored-content-encoding=identity response.header.x-goog-stored-content-length=1469 response.header.x-guploader-uploadid=ADPycduqQipVAXc3tzXmTzKQ2gTT6CV736B2J628smtD1iDytEyiYCgvvdD8zz9BT1J1sASUq9pW_ctUyC4B-v2jvhIxnZTlKg response.status="200 OK" spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
|
||||||
time="2022-02-06T17:47:48Z" level=debug msg="fetch response received" response.header.accept-ranges=bytes response.header.age=760 response.header.alt-svc="h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"" response.header.cache-control="public, max-age=3600" response.header.content-length=1471 response.header.content-type=application/octet-stream response.header.date="Sun, 06 Feb 2022 17:35:13 GMT" response.header.etag="\"35d688bd15327daafcdb4d4395e616a8\"" response.header.expires="Sun, 06 Feb 2022 18:35:13 GMT" response.header.last-modified="Wed, 24 Nov 2021 21:07:12 GMT" response.header.server=UploadServer response.header.x-goog-generation=1637788032100793 response.header.x-goog-hash="crc32c=aWgRjA==" response.header.x-goog-hash.1="md5=NdaIvRUyfar8201DleYWqA==" response.header.x-goog-metageneration=1 response.header.x-goog-storage-class=STANDARD response.header.x-goog-stored-content-encoding=identity response.header.x-goog-stored-content-length=1471 response.header.x-guploader-uploadid=ADPycdtR-gJYwC7yHquIkJWFFG8FovDySvtmRnZBqlO3yVDanBXh_VqKYt400yhuf0XbQ3ZMB9IZV2vlcyHezn_Pu3a1SMMtiw response.status="200 OK" spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
|
||||||
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
|
||||||
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
|
||||||
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
|
||||||
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
|
||||||
time="2022-02-06T17:47:48Z" level=debug msg="do request" request.header.accept="application/vnd.docker.image.rootfs.diff.tar.gzip, */*" request.header.user-agent=containerd/1.5.8+unknown request.method=GET spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
|
||||||
time="2022-02-06T17:47:48Z" level=debug msg="fetch response received" response.header.accept-ranges=bytes response.header.age=1356 response.header.alt-svc="h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"" response.header.cache-control="public, max-age=3600" response.header.content-length=2818413 response.header.content-type=application/octet-stream response.header.date="Sun, 06 Feb 2022 17:25:17 GMT" response.header.etag="\"1d55e7be5a77c4a908ad11bc33ebea1c\"" response.header.expires="Sun, 06 Feb 2022 18:25:17 GMT" response.header.last-modified="Wed, 24 Nov 2021 21:07:06 GMT" response.header.server=UploadServer response.header.x-goog-generation=1637788026431708 response.header.x-goog-hash="crc32c=ZojF+g==" response.header.x-goog-hash.1="md5=HVXnvlp3xKkIrRG8M+vqHA==" response.header.x-goog-metageneration=1 response.header.x-goog-storage-class=STANDARD response.header.x-goog-stored-content-encoding=identity response.header.x-goog-stored-content-length=2818413 response.header.x-guploader-uploadid=ADPycdsebqxiTBJqZ0bv9zBigjFxgQydD2ESZSkKchpE0ILlN9Ibko3C5r4fJTJ4UR9ddp-UBd-2v_4eRpZ8Yo2llW_j4k8WhQ response.status="200 OK" spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
|
||||||
...
|
|
||||||
```
|
|
||||||
|
|
||||||
As you can see, requests come from the GCR registry mirror (`response.header.x-goog*`).
|
|
||||||
@@ -1,33 +0,0 @@
|
|||||||
# Resource limiting
|
|
||||||
|
|
||||||
## Max parallelism
|
|
||||||
|
|
||||||
You can limit the parallelism of the BuildKit solver, which is particularly useful
|
|
||||||
for low-powered machines, using a [BuildKit daemon configuration](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md)
|
|
||||||
while creating a builder with the [`--config` flags](../reference/buildx_create.md#config).
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[worker.oci]
|
|
||||||
max-parallelism = 4
|
|
||||||
```
|
|
||||||
> `/etc/buildkitd.toml`
|
|
||||||
|
|
||||||
Now you can [create a `docker-container` builder](../reference/buildx_create.md)
|
|
||||||
that will use this BuildKit configuration to limit parallelism.
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx create --use \
|
|
||||||
--name mybuilder \
|
|
||||||
--driver docker-container \
|
|
||||||
--config /etc/buildkitd.toml
|
|
||||||
```
|
|
||||||
|
|
||||||
## Limit on TCP connections
|
|
||||||
|
|
||||||
We are also now limiting TCP connections to **4 per registry** with an additional
|
|
||||||
connection not used for layer pulls and pushes. This limitation will be able to
|
|
||||||
manage TCP connection per host to avoid your build being stuck while pulling
|
|
||||||
images. The additional connection is used for metadata requests
|
|
||||||
(image config retrieval) to enhance the overall build time.
|
|
||||||
|
|
||||||
More info: [moby/buildkit#2259](https://github.com/moby/buildkit/pull/2259)
|
|
||||||
@@ -5,7 +5,7 @@ docker buildx [OPTIONS] COMMAND
|
|||||||
```
|
```
|
||||||
|
|
||||||
<!---MARKER_GEN_START-->
|
<!---MARKER_GEN_START-->
|
||||||
Extended build capabilities with BuildKit
|
Build with BuildKit
|
||||||
|
|
||||||
### Subcommands
|
### Subcommands
|
||||||
|
|
||||||
@@ -27,17 +27,5 @@ Extended build capabilities with BuildKit
|
|||||||
| [`version`](buildx_version.md) | Show buildx version information |
|
| [`version`](buildx_version.md) | Show buildx version information |
|
||||||
|
|
||||||
|
|
||||||
### Options
|
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
|
||||||
| --- | --- | --- | --- |
|
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
## Examples
|
|
||||||
|
|
||||||
### <a name="builder"></a> Override the configured builder instance (--builder)
|
|
||||||
|
|
||||||
You can also use the `BUILDX_BUILDER` environment variable.
|
|
||||||
|
|||||||
@@ -13,18 +13,18 @@ Build from a file
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| `--builder string` | Override the configured builder instance |
|
||||||
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Build definition file |
|
| [`-f`](#file), [`--file stringArray`](#file) | Build definition file |
|
||||||
| `--load` | | | Shorthand for `--set=*.output=type=docker` |
|
| `--load` | Shorthand for --set=*.output=type=docker |
|
||||||
| `--metadata-file` | `string` | | Write build result metadata to the file |
|
| `--metadata-file string` | Write build result metadata to the file |
|
||||||
| [`--no-cache`](#no-cache) | | | Do not use cache when building the image |
|
| [`--no-cache`](#no-cache) | Do not use cache when building the image |
|
||||||
| [`--print`](#print) | | | Print the options without building |
|
| [`--print`](#print) | Print the options without building |
|
||||||
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
|
| [`--progress string`](#progress) | Set type of progress output (auto, plain, tty). Use plain to show container output |
|
||||||
| [`--pull`](#pull) | | | Always attempt to pull all referenced images |
|
| [`--pull`](#pull) | Always attempt to pull a newer version of the image |
|
||||||
| `--push` | | | Shorthand for `--set=*.output=type=registry` |
|
| `--push` | Shorthand for --set=*.output=type=registry |
|
||||||
| [`--set`](#set) | `stringArray` | | Override target value (e.g., `targetpattern.key=value`) |
|
| [`--set stringArray`](#set) | Override target value (eg: targetpattern.key=value) |
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
@@ -34,23 +34,16 @@ Build from a file
|
|||||||
Bake is a high-level build command. Each specified target will run in parallel
|
Bake is a high-level build command. Each specified target will run in parallel
|
||||||
as part of the build.
|
as part of the build.
|
||||||
|
|
||||||
Read [High-level build options](https://github.com/docker/buildx#high-level-build-options)
|
Read [High-level build options](https://github.com/docker/buildx#high-level-build-options) for an introduction.
|
||||||
for an introduction.
|
|
||||||
|
|
||||||
Please note that `buildx bake` command may receive backwards incompatible
|
Please note that `buildx bake` command may receive backwards incompatible features in the future if needed. We are looking for feedback on improving the command and extending the functionality further.
|
||||||
features in the future if needed. We are looking for feedback on improving the
|
|
||||||
command and extending the functionality further.
|
|
||||||
|
|
||||||
## Examples
|
## Examples
|
||||||
|
|
||||||
### <a name="builder"></a> Override the configured builder instance (--builder)
|
|
||||||
|
|
||||||
Same as [`buildx --builder`](buildx.md#builder).
|
|
||||||
|
|
||||||
### <a name="file"></a> Specify a build definition file (-f, --file)
|
### <a name="file"></a> Specify a build definition file (-f, --file)
|
||||||
|
|
||||||
By default, `buildx bake` looks for build definition files in the current
|
By default, `buildx bake` looks for build definition files in the current directory,
|
||||||
directory, the following are parsed:
|
the following are parsed:
|
||||||
|
|
||||||
- `docker-compose.yml`
|
- `docker-compose.yml`
|
||||||
- `docker-compose.yaml`
|
- `docker-compose.yaml`
|
||||||
@@ -96,148 +89,27 @@ $ docker buildx bake -f docker-compose.dev.yaml backend database
|
|||||||
...
|
...
|
||||||
```
|
```
|
||||||
|
|
||||||
You can also use a remote `git` bake definition:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake "https://github.com/docker/cli.git#v20.10.11" --print
|
|
||||||
#1 [internal] load git source https://github.com/docker/cli.git#v20.10.11
|
|
||||||
#1 0.745 e8f1871b077b64bcb4a13334b7146492773769f7 refs/tags/v20.10.11
|
|
||||||
#1 2.022 From https://github.com/docker/cli
|
|
||||||
#1 2.022 * [new tag] v20.10.11 -> v20.10.11
|
|
||||||
#1 DONE 2.9s
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"binary"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"binary": {
|
|
||||||
"context": "https://github.com/docker/cli.git#v20.10.11",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"args": {
|
|
||||||
"BASE_VARIANT": "alpine",
|
|
||||||
"GO_STRIP": "",
|
|
||||||
"VERSION": ""
|
|
||||||
},
|
|
||||||
"target": "binary",
|
|
||||||
"platforms": [
|
|
||||||
"local"
|
|
||||||
],
|
|
||||||
"output": [
|
|
||||||
"build"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
As you can see the context is fixed to `https://github.com/docker/cli.git` even if
|
|
||||||
[no context is actually defined](https://github.com/docker/cli/blob/2776a6d694f988c0c1df61cad4bfac0f54e481c8/docker-bake.hcl#L17-L26)
|
|
||||||
in the definition.
|
|
||||||
|
|
||||||
If you want to access the main context for bake command from a bake file
|
|
||||||
that has been imported remotely, you can use the `BAKE_CMD_CONTEXT` builtin var:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ cat https://raw.githubusercontent.com/tonistiigi/buildx/remote-test/docker-bake.hcl
|
|
||||||
target "default" {
|
|
||||||
context = BAKE_CMD_CONTEXT
|
|
||||||
dockerfile-inline = <<EOT
|
|
||||||
FROM alpine
|
|
||||||
WORKDIR /src
|
|
||||||
COPY . .
|
|
||||||
RUN ls -l && stop
|
|
||||||
EOT
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test" --print
|
|
||||||
{
|
|
||||||
"target": {
|
|
||||||
"default": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"dockerfile-inline": "FROM alpine\nWORKDIR /src\nCOPY . .\nRUN ls -l \u0026\u0026 stop\n"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ touch foo bar
|
|
||||||
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test"
|
|
||||||
...
|
|
||||||
> [4/4] RUN ls -l && stop:
|
|
||||||
#8 0.101 total 0
|
|
||||||
#8 0.102 -rw-r--r-- 1 root root 0 Jul 27 18:47 bar
|
|
||||||
#8 0.102 -rw-r--r-- 1 root root 0 Jul 27 18:47 foo
|
|
||||||
#8 0.102 /bin/sh: stop: not found
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test" "https://github.com/docker/cli.git#v20.10.11" --print
|
|
||||||
#1 [internal] load git source https://github.com/tonistiigi/buildx.git#remote-test
|
|
||||||
#1 0.429 577303add004dd7efeb13434d69ea030d35f7888 refs/heads/remote-test
|
|
||||||
#1 CACHED
|
|
||||||
{
|
|
||||||
"target": {
|
|
||||||
"default": {
|
|
||||||
"context": "https://github.com/docker/cli.git#v20.10.11",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"dockerfile-inline": "FROM alpine\nWORKDIR /src\nCOPY . .\nRUN ls -l \u0026\u0026 stop\n"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test" "https://github.com/docker/cli.git#v20.10.11"
|
|
||||||
...
|
|
||||||
> [4/4] RUN ls -l && stop:
|
|
||||||
#8 0.136 drwxrwxrwx 5 root root 4096 Jul 27 18:31 kubernetes
|
|
||||||
#8 0.136 drwxrwxrwx 3 root root 4096 Jul 27 18:31 man
|
|
||||||
#8 0.136 drwxrwxrwx 2 root root 4096 Jul 27 18:31 opts
|
|
||||||
#8 0.136 -rw-rw-rw- 1 root root 1893 Jul 27 18:31 poule.yml
|
|
||||||
#8 0.136 drwxrwxrwx 7 root root 4096 Jul 27 18:31 scripts
|
|
||||||
#8 0.136 drwxrwxrwx 3 root root 4096 Jul 27 18:31 service
|
|
||||||
#8 0.136 drwxrwxrwx 2 root root 4096 Jul 27 18:31 templates
|
|
||||||
#8 0.136 drwxrwxrwx 10 root root 4096 Jul 27 18:31 vendor
|
|
||||||
#8 0.136 -rwxrwxrwx 1 root root 9620 Jul 27 18:31 vendor.conf
|
|
||||||
#8 0.136 /bin/sh: stop: not found
|
|
||||||
```
|
|
||||||
|
|
||||||
### <a name="no-cache"></a> Do not use cache when building the image (--no-cache)
|
### <a name="no-cache"></a> Do not use cache when building the image (--no-cache)
|
||||||
|
|
||||||
Same as `build --no-cache`. Do not use cache when building the image.
|
Same as `build --no-cache`. Do not use cache when building the image.
|
||||||
|
|
||||||
### <a name="print"></a> Print the options without building (--print)
|
### <a name="print"></a> Print the options without building (--print)
|
||||||
|
|
||||||
Prints the resulting options of the targets desired to be built, in a JSON
|
Prints the resulting options of the targets desired to be built, in a JSON format,
|
||||||
format, without starting a build.
|
without starting a build.
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx bake -f docker-bake.hcl --print db
|
$ docker buildx bake -f docker-bake.hcl --print db
|
||||||
{
|
{
|
||||||
"group": {
|
"target": {
|
||||||
"default": {
|
"db": {
|
||||||
"targets": [
|
"context": "./",
|
||||||
"db"
|
"dockerfile": "Dockerfile",
|
||||||
]
|
"tags": [
|
||||||
}
|
"docker.io/tiborvass/db"
|
||||||
},
|
]
|
||||||
"target": {
|
}
|
||||||
"db": {
|
}
|
||||||
"context": "./",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"tags": [
|
|
||||||
"docker.io/tiborvass/db"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
@@ -264,6 +136,7 @@ $ docker buildx bake --progress=plain
|
|||||||
...
|
...
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
### <a name="pull"></a> Always attempt to pull a newer version of the image (--pull)
|
### <a name="pull"></a> Always attempt to pull a newer version of the image (--pull)
|
||||||
|
|
||||||
Same as `build --pull`.
|
Same as `build --pull`.
|
||||||
@@ -274,8 +147,10 @@ Same as `build --pull`.
|
|||||||
--set targetpattern.key[.subkey]=value
|
--set targetpattern.key[.subkey]=value
|
||||||
```
|
```
|
||||||
|
|
||||||
Override target configurations from command line. The pattern matching syntax
|
Override target configurations from command line. The pattern matching syntax is
|
||||||
is defined in https://golang.org/pkg/path/#Match.
|
defined in https://golang.org/pkg/path/#Match.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx bake --set target.args.mybuildarg=value
|
$ docker buildx bake --set target.args.mybuildarg=value
|
||||||
@@ -286,8 +161,8 @@ $ docker buildx bake --set foo*.no-cache # bypass caching only for
|
|||||||
```
|
```
|
||||||
|
|
||||||
Complete list of overridable fields:
|
Complete list of overridable fields:
|
||||||
`args`, `cache-from`, `cache-to`, `context`, `dockerfile`, `labels`, `no-cache`,
|
args, cache-from, cache-to, context, dockerfile, labels, no-cache, output, platform,
|
||||||
`output`, `platform`, `pull`, `secrets`, `ssh`, `tags`, `target`
|
pull, secrets, ssh, tags, target
|
||||||
|
|
||||||
### File definition
|
### File definition
|
||||||
|
|
||||||
@@ -309,7 +184,8 @@ groups to inherit from.
|
|||||||
Note: Design of bake command is work in progress, the user experience may change
|
Note: Design of bake command is work in progress, the user experience may change
|
||||||
based on feedback.
|
based on feedback.
|
||||||
|
|
||||||
HCL definition example:
|
|
||||||
|
**Example HCL definition**
|
||||||
|
|
||||||
```hcl
|
```hcl
|
||||||
group "default" {
|
group "default" {
|
||||||
@@ -334,104 +210,21 @@ target "db" {
|
|||||||
|
|
||||||
Complete list of valid target fields:
|
Complete list of valid target fields:
|
||||||
|
|
||||||
`args`, `cache-from`, `cache-to`, `context`, `contexts`, `dockerfile`, `inherits`, `labels`,
|
`args`, `cache-from`, `cache-to`, `context`, `dockerfile`, `inherits`, `labels`,
|
||||||
`no-cache`, `no-cache-filter`, `output`, `platform`, `pull`, `secrets`, `ssh`, `tags`, `target`
|
`no-cache`, `output`, `platform`, `pull`, `secrets`, `ssh`, `tags`, `target`
|
||||||
|
|
||||||
### Global scope attributes
|
|
||||||
|
|
||||||
You can define global scope attributes in HCL/JSON and use them for code reuse
|
|
||||||
and setting values for variables. This means you can do a "data-only" HCL file
|
|
||||||
with the values you want to set/override and use it in the list of regular
|
|
||||||
output files.
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
variable "FOO" {
|
|
||||||
default = "abc"
|
|
||||||
}
|
|
||||||
|
|
||||||
target "app" {
|
|
||||||
args = {
|
|
||||||
v1 = "pre-${FOO}"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
You can use this file directly:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --print app
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"app"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"app": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"args": {
|
|
||||||
"v1": "pre-abc"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Or create an override configuration file:
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# env.hcl
|
|
||||||
WHOAMI="myuser"
|
|
||||||
FOO="def-${WHOAMI}"
|
|
||||||
```
|
|
||||||
|
|
||||||
And invoke bake together with both of the files:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake -f docker-bake.hcl -f env.hcl --print app
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"app"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"app": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"args": {
|
|
||||||
"v1": "pre-def-myuser"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### HCL variables and functions
|
### HCL variables and functions
|
||||||
|
|
||||||
Similar to how Terraform provides a way to [define variables](https://www.terraform.io/docs/configuration/variables.html#declaring-an-input-variable),
|
Similar to how Terraform provides a way to [define variables](https://www.terraform.io/docs/configuration/variables.html#declaring-an-input-variable),
|
||||||
the HCL file format also supports variable block definitions. These can be used
|
the HCL file format also supports variable block definitions. These can be used
|
||||||
to define variables with values provided by the current environment, or a
|
to define variables with values provided by the current environment, or a default
|
||||||
default value when unset.
|
value when unset.
|
||||||
|
|
||||||
A [set of generally useful functions](https://github.com/docker/buildx/blob/master/bake/hclparser/stdlib.go)
|
|
||||||
provided by [go-cty](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib)
|
|
||||||
are available for use in HCL files. In addition, [user defined functions](https://github.com/hashicorp/hcl/tree/main/ext/userfunc)
|
|
||||||
are also supported.
|
|
||||||
|
|
||||||
#### Using interpolation to tag an image with the git sha
|
Example of using interpolation to tag an image with the git sha:
|
||||||
|
|
||||||
Bake supports variable blocks which are assigned to matching environment
|
```console
|
||||||
variables or default values.
|
$ cat <<'EOF' > docker-bake.hcl
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
variable "TAG" {
|
variable "TAG" {
|
||||||
default = "latest"
|
default = "latest"
|
||||||
}
|
}
|
||||||
@@ -443,81 +236,45 @@ group "default" {
|
|||||||
target "webapp" {
|
target "webapp" {
|
||||||
tags = ["docker.io/username/webapp:${TAG}"]
|
tags = ["docker.io/username/webapp:${TAG}"]
|
||||||
}
|
}
|
||||||
```
|
EOF
|
||||||
|
|
||||||
alternatively, in json format:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"variable": {
|
|
||||||
"TAG": {
|
|
||||||
"default": "latest"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": ["webapp"]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"webapp": {
|
|
||||||
"tags": ["docker.io/username/webapp:${TAG}"]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --print webapp
|
$ docker buildx bake --print webapp
|
||||||
{
|
{
|
||||||
"group": {
|
"target": {
|
||||||
"default": {
|
"webapp": {
|
||||||
"targets": [
|
"context": ".",
|
||||||
"webapp"
|
"dockerfile": "Dockerfile",
|
||||||
]
|
"tags": [
|
||||||
}
|
"docker.io/username/webapp:latest"
|
||||||
},
|
]
|
||||||
"target": {
|
}
|
||||||
"webapp": {
|
}
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"tags": [
|
|
||||||
"docker.io/username/webapp:latest"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ TAG=$(git rev-parse --short HEAD) docker buildx bake --print webapp
|
$ TAG=$(git rev-parse --short HEAD) docker buildx bake --print webapp
|
||||||
{
|
{
|
||||||
"group": {
|
"target": {
|
||||||
"default": {
|
"webapp": {
|
||||||
"targets": [
|
"context": ".",
|
||||||
"webapp"
|
"dockerfile": "Dockerfile",
|
||||||
]
|
"tags": [
|
||||||
}
|
"docker.io/username/webapp:985e9e9"
|
||||||
},
|
]
|
||||||
"target": {
|
}
|
||||||
"webapp": {
|
}
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"tags": [
|
|
||||||
"docker.io/username/webapp:985e9e9"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Using the `add` function
|
|
||||||
|
|
||||||
You can use [`go-cty` stdlib functions](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib).
|
A [set of generally useful functions](https://github.com/docker/buildx/blob/master/bake/hclparser/stdlib.go)
|
||||||
Here we are using the `add` function.
|
provided by [go-cty](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib)
|
||||||
|
are available for use in HCL files. In addition, [user defined functions](https://github.com/hashicorp/hcl/tree/main/ext/userfunc)
|
||||||
|
are also supported.
|
||||||
|
|
||||||
```hcl
|
Example of using the `add` function:
|
||||||
# docker-bake.hcl
|
|
||||||
|
```console
|
||||||
|
$ cat <<'EOF' > docker-bake.hcl
|
||||||
variable "TAG" {
|
variable "TAG" {
|
||||||
default = "latest"
|
default = "latest"
|
||||||
}
|
}
|
||||||
@@ -531,37 +288,26 @@ target "webapp" {
|
|||||||
buildno = "${add(123, 1)}"
|
buildno = "${add(123, 1)}"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
EOF
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --print webapp
|
$ docker buildx bake --print webapp
|
||||||
{
|
{
|
||||||
"group": {
|
"target": {
|
||||||
"default": {
|
"webapp": {
|
||||||
"targets": [
|
"context": ".",
|
||||||
"webapp"
|
"dockerfile": "Dockerfile",
|
||||||
]
|
"args": {
|
||||||
}
|
"buildno": "124"
|
||||||
},
|
}
|
||||||
"target": {
|
|
||||||
"webapp": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"args": {
|
|
||||||
"buildno": "124"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Defining an `increment` function
|
Example of defining an `increment` function:
|
||||||
|
|
||||||
It also supports [user defined functions](https://github.com/hashicorp/hcl/tree/main/ext/userfunc).
|
```console
|
||||||
The following example defines a simple `increment` function.
|
$ cat <<'EOF' > docker-bake.hcl
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
function "increment" {
|
function "increment" {
|
||||||
params = [number]
|
params = [number]
|
||||||
result = number + 1
|
result = number + 1
|
||||||
@@ -576,37 +322,27 @@ target "webapp" {
|
|||||||
buildno = "${increment(123)}"
|
buildno = "${increment(123)}"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
```
|
EOF
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --print webapp
|
$ docker buildx bake --print webapp
|
||||||
{
|
{
|
||||||
"group": {
|
"target": {
|
||||||
"default": {
|
"webapp": {
|
||||||
"targets": [
|
"context": ".",
|
||||||
"webapp"
|
"dockerfile": "Dockerfile",
|
||||||
]
|
"args": {
|
||||||
}
|
"buildno": "124"
|
||||||
},
|
}
|
||||||
"target": {
|
|
||||||
"webapp": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"args": {
|
|
||||||
"buildno": "124"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Only adding tags if a variable is not empty using a `notequal`
|
Example of only adding tags if a variable is not empty using a `notequal`
|
||||||
|
function:
|
||||||
|
|
||||||
Here we are using the conditional `notequal` function which is just for
|
```console
|
||||||
symmetry with the `equal` one.
|
$ cat <<'EOF' > docker-bake.hcl
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
variable "TAG" {default="" }
|
variable "TAG" {default="" }
|
||||||
|
|
||||||
group "default" {
|
group "default" {
|
||||||
@@ -623,364 +359,18 @@ target "webapp" {
|
|||||||
notequal("",TAG) ? "my-image:${TAG}": "",
|
notequal("",TAG) ? "my-image:${TAG}": "",
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
```
|
EOF
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --print webapp
|
$ docker buildx bake --print webapp
|
||||||
{
|
{
|
||||||
"group": {
|
"target": {
|
||||||
"default": {
|
"webapp": {
|
||||||
"targets": [
|
"context": ".",
|
||||||
"webapp"
|
"dockerfile": "Dockerfile",
|
||||||
]
|
"tags": [
|
||||||
}
|
"my-image:latest"
|
||||||
},
|
]
|
||||||
"target": {
|
|
||||||
"webapp": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"tags": [
|
|
||||||
"my-image:latest"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Using variables in functions
|
|
||||||
|
|
||||||
You can refer variables to other variables like the target blocks can. Stdlib
|
|
||||||
functions can also be called but user functions can't at the moment.
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
variable "REPO" {
|
|
||||||
default = "user/repo"
|
|
||||||
}
|
|
||||||
|
|
||||||
function "tag" {
|
|
||||||
params = [tag]
|
|
||||||
result = ["${REPO}:${tag}"]
|
|
||||||
}
|
|
||||||
|
|
||||||
target "webapp" {
|
|
||||||
tags = tag("v1")
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --print webapp
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"webapp"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"webapp": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"tags": [
|
|
||||||
"user/repo:v1"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Using variables in variables across files
|
|
||||||
|
|
||||||
When multiple files are specified, one file can use variables defined in
|
|
||||||
another file.
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake1.hcl
|
|
||||||
variable "FOO" {
|
|
||||||
default = upper("${BASE}def")
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "BAR" {
|
|
||||||
default = "-${FOO}-"
|
|
||||||
}
|
|
||||||
|
|
||||||
target "app" {
|
|
||||||
args = {
|
|
||||||
v1 = "pre-${BAR}"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake2.hcl
|
|
||||||
variable "BASE" {
|
|
||||||
default = "abc"
|
|
||||||
}
|
|
||||||
|
|
||||||
target "app" {
|
|
||||||
args = {
|
|
||||||
v2 = "${FOO}-post"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake -f docker-bake1.hcl -f docker-bake2.hcl --print app
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"app"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"app": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"args": {
|
|
||||||
"v1": "pre--ABCDEF-",
|
|
||||||
"v2": "ABCDEF-post"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Using typed variables
|
|
||||||
|
|
||||||
Non-string variables are also accepted. The value passed with env is parsed
|
|
||||||
into suitable type first.
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
variable "FOO" {
|
|
||||||
default = 3
|
|
||||||
}
|
|
||||||
|
|
||||||
variable "IS_FOO" {
|
|
||||||
default = true
|
|
||||||
}
|
|
||||||
|
|
||||||
target "app" {
|
|
||||||
args = {
|
|
||||||
v1 = FOO > 5 ? "higher" : "lower"
|
|
||||||
v2 = IS_FOO ? "yes" : "no"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --print app
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"app"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"app": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "Dockerfile",
|
|
||||||
"args": {
|
|
||||||
"v1": "lower",
|
|
||||||
"v2": "yes"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Defining additional build contexts and linking targets
|
|
||||||
|
|
||||||
In addition to the main `context` key that defines the build context each target can also define additional named contexts with a map defined with key `contexts`. These values map to the `--build-context` flag in the [build command](buildx_build.md#build-context).
|
|
||||||
|
|
||||||
Inside the Dockerfile these contexts can be used with the `FROM` instruction or `--from` flag.
|
|
||||||
|
|
||||||
The value can be a local source directory, container image (with docker-image:// prefix), Git URL, HTTP URL or a name of another target in the Bake file (with target: prefix).
|
|
||||||
|
|
||||||
#### Pinning alpine image
|
|
||||||
|
|
||||||
```Dockerfile
|
|
||||||
# Dockerfile
|
|
||||||
FROM alpine
|
|
||||||
RUN echo "Hello world"
|
|
||||||
```
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
target "app" {
|
|
||||||
contexts = {
|
|
||||||
alpine = "docker-image://alpine:3.13"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Using a secondary source directory
|
|
||||||
|
|
||||||
```Dockerfile
|
|
||||||
# Dockerfile
|
|
||||||
|
|
||||||
FROM scratch AS src
|
|
||||||
|
|
||||||
FROM golang
|
|
||||||
COPY --from=src . .
|
|
||||||
```
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
target "app" {
|
|
||||||
contexts = {
|
|
||||||
src = "../path/to/source"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Using a result of one target as a base image in another target
|
|
||||||
|
|
||||||
To use a result of one target as a build context of another, specify the target name with the `target:` prefix.
|
|
||||||
|
|
||||||
```Dockerfile
|
|
||||||
# Dockerfile
|
|
||||||
FROM baseapp
|
|
||||||
RUN echo "Hello world"
|
|
||||||
```
|
|
||||||
|
|
||||||
```hcl
|
|
||||||
# docker-bake.hcl
|
|
||||||
|
|
||||||
target "base" {
|
|
||||||
dockerfile = "baseapp.Dockerfile"
|
|
||||||
}
|
|
||||||
|
|
||||||
target "app" {
|
|
||||||
contexts = {
|
|
||||||
baseapp = "target:base"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Please note that in most cases you should just use a single multi-stage Dockerfile with multiple targets for similar behavior. This case is recommended when you have multiple Dockerfiles that can't be easily merged into one.
|
|
||||||
|
|
||||||
### Extension field with Compose
|
|
||||||
|
|
||||||
[Special extension](https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension)
|
|
||||||
field `x-bake` can be used in your compose file to evaluate fields that are not
|
|
||||||
(yet) available in the [build definition](https://github.com/compose-spec/compose-spec/blob/master/build.md#build-definition).
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
# docker-compose.yml
|
|
||||||
services:
|
|
||||||
addon:
|
|
||||||
image: ct-addon:bar
|
|
||||||
build:
|
|
||||||
context: .
|
|
||||||
dockerfile: ./Dockerfile
|
|
||||||
args:
|
|
||||||
CT_ECR: foo
|
|
||||||
CT_TAG: bar
|
|
||||||
x-bake:
|
|
||||||
tags:
|
|
||||||
- ct-addon:foo
|
|
||||||
- ct-addon:alp
|
|
||||||
platforms:
|
|
||||||
- linux/amd64
|
|
||||||
- linux/arm64
|
|
||||||
cache-from:
|
|
||||||
- user/app:cache
|
|
||||||
- type=local,src=path/to/cache
|
|
||||||
cache-to: type=local,dest=path/to/cache
|
|
||||||
pull: true
|
|
||||||
|
|
||||||
aws:
|
|
||||||
image: ct-fake-aws:bar
|
|
||||||
build:
|
|
||||||
dockerfile: ./aws.Dockerfile
|
|
||||||
args:
|
|
||||||
CT_ECR: foo
|
|
||||||
CT_TAG: bar
|
|
||||||
x-bake:
|
|
||||||
secret:
|
|
||||||
- id=mysecret,src=./secret
|
|
||||||
- id=mysecret2,src=./secret2
|
|
||||||
platforms: linux/arm64
|
|
||||||
output: type=docker
|
|
||||||
no-cache: true
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx bake --print
|
|
||||||
{
|
|
||||||
"group": {
|
|
||||||
"default": {
|
|
||||||
"targets": [
|
|
||||||
"aws",
|
|
||||||
"addon"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"target": {
|
|
||||||
"addon": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "./Dockerfile",
|
|
||||||
"args": {
|
|
||||||
"CT_ECR": "foo",
|
|
||||||
"CT_TAG": "bar"
|
|
||||||
},
|
|
||||||
"tags": [
|
|
||||||
"ct-addon:foo",
|
|
||||||
"ct-addon:alp"
|
|
||||||
],
|
|
||||||
"cache-from": [
|
|
||||||
"user/app:cache",
|
|
||||||
"type=local,src=path/to/cache"
|
|
||||||
],
|
|
||||||
"cache-to": [
|
|
||||||
"type=local,dest=path/to/cache"
|
|
||||||
],
|
|
||||||
"platforms": [
|
|
||||||
"linux/amd64",
|
|
||||||
"linux/arm64"
|
|
||||||
],
|
|
||||||
"pull": true
|
|
||||||
},
|
|
||||||
"aws": {
|
|
||||||
"context": ".",
|
|
||||||
"dockerfile": "./aws.Dockerfile",
|
|
||||||
"args": {
|
|
||||||
"CT_ECR": "foo",
|
|
||||||
"CT_TAG": "bar"
|
|
||||||
},
|
|
||||||
"tags": [
|
|
||||||
"ct-fake-aws:bar"
|
|
||||||
],
|
|
||||||
"secret": [
|
|
||||||
"id=mysecret,src=./secret",
|
|
||||||
"id=mysecret2,src=./secret2"
|
|
||||||
],
|
|
||||||
"platforms": [
|
|
||||||
"linux/arm64"
|
|
||||||
],
|
|
||||||
"output": [
|
|
||||||
"type=docker"
|
|
||||||
],
|
|
||||||
"no-cache": true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Complete list of valid fields for `x-bake`:
|
|
||||||
|
|
||||||
`tags`, `cache-from`, `cache-to`, `secret`, `ssh`, `platforms`, `output`,
|
|
||||||
`pull`, `no-cache`, `no-cache-filter`
|
|
||||||
|
|
||||||
### Built-in variables
|
|
||||||
|
|
||||||
* `BAKE_CMD_CONTEXT` can be used to access the main `context` for bake command
|
|
||||||
from a bake file that has been [imported remotely](#file).
|
|
||||||
* `BAKE_LOCAL_PLATFORM` returns the current platform's default platform
|
|
||||||
specification (e.g. `linux/amd64`).
|
|
||||||
|
|||||||
@@ -13,36 +13,30 @@ Start a build
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--add-host`](https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host) | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
|
| [`--add-host stringSlice`](https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host) | Add a custom host-to-IP mapping (host:ip) |
|
||||||
| [`--allow`](#allow) | `stringSlice` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
| [`--allow stringSlice`](#allow) | Allow extra privileged entitlement, e.g. network.host, security.insecure |
|
||||||
| [`--build-arg`](#build-arg) | `stringArray` | | Set build-time variables |
|
| [`--build-arg stringArray`](https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg) | Set build-time variables |
|
||||||
| [`--build-context`](#build-context) | `stringArray` | | Additional build contexts (e.g., name=path) |
|
| `--builder string` | Override the configured builder instance |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| [`--cache-from stringArray`](#cache-from) | External cache sources (eg. user/app:cache, type=local,src=path/to/dir) |
|
||||||
| [`--cache-from`](#cache-from) | `stringArray` | | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`) |
|
| [`--cache-to stringArray`](#cache-to) | Cache export destinations (eg. user/app:cache, type=local,dest=path/to/dir) |
|
||||||
| [`--cache-to`](#cache-to) | `stringArray` | | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`) |
|
| [`-f`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f), [`--file string`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f) | Name of the Dockerfile (Default is 'PATH/Dockerfile') |
|
||||||
| [`--cgroup-parent`](https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent) | `string` | | Optional parent cgroup for the container |
|
| `--iidfile string` | Write the image ID to the file |
|
||||||
| [`-f`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f), [`--file`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f) | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
| `--label stringArray` | Set metadata for an image |
|
||||||
| `--iidfile` | `string` | | Write the image ID to the file |
|
| [`--load`](#load) | Shorthand for --output=type=docker |
|
||||||
| `--label` | `stringArray` | | Set metadata for an image |
|
| `--metadata-file string` | Write build result metadata to the file |
|
||||||
| [`--load`](#load) | | | Shorthand for `--output=type=docker` |
|
| `--network string` | Set the networking mode for the RUN instructions during build |
|
||||||
| [`--metadata-file`](#metadata-file) | `string` | | Write build result metadata to the file |
|
| `--no-cache` | Do not use cache when building the image |
|
||||||
| `--network` | `string` | `default` | Set the networking mode for the `RUN` instructions during build |
|
| [`-o`](#output), [`--output stringArray`](#output) | Output destination (format: type=local,dest=path) |
|
||||||
| `--no-cache` | | | Do not use cache when building the image |
|
| [`--platform stringArray`](#platform) | Set target platform for build |
|
||||||
| `--no-cache-filter` | `stringArray` | | Do not cache specified stages |
|
| [`--progress string`](#progress) | Set type of progress output (auto, plain, tty). Use plain to show container output |
|
||||||
| [`-o`](#output), [`--output`](#output) | `stringArray` | | Output destination (format: `type=local,dest=path`) |
|
| `--pull` | Always attempt to pull a newer version of the image |
|
||||||
| [`--platform`](#platform) | `stringArray` | | Set target platform for build |
|
| [`--push`](#push) | Shorthand for --output=type=registry |
|
||||||
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
|
| `--secret stringArray` | Secret file to expose to the build: id=mysecret,src=/local/secret |
|
||||||
| `--pull` | | | Always attempt to pull all referenced images |
|
| `--ssh stringArray` | SSH agent socket or keys to expose to the build (format: default|<id>[=<socket>|<key>[,<key>]]) |
|
||||||
| [`--push`](#push) | | | Shorthand for `--output=type=registry` |
|
| [`-t`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t), [`--tag stringArray`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t) | Name and optionally a tag in the 'name:tag' format |
|
||||||
| `-q`, `--quiet` | | | Suppress the build output and print image ID on success |
|
| [`--target string`](https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target) | Set the target build stage to build. |
|
||||||
| [`--secret`](#secret) | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
|
|
||||||
| [`--shm-size`](#shm-size) | `bytes` | `0` | Size of `/dev/shm` |
|
|
||||||
| [`--ssh`](#ssh) | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
|
|
||||||
| [`-t`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t), [`--tag`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t) | `stringArray` | | Name and optionally a tag (format: `name:tag`) |
|
|
||||||
| [`--target`](https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target) | `string` | | Set the target build stage to build |
|
|
||||||
| [`--ulimit`](#ulimit) | `ulimit` | | Ulimit options |
|
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
@@ -54,200 +48,76 @@ to the UI of `docker build` command and takes the same flags and arguments.
|
|||||||
|
|
||||||
For documentation on most of these flags, refer to the [`docker build`
|
For documentation on most of these flags, refer to the [`docker build`
|
||||||
documentation](https://docs.docker.com/engine/reference/commandline/build/). In
|
documentation](https://docs.docker.com/engine/reference/commandline/build/). In
|
||||||
here we'll document a subset of the new flags.
|
here we’ll document a subset of the new flags.
|
||||||
|
|
||||||
## Examples
|
## Examples
|
||||||
|
|
||||||
### <a name="allow"></a> Allow extra privileged entitlement (--allow)
|
### <a name="platform"></a> Set the target platforms for the build (--platform)
|
||||||
|
|
||||||
```
|
```
|
||||||
--allow=ENTITLEMENT
|
--platform=value[,value]
|
||||||
```
|
```
|
||||||
|
|
||||||
Allow extra privileged entitlement. List of entitlements:
|
Set the target platform for the build. All `FROM` commands inside the Dockerfile
|
||||||
|
without their own `--platform` flag will pull base images for this platform and
|
||||||
|
this value will also be the platform of the resulting image. The default value
|
||||||
|
will be the current platform of the buildkit daemon.
|
||||||
|
|
||||||
- `network.host` - Allows executions with host networking.
|
When using `docker-container` driver with `buildx`, this flag can accept multiple
|
||||||
- `security.insecure` - Allows executions without sandbox. See
|
values as an input separated by a comma. With multiple values the result will be
|
||||||
[related Dockerfile extensions](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md#run---securityinsecuresandbox).
|
built for all of the specified platforms and joined together into a single manifest
|
||||||
|
list.
|
||||||
|
|
||||||
For entitlements to be enabled, the `buildkitd` daemon also needs to allow them
|
If the `Dockerfile` needs to invoke the `RUN` command, the builder needs runtime
|
||||||
with `--allow-insecure-entitlement` (see [`create --buildkitd-flags`](buildx_create.md#buildkitd-flags))
|
support for the specified platform. In a clean setup, you can only execute `RUN`
|
||||||
|
commands for your system architecture.
|
||||||
|
If your kernel supports [`binfmt_misc`](https://en.wikipedia.org/wiki/Binfmt_misc)
|
||||||
|
launchers for secondary architectures, buildx will pick them up automatically.
|
||||||
|
Docker desktop releases come with `binfmt_misc` automatically configured for `arm64`
|
||||||
|
and `arm` architectures. You can see what runtime platforms your current builder
|
||||||
|
instance supports by running `docker buildx inspect --bootstrap`.
|
||||||
|
|
||||||
|
Inside a `Dockerfile`, you can access the current platform value through
|
||||||
|
`TARGETPLATFORM` build argument. Please refer to the [`docker build`
|
||||||
|
documentation](https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope)
|
||||||
|
for the full description of automatic platform argument variants.
|
||||||
|
|
||||||
|
The formatting for the platform specifier is defined in the [containerd source
|
||||||
|
code](https://github.com/containerd/containerd/blob/v1.4.3/platforms/platforms.go#L63).
|
||||||
|
|
||||||
**Examples**
|
**Examples**
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx create --use --name insecure-builder --buildkitd-flags '--allow-insecure-entitlement security.insecure'
|
$ docker buildx build --platform=linux/arm64 .
|
||||||
$ docker buildx build --allow security.insecure .
|
$ docker buildx build --platform=linux/amd64,linux/arm64,linux/arm/v7 .
|
||||||
|
$ docker buildx build --platform=darwin .
|
||||||
```
|
```
|
||||||
|
|
||||||
### <a name="build-arg"></a> Set build-time variables (--build-arg)
|
### <a name="progress"></a> Set type of progress output (--progress)
|
||||||
|
|
||||||
Same as [`docker build` command](https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg).
|
```
|
||||||
|
--progress=VALUE
|
||||||
|
```
|
||||||
|
|
||||||
There are also useful built-in build args like:
|
Set type of progress output (auto, plain, tty). Use plain to show container
|
||||||
|
output (default "auto").
|
||||||
|
|
||||||
* `BUILDKIT_CONTEXT_KEEP_GIT_DIR=<bool>` trigger git context to keep the `.git` directory
|
> You can also use the `BUILDKIT_PROGRESS` environment variable to set
|
||||||
* `BUILDKIT_INLINE_BUILDINFO_ATTRS=<bool>` inline build info attributes in image config or not
|
> its value.
|
||||||
* `BUILDKIT_INLINE_CACHE=<bool>` inline cache metadata to image config or not
|
|
||||||
* `BUILDKIT_MULTI_PLATFORM=<bool>` opt into deterministic output regardless of multi-platform output or not
|
The following example uses `plain` output during the build:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx build --build-arg BUILDKIT_MULTI_PLATFORM=1 .
|
$ docker buildx build --load --progress=plain .
|
||||||
```
|
|
||||||
|
|
||||||
More built-in build args can be found in [dockerfile frontend docs](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#built-in-build-args).
|
#1 [internal] load build definition from Dockerfile
|
||||||
|
#1 transferring dockerfile: 227B 0.0s done
|
||||||
|
#1 DONE 0.1s
|
||||||
|
|
||||||
### <a name="build-context"></a> Additional build contexts (--build-context)
|
#2 [internal] load .dockerignore
|
||||||
|
#2 transferring context: 129B 0.0s done
|
||||||
```
|
#2 DONE 0.0s
|
||||||
--build-context=name=VALUE
|
...
|
||||||
```
|
|
||||||
|
|
||||||
Define additional build context with specified contents. In Dockerfile the context can be accessed when `FROM name` or `--from=name` is used.
|
|
||||||
When Dockerfile defines a stage with the same name it is overwritten.
|
|
||||||
|
|
||||||
The value can be a local source directory, container image (with docker-image:// prefix), Git or HTTP URL.
|
|
||||||
|
|
||||||
Replace `alpine:latest` with a pinned one:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --build-context alpine=docker-image://alpine@sha256:0123456789 .
|
|
||||||
```
|
|
||||||
|
|
||||||
Expose a secondary local source directory:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --build-context project=path/to/project/source .
|
|
||||||
# docker buildx build --build-context project=https://github.com/myuser/project.git .
|
|
||||||
```
|
|
||||||
|
|
||||||
```Dockerfile
|
|
||||||
FROM alpine
|
|
||||||
COPY --from=project myfile /
|
|
||||||
```
|
|
||||||
|
|
||||||
### <a name="builder"></a> Override the configured builder instance (--builder)
|
|
||||||
|
|
||||||
Same as [`buildx --builder`](buildx.md#builder).
|
|
||||||
|
|
||||||
### <a name="cache-from"></a> Use an external cache source for a build (--cache-from)
|
|
||||||
|
|
||||||
```
|
|
||||||
--cache-from=[NAME|type=TYPE[,KEY=VALUE]]
|
|
||||||
```
|
|
||||||
|
|
||||||
Use an external cache source for a build. Supported types are `registry`,
|
|
||||||
`local` and `gha`.
|
|
||||||
|
|
||||||
- [`registry` source](https://github.com/moby/buildkit#registry-push-image-and-cache-separately)
|
|
||||||
can import cache from a cache manifest or (special) image configuration on the
|
|
||||||
registry.
|
|
||||||
- [`local` source](https://github.com/moby/buildkit#local-directory-1) can
|
|
||||||
import cache from local files previously exported with `--cache-to`.
|
|
||||||
- [`gha` source](https://github.com/moby/buildkit#github-actions-cache-experimental)
|
|
||||||
can import cache from a previously exported cache with `--cache-to` in your
|
|
||||||
GitHub repository
|
|
||||||
|
|
||||||
If no type is specified, `registry` exporter is used with a specified reference.
|
|
||||||
|
|
||||||
`docker` driver currently only supports importing build cache from the registry.
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --cache-from=user/app:cache .
|
|
||||||
$ docker buildx build --cache-from=user/app .
|
|
||||||
$ docker buildx build --cache-from=type=registry,ref=user/app .
|
|
||||||
$ docker buildx build --cache-from=type=local,src=path/to/cache .
|
|
||||||
$ docker buildx build --cache-from=type=gha .
|
|
||||||
```
|
|
||||||
|
|
||||||
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
|
|
||||||
|
|
||||||
### <a name="cache-to"></a> Export build cache to an external cache destination (--cache-to)
|
|
||||||
|
|
||||||
```
|
|
||||||
--cache-to=[NAME|type=TYPE[,KEY=VALUE]]
|
|
||||||
```
|
|
||||||
|
|
||||||
Export build cache to an external cache destination. Supported types are
|
|
||||||
`registry`, `local`, `inline` and `gha`.
|
|
||||||
|
|
||||||
- [`registry` type](https://github.com/moby/buildkit#registry-push-image-and-cache-separately) exports build cache to a cache manifest in the registry.
|
|
||||||
- [`local` type](https://github.com/moby/buildkit#local-directory-1) type
|
|
||||||
exports cache to a local directory on the client.
|
|
||||||
- [`inline` type](https://github.com/moby/buildkit#inline-push-image-and-cache-together)
|
|
||||||
type writes the cache metadata into the image configuration.
|
|
||||||
- [`gha` type](https://github.com/moby/buildkit#github-actions-cache-experimental)
|
|
||||||
  type exports cache through the [GitHub Actions Cache service API](https://github.com/tonistiigi/go-actions-cache/blob/master/api.md#authentication).
|
|
||||||
|
|
||||||
`docker` driver currently only supports exporting inline cache metadata to image
|
|
||||||
configuration. Alternatively, `--build-arg BUILDKIT_INLINE_CACHE=1` can be used
|
|
||||||
to trigger inline cache exporter.
|
|
||||||
|
|
||||||
Attribute key:
|
|
||||||
|
|
||||||
- `mode` - Specifies how many layers are exported with the cache. `min` only
|
|
||||||
exports layers already in the final build stage, `max` exports layers for
|
|
||||||
all stages. Metadata is always exported for the whole build.
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --cache-to=user/app:cache .
|
|
||||||
$ docker buildx build --cache-to=type=inline .
|
|
||||||
$ docker buildx build --cache-to=type=registry,ref=user/app .
|
|
||||||
$ docker buildx build --cache-to=type=local,dest=path/to/cache .
|
|
||||||
$ docker buildx build --cache-to=type=gha .
|
|
||||||
```
|
|
||||||
|
|
||||||
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
|
|
||||||
|
|
||||||
### <a name="load"></a> Load the single-platform build result to `docker images` (--load)
|
|
||||||
|
|
||||||
Shorthand for [`--output=type=docker`](#docker). Will automatically load the
|
|
||||||
single-platform build result to `docker images`.
|
|
||||||
|
|
||||||
### <a name="metadata-file"></a> Write build result metadata to the file (--metadata-file)
|
|
||||||
|
|
||||||
To output build metadata such as the image digest, pass the `--metadata-file` flag.
|
|
||||||
The metadata will be written as a JSON object to the specified file. The
|
|
||||||
directory of the specified file must already exist and be writable.
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --load --metadata-file metadata.json .
|
|
||||||
$ cat metadata.json
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"containerimage.buildinfo": {
|
|
||||||
"frontend": "dockerfile.v0",
|
|
||||||
"attrs": {
|
|
||||||
"context": "https://github.com/crazy-max/buildkit-buildsources-test.git#master",
|
|
||||||
"filename": "Dockerfile",
|
|
||||||
"source": "docker/dockerfile:master"
|
|
||||||
},
|
|
||||||
"sources": [
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
|
|
||||||
"pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/library/alpine:3.13",
|
|
||||||
"pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"containerimage.config.digest": "sha256:2937f66a9722f7f4a2df583de2f8cb97fc9196059a410e7f00072fc918930e66",
|
|
||||||
"containerimage.descriptor": {
|
|
||||||
"annotations": {
|
|
||||||
"config.digest": "sha256:2937f66a9722f7f4a2df583de2f8cb97fc9196059a410e7f00072fc918930e66",
|
|
||||||
"org.opencontainers.image.created": "2022-02-08T21:28:03Z"
|
|
||||||
},
|
|
||||||
"digest": "sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3",
|
|
||||||
"mediaType": "application/vnd.oci.image.manifest.v1+json",
|
|
||||||
"size": 506
|
|
||||||
},
|
|
||||||
"containerimage.digest": "sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3"
|
|
||||||
}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### <a name="output"></a> Set the export action for the build result (-o, --output)
|
### <a name="output"></a> Set the export action for the build result (-o, --output)
|
||||||
@@ -268,6 +138,8 @@ If just the path is specified as a value, `buildx` will use the local exporter
|
|||||||
with this path as the destination. If the value is "-", `buildx` will use `tar`
|
with this path as the destination. If the value is "-", `buildx` will use `tar`
|
||||||
exporter and write to `stdout`.
|
exporter and write to `stdout`.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx build -o . .
|
$ docker buildx build -o . .
|
||||||
$ docker buildx build -o outdir .
|
$ docker buildx build -o outdir .
|
||||||
@@ -321,7 +193,7 @@ The most common usecase for multi-platform images is to directly push to a regis
|
|||||||
Attribute keys:
|
Attribute keys:
|
||||||
|
|
||||||
- `dest` - destination path where tarball will be written. If not specified the
|
- `dest` - destination path where tarball will be written. If not specified the
|
||||||
tar will be loaded automatically to the current docker instance.
|
tar will be loaded automatically to the current docker instance.
|
||||||
- `context` - name for the docker context where to import the result
|
- `context` - name for the docker context where to import the result
|
||||||
|
|
||||||
#### `image`
|
#### `image`
|
||||||
@@ -339,174 +211,89 @@ Attribute keys:
|
|||||||
|
|
||||||
The `registry` exporter is a shortcut for `type=image,push=true`.
|
The `registry` exporter is a shortcut for `type=image,push=true`.
|
||||||
|
|
||||||
### <a name="platform"></a> Set the target platforms for the build (--platform)
|
|
||||||
|
|
||||||
```
|
|
||||||
--platform=value[,value]
|
|
||||||
```
|
|
||||||
|
|
||||||
Set the target platform for the build. All `FROM` commands inside the Dockerfile
|
|
||||||
without their own `--platform` flag will pull base images for this platform and
|
|
||||||
this value will also be the platform of the resulting image. The default value
|
|
||||||
will be the current platform of the buildkit daemon.
|
|
||||||
|
|
||||||
When using `docker-container` driver with `buildx`, this flag can accept multiple
|
|
||||||
values as an input separated by a comma. With multiple values the result will be
|
|
||||||
built for all of the specified platforms and joined together into a single manifest
|
|
||||||
list.
|
|
||||||
|
|
||||||
If the `Dockerfile` needs to invoke the `RUN` command, the builder needs runtime
|
|
||||||
support for the specified platform. In a clean setup, you can only execute `RUN`
|
|
||||||
commands for your system architecture.
|
|
||||||
If your kernel supports [`binfmt_misc`](https://en.wikipedia.org/wiki/Binfmt_misc)
|
|
||||||
launchers for secondary architectures, buildx will pick them up automatically.
|
|
||||||
Docker desktop releases come with `binfmt_misc` automatically configured for `arm64`
|
|
||||||
and `arm` architectures. You can see what runtime platforms your current builder
|
|
||||||
instance supports by running `docker buildx inspect --bootstrap`.
|
|
||||||
|
|
||||||
Inside a `Dockerfile`, you can access the current platform value through
|
|
||||||
`TARGETPLATFORM` build argument. Please refer to the [`docker build`
|
|
||||||
documentation](https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope)
|
|
||||||
for the full description of automatic platform argument variants.
|
|
||||||
|
|
||||||
The formatting for the platform specifier is defined in the [containerd source
|
|
||||||
code](https://github.com/containerd/containerd/blob/v1.4.3/platforms/platforms.go#L63).
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --platform=linux/arm64 .
|
|
||||||
$ docker buildx build --platform=linux/amd64,linux/arm64,linux/arm/v7 .
|
|
||||||
$ docker buildx build --platform=darwin .
|
|
||||||
```
|
|
||||||
|
|
||||||
### <a name="progress"></a> Set type of progress output (--progress)
|
|
||||||
|
|
||||||
```
|
|
||||||
--progress=VALUE
|
|
||||||
```
|
|
||||||
|
|
||||||
Set type of progress output (auto, plain, tty). Use plain to show container
|
|
||||||
output (default "auto").
|
|
||||||
|
|
||||||
> You can also use the `BUILDKIT_PROGRESS` environment variable to set
|
|
||||||
> its value.
|
|
||||||
|
|
||||||
The following example uses `plain` output during the build:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --load --progress=plain .
|
|
||||||
|
|
||||||
#1 [internal] load build definition from Dockerfile
|
|
||||||
#1 transferring dockerfile: 227B 0.0s done
|
|
||||||
#1 DONE 0.1s
|
|
||||||
|
|
||||||
#2 [internal] load .dockerignore
|
|
||||||
#2 transferring context: 129B 0.0s done
|
|
||||||
#2 DONE 0.0s
|
|
||||||
...
|
|
||||||
```
|
|
||||||
|
|
||||||
### <a name="push"></a> Push the build result to a registry (--push)
|
### <a name="push"></a> Push the build result to a registry (--push)
|
||||||
|
|
||||||
Shorthand for [`--output=type=registry`](#registry). Will automatically push the
|
Shorthand for [`--output=type=registry`](#registry). Will automatically push the
|
||||||
build result to registry.
|
build result to registry.
|
||||||
|
|
||||||
### <a name="secret"></a> Secret to expose to the build (--secret)
|
### <a name="load"></a> Load the single-platform build result to `docker images` (--load)
|
||||||
|
|
||||||
|
Shorthand for [`--output=type=docker`](#docker). Will automatically load the
|
||||||
|
single-platform build result to `docker images`.
|
||||||
|
|
||||||
|
### <a name="cache-from"></a> Use an external cache source for a build (--cache-from)
|
||||||
|
|
||||||
```
|
```
|
||||||
--secret=[type=TYPE[,KEY=VALUE]]
|
--cache-from=[NAME|type=TYPE[,KEY=VALUE]]
|
||||||
```
|
```
|
||||||
|
|
||||||
Exposes secret to the build. The secret can be used by the build using
|
Use an external cache source for a build. Supported types are `registry` and `local`.
|
||||||
[`RUN --mount=type=secret` mount](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#run---mounttypesecret).
|
The `registry` source can import cache from a cache manifest or (special) image
|
||||||
|
configuration on the registry. The `local` source can import cache from local
|
||||||
|
files previously exported with `--cache-to`.
|
||||||
|
|
||||||
If `type` is unset it will be detected. Supported types are:
|
If no type is specified, `registry` exporter is used with a specified reference.
|
||||||
|
|
||||||
#### `file`
|
`docker` driver currently only supports importing build cache from the registry.
|
||||||
|
|
||||||
Attribute keys:
|
**Examples**
|
||||||
|
|
||||||
- `id` - ID of the secret. Defaults to basename of the `src` path.
|
|
||||||
- `src`, `source` - Secret filename. `id` used if unset.
|
|
||||||
|
|
||||||
```dockerfile
|
|
||||||
# syntax=docker/dockerfile:1.3
|
|
||||||
FROM python:3
|
|
||||||
RUN pip install awscli
|
|
||||||
RUN --mount=type=secret,id=aws,target=/root/.aws/credentials \
|
|
||||||
aws s3 cp s3://... ...
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx build --secret id=aws,src=$HOME/.aws/credentials .
|
$ docker buildx build --cache-from=user/app:cache .
|
||||||
|
$ docker buildx build --cache-from=user/app .
|
||||||
|
$ docker buildx build --cache-from=type=registry,ref=user/app .
|
||||||
|
$ docker buildx build --cache-from=type=local,src=path/to/cache .
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `env`
|
### <a name="cache-to"></a> Export build cache to an external cache destination (--cache-to)
|
||||||
|
|
||||||
Attribute keys:
|
|
||||||
|
|
||||||
- `id` - ID of the secret. Defaults to `env` name.
|
|
||||||
- `env` - Secret environment variable. `id` used if unset, otherwise will look for `src`, `source` if `id` unset.
|
|
||||||
|
|
||||||
```dockerfile
|
|
||||||
# syntax=docker/dockerfile:1.3
|
|
||||||
FROM node:alpine
|
|
||||||
RUN --mount=type=bind,target=. \
|
|
||||||
--mount=type=secret,id=SECRET_TOKEN \
|
|
||||||
SECRET_TOKEN=$(cat /run/secrets/SECRET_TOKEN) yarn run test
|
|
||||||
```
|
```
|
||||||
|
--cache-to=[NAME|type=TYPE[,KEY=VALUE]]
|
||||||
|
```
|
||||||
|
|
||||||
|
Export build cache to an external cache destination. Supported types are `registry`,
|
||||||
|
`local` and `inline`. Registry exports build cache to a cache manifest in the
|
||||||
|
registry, local exports cache to a local directory on the client and inline writes
|
||||||
|
the cache metadata into the image configuration.
|
||||||
|
|
||||||
|
`docker` driver currently only supports exporting inline cache metadata to image
|
||||||
|
configuration. Alternatively, `--build-arg BUILDKIT_INLINE_CACHE=1` can be used
|
||||||
|
to trigger inline cache exporter.
|
||||||
|
|
||||||
|
Attribute key:
|
||||||
|
|
||||||
|
- `mode` - Specifies how many layers are exported with the cache. “min” on only
|
||||||
|
exports layers already in the final build stage, “max” exports layers for
|
||||||
|
all stages. Metadata is always exported for the whole build.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ SECRET_TOKEN=token docker buildx build --secret id=SECRET_TOKEN .
|
$ docker buildx build --cache-to=user/app:cache .
|
||||||
|
$ docker buildx build --cache-to=type=inline .
|
||||||
|
$ docker buildx build --cache-to=type=registry,ref=user/app .
|
||||||
|
$ docker buildx build --cache-to=type=local,dest=path/to/cache .
|
||||||
```
|
```
|
||||||
|
|
||||||
### <a name="shm-size"></a> Size of /dev/shm (--shm-size)
|
### <a name="allow"></a> Allow extra privileged entitlement (--allow)
|
||||||
|
|
||||||
The format is `<number><unit>`. `number` must be greater than `0`. Unit is
|
|
||||||
optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g`
|
|
||||||
(gigabytes). If you omit the unit, the system uses bytes.
|
|
||||||
|
|
||||||
### <a name="ssh"></a> SSH agent socket or keys to expose to the build (--ssh)
|
|
||||||
|
|
||||||
```
|
```
|
||||||
--ssh=default|<id>[=<socket>|<key>[,<key>]]
|
--allow=ENTITLEMENT
|
||||||
```
|
```
|
||||||
|
|
||||||
This can be useful when some commands in your Dockerfile need specific SSH
|
Allow extra privileged entitlement. List of entitlements:
|
||||||
authentication (e.g., cloning a private repository).
|
|
||||||
|
|
||||||
`--ssh` exposes SSH agent socket or keys to the build and can be used with the
|
- `network.host` - Allows executions with host networking.
|
||||||
[`RUN --mount=type=ssh` mount](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#run---mounttypessh).
|
- `security.insecure` - Allows executions without sandbox. See
|
||||||
|
[related Dockerfile extensions](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md#run---securityinsecuresandbox).
|
||||||
|
|
||||||
Example to access Gitlab using an SSH agent socket:
|
For entitlements to be enabled, the `buildkitd` daemon also needs to allow them
|
||||||
|
with `--allow-insecure-entitlement` (see [`create --buildkitd-flags`](buildx_create.md#--buildkitd-flags-flags))
|
||||||
|
|
||||||
```dockerfile
|
**Examples**
|
||||||
# syntax=docker/dockerfile:1.3
|
|
||||||
FROM alpine
|
|
||||||
RUN apk add --no-cache openssh-client
|
|
||||||
RUN mkdir -p -m 0700 ~/.ssh && ssh-keyscan gitlab.com >> ~/.ssh/known_hosts
|
|
||||||
RUN --mount=type=ssh ssh -q -T git@gitlab.com 2>&1 | tee /hello
|
|
||||||
# "Welcome to GitLab, @GITLAB_USERNAME_ASSOCIATED_WITH_SSHKEY" should be printed here
|
|
||||||
# with the type of build progress is defined as `plain`.
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ eval $(ssh-agent)
|
$ docker buildx create --use --name insecure-builder --buildkitd-flags '--allow-insecure-entitlement security.insecure'
|
||||||
$ ssh-add ~/.ssh/id_rsa
|
$ docker buildx build --allow security.insecure .
|
||||||
(Input your passphrase here)
|
|
||||||
$ docker buildx build --ssh default=$SSH_AUTH_SOCK .
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### <a name="ulimit"></a> Set ulimits (--ulimit)
|
|
||||||
|
|
||||||
`--ulimit` is specified with a soft and hard limit as such:
|
|
||||||
`<type>=<soft limit>[:<hard limit>]`, for example:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx build --ulimit nofile=1024:1024 .
|
|
||||||
```
|
|
||||||
|
|
||||||
> **Note**
|
|
||||||
>
|
|
||||||
> If you do not provide a `hard limit`, the `soft limit` is used
|
|
||||||
> for both values. If no `ulimits` are set, they are inherited from
|
|
||||||
> the default `ulimits` set on the daemon.
|
|
||||||
|
|||||||
@@ -9,19 +9,19 @@ Create a new builder instance
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--append`](#append) | | | Append a node to builder instead of changing it |
|
| [`--append`](#append) | Append a node to builder instead of changing it |
|
||||||
| `--bootstrap` | | | Boot builder after creation |
|
| `--builder string` | Override the configured builder instance |
|
||||||
| [`--buildkitd-flags`](#buildkitd-flags) | `string` | | Flags for buildkitd daemon |
|
| [`--buildkitd-flags string`](#buildkitd-flags) | Flags for buildkitd daemon |
|
||||||
| [`--config`](#config) | `string` | | BuildKit config file |
|
| [`--config string`](#config) | BuildKit config file |
|
||||||
| [`--driver`](#driver) | `string` | | Driver to use (available: `docker`, `docker-container`, `kubernetes`) |
|
| [`--driver string`](#driver) | Driver to use (available: []) |
|
||||||
| [`--driver-opt`](#driver-opt) | `stringArray` | | Options for the driver |
|
| [`--driver-opt stringArray`](#driver-opt) | Options for the driver |
|
||||||
| [`--leave`](#leave) | | | Remove a node from builder instead of changing it |
|
| [`--leave`](#leave) | Remove a node from builder instead of changing it |
|
||||||
| [`--name`](#name) | `string` | | Builder instance name |
|
| [`--name string`](#name) | Builder instance name |
|
||||||
| [`--node`](#node) | `string` | | Create/modify node with given name |
|
| [`--node string`](#node) | Create/modify node with given name |
|
||||||
| [`--platform`](#platform) | `stringArray` | | Fixed platforms for current node |
|
| [`--platform stringArray`](#platform) | Fixed platforms for current node |
|
||||||
| [`--use`](#use) | | | Set the current builder instance |
|
| [`--use`](#use) | Set the current builder instance |
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
@@ -47,6 +47,8 @@ The `--append` flag changes the action of the command to append a new node to an
|
|||||||
existing builder specified by `--name`. Buildx will choose an appropriate node
|
existing builder specified by `--name`. Buildx will choose an appropriate node
|
||||||
for a build based on the platforms it supports.
|
for a build based on the platforms it supports.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx create mycontext1
|
$ docker buildx create mycontext1
|
||||||
eager_beaver
|
eager_beaver
|
||||||
@@ -62,9 +64,11 @@ eager_beaver
|
|||||||
```
|
```
|
||||||
|
|
||||||
Adds flags when starting the buildkitd daemon. They take precedence over the
|
Adds flags when starting the buildkitd daemon. They take precedence over the
|
||||||
configuration file specified by [`--config`](#config). See `buildkitd --help`
|
configuration file specified by [`--config`](#--config-file). See `buildkitd --help`
|
||||||
for the available flags.
|
for the available flags.
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
```
|
```
|
||||||
--buildkitd-flags '--debug --debugaddr 0.0.0.0:6666'
|
--buildkitd-flags '--debug --debugaddr 0.0.0.0:6666'
|
||||||
```
|
```
|
||||||
@@ -76,14 +80,9 @@ for the available flags.
|
|||||||
```
|
```
|
||||||
|
|
||||||
Specifies the configuration file for the buildkitd daemon to use. The configuration
|
Specifies the configuration file for the buildkitd daemon to use. The configuration
|
||||||
can be overridden by [`--buildkitd-flags`](#buildkitd-flags).
|
can be overridden by [`--buildkitd-flags`](#--buildkitd-flags-flags).
|
||||||
See an [example buildkitd configuration file](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md).
|
See an [example buildkitd configuration file](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md).
|
||||||
|
|
||||||
Note that if you create a `docker-container` builder and have specified
|
|
||||||
certificates for registries in the `buildkitd.toml` configuration, the files
|
|
||||||
will be copied into the container under `/etc/buildkit/certs` and configuration
|
|
||||||
will be updated to reflect that.
|
|
||||||
|
|
||||||
### <a name="driver"></a> Set the builder driver to use (--driver)
|
### <a name="driver"></a> Set the builder driver to use (--driver)
|
||||||
|
|
||||||
```
|
```
|
||||||
@@ -93,30 +92,17 @@ will be updated to reflect that.
|
|||||||
Sets the builder driver to be used. There are two available drivers, each have
|
Sets the builder driver to be used. There are two available drivers, each have
|
||||||
their own specificities.
|
their own specificities.
|
||||||
|
|
||||||
#### `docker` driver
|
- `docker` - Uses the builder that is built into the docker daemon. With this
|
||||||
|
driver, the [`--load`](buildx_build.md#--load) flag is implied by default on
|
||||||
|
`buildx build`. However, building multi-platform images or exporting cache is
|
||||||
|
not currently supported.
|
||||||
|
- `docker-container` - Uses a buildkit container that will be spawned via docker.
|
||||||
|
With this driver, both building multi-platform images and exporting cache are
|
||||||
|
supported. However, images built will not automatically appear in `docker images`
|
||||||
|
(see [`build --load`](buildx_build.md#--load)).
|
||||||
|
- `kubernetes` - Uses a kubernetes pods. With this driver, you can spin up pods
|
||||||
|
with defined buildkit container image to build your images.
|
||||||
|
|
||||||
Uses the builder that is built into the docker daemon. With this driver,
|
|
||||||
the [`--load`](buildx_build.md#load) flag is implied by default on
|
|
||||||
`buildx build`. However, building multi-platform images or exporting cache is
|
|
||||||
not currently supported.
|
|
||||||
|
|
||||||
#### `docker-container` driver
|
|
||||||
|
|
||||||
Uses a BuildKit container that will be spawned via docker. With this driver,
|
|
||||||
both building multi-platform images and exporting cache are supported.
|
|
||||||
|
|
||||||
Unlike `docker` driver, built images will not automatically appear in
|
|
||||||
`docker images` and [`build --load`](buildx_build.md#load) needs to be used
|
|
||||||
to achieve that.
|
|
||||||
|
|
||||||
#### `kubernetes` driver
|
|
||||||
|
|
||||||
Uses a kubernetes pods. With this driver, you can spin up pods with defined
|
|
||||||
BuildKit container image to build your images.
|
|
||||||
|
|
||||||
Unlike `docker` driver, built images will not automatically appear in
|
|
||||||
`docker images` and [`build --load`](buildx_build.md#load) needs to be used
|
|
||||||
to achieve that.
|
|
||||||
|
|
||||||
### <a name="driver-opt"></a> Set additional driver-specific options (--driver-opt)
|
### <a name="driver-opt"></a> Set additional driver-specific options (--driver-opt)
|
||||||
|
|
||||||
@@ -128,22 +114,24 @@ Passes additional driver-specific options. Details for each driver:
|
|||||||
|
|
||||||
- `docker` - No driver options
|
- `docker` - No driver options
|
||||||
- `docker-container`
|
- `docker-container`
|
||||||
- `image=IMAGE` - Sets the container image to be used for running buildkit.
|
- `image=IMAGE` - Sets the container image to be used for running buildkit.
|
||||||
- `network=NETMODE` - Sets the network mode for running the buildkit container.
|
- `network=NETMODE` - Sets the network mode for running the buildkit container.
|
||||||
- `cgroup-parent=CGROUP` - Sets the cgroup parent of the buildkit container if docker is using the "cgroupfs" driver. Defaults to `/docker/buildx`.
|
- Example:
|
||||||
|
|
||||||
|
```console
|
||||||
|
--driver docker-container --driver-opt image=moby/buildkit:master,network=host
|
||||||
|
```
|
||||||
- `kubernetes`
|
- `kubernetes`
|
||||||
- `image=IMAGE` - Sets the container image to be used for running buildkit.
|
- `image=IMAGE` - Sets the container image to be used for running buildkit.
|
||||||
- `namespace=NS` - Sets the Kubernetes namespace. Defaults to the current namespace.
|
- `namespace=NS` - Sets the Kubernetes namespace. Defaults to the current namespace.
|
||||||
- `replicas=N` - Sets the number of `Pod` replicas. Defaults to 1.
|
- `replicas=N` - Sets the number of `Pod` replicas. Defaults to 1.
|
||||||
- `requests.cpu` - Sets the request CPU value specified in units of Kubernetes CPU. Example `requests.cpu=100m`, `requests.cpu=2`
|
- `requests.cpu` - Sets the request CPU value specified in units of Kubernetes CPU. Example `requests.cpu=100m`, `requests.cpu=2`
|
||||||
- `requests.memory` - Sets the request memory value specified in bytes or with a valid suffix. Example `requests.memory=500Mi`, `requests.memory=4G`
|
- `requests.memory` - Sets the request memory value specified in bytes or with a valid suffix. Example `requests.memory=500Mi`, `requests.memory=4G`
|
||||||
- `limits.cpu` - Sets the limit CPU value specified in units of Kubernetes CPU. Example `limits.cpu=100m`, `limits.cpu=2`
|
- `limits.cpu` - Sets the limit CPU value specified in units of Kubernetes CPU. Example `limits.cpu=100m`, `limits.cpu=2`
|
||||||
- `limits.memory` - Sets the limit memory value specified in bytes or with a valid suffix. Example `limits.memory=500Mi`, `limits.memory=4G`
|
- `limits.memory` - Sets the limit memory value specified in bytes or with a valid suffix. Example `limits.memory=500Mi`, `limits.memory=4G`
|
||||||
- `nodeselector="label1=value1,label2=value2"` - Sets the kv of `Pod` nodeSelector. No Defaults. Example `nodeselector=kubernetes.io/arch=arm64`
|
- `nodeselector="label1=value1,label2=value2"` - Sets the kv of `Pod` nodeSelector. No Defaults. Example `nodeselector=kubernetes.io/arch=arm64`
|
||||||
- `rootless=(true|false)` - Run the container as a non-root user without `securityContext.privileged`. [Using Ubuntu host kernel is recommended](https://github.com/moby/buildkit/blob/master/docs/rootless.md). Defaults to false.
|
- `rootless=(true|false)` - Run the container as a non-root user without `securityContext.privileged`. [Using Ubuntu host kernel is recommended](https://github.com/moby/buildkit/blob/master/docs/rootless.md). Defaults to false.
|
||||||
- `loadbalance=(sticky|random)` - Load-balancing strategy. If set to "sticky", the pod is chosen using the hash of the context path. Defaults to "sticky"
|
- `loadbalance=(sticky|random)` - Load-balancing strategy. If set to "sticky", the pod is chosen using the hash of the context path. Defaults to "sticky"
|
||||||
- `qemu.install=(true|false)` - Install QEMU emulation for multi platforms support.
|
|
||||||
- `qemu.image=IMAGE` - Sets the QEMU emulation image. Defaults to `tonistiigi/binfmt:latest`
|
|
||||||
|
|
||||||
### <a name="leave"></a> Remove a node from a builder (--leave)
|
### <a name="leave"></a> Remove a node from a builder (--leave)
|
||||||
|
|
||||||
@@ -151,6 +139,8 @@ The `--leave` flag changes the action of the command to remove a node from a
|
|||||||
builder. The builder needs to be specified with `--name` and node that is removed
|
builder. The builder needs to be specified with `--name` and node that is removed
|
||||||
is set with `--node`.
|
is set with `--node`.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx create --name mybuilder --node mybuilder0 --leave
|
$ docker buildx create --name mybuilder --node mybuilder0 --leave
|
||||||
```
|
```
|
||||||
@@ -174,7 +164,7 @@ The `--node` flag specifies the name of the node to be created or modified. If
|
|||||||
none is specified, it is the name of the builder it belongs to, with an index
|
none is specified, it is the name of the builder it belongs to, with an index
|
||||||
number suffix.
|
number suffix.
|
||||||
|
|
||||||
### <a name="platform"></a> Set the platforms supported by the node (--platform)
|
### <a name="platform"></a> Set the platforms supported by the node
|
||||||
|
|
||||||
```
|
```
|
||||||
--platform PLATFORMS
|
--platform PLATFORMS
|
||||||
@@ -186,12 +176,14 @@ will also automatically detect the platforms it supports, but manual values take
|
|||||||
priority over the detected ones and can be used when multiple nodes support
|
priority over the detected ones and can be used when multiple nodes support
|
||||||
building for the same platform.
|
building for the same platform.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx create --platform linux/amd64
|
$ docker buildx create --platform linux/amd64
|
||||||
$ docker buildx create --platform linux/arm64,linux/arm/v8
|
$ docker buildx create --platform linux/arm64,linux/arm/v8
|
||||||
```
|
```
|
||||||
|
|
||||||
### <a name="use"></a> Automatically switch to the newly created builder (--use)
|
### <a name="use"></a> Automatically switch to the newly created builder
|
||||||
|
|
||||||
The `--use` flag automatically switches the current builder to the newly created
|
The `--use` flag automatically switches the current builder to the newly created
|
||||||
one. Equivalent to running `docker buildx use $(docker buildx create ...)`.
|
one. Equivalent to running `docker buildx use $(docker buildx create ...)`.
|
||||||
|
|||||||
@@ -9,17 +9,11 @@ Disk usage
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| `--builder string` | Override the configured builder instance |
|
||||||
| `--filter` | `filter` | | Provide filter values |
|
| `--filter filter` | Provide filter values |
|
||||||
| `--verbose` | | | Provide a more verbose output |
|
| `--verbose` | Provide a more verbose output |
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
## Examples
|
|
||||||
|
|
||||||
### <a name="builder"></a> Override the configured builder instance (--builder)
|
|
||||||
|
|
||||||
Same as [`buildx --builder`](buildx.md#builder).
|
|
||||||
@@ -12,15 +12,9 @@ Commands to work on images in registry
|
|||||||
| Name | Description |
|
| Name | Description |
|
||||||
| --- | --- |
|
| --- | --- |
|
||||||
| [`create`](buildx_imagetools_create.md) | Create a new image based on source images |
|
| [`create`](buildx_imagetools_create.md) | Create a new image based on source images |
|
||||||
| [`inspect`](buildx_imagetools_inspect.md) | Show details of an image in the registry |
|
| [`inspect`](buildx_imagetools_inspect.md) | Show details of image in the registry |
|
||||||
|
|
||||||
|
|
||||||
### Options
|
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
|
||||||
| --- | --- | --- | --- |
|
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
@@ -28,9 +22,3 @@ Commands to work on images in registry
|
|||||||
|
|
||||||
Imagetools contains commands for working with manifest lists in the registry.
|
Imagetools contains commands for working with manifest lists in the registry.
|
||||||
These commands are useful for inspecting multi-platform build results.
|
These commands are useful for inspecting multi-platform build results.
|
||||||
|
|
||||||
## Examples
|
|
||||||
|
|
||||||
### <a name="builder"></a> Override the configured builder instance (--builder)
|
|
||||||
|
|
||||||
Same as [`buildx --builder`](buildx.md#builder).
|
|
||||||
|
|||||||
@@ -9,19 +9,22 @@ Create a new image based on source images
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--append`](#append) | | | Append to existing manifest |
|
| [`--append`](#append) | Append to existing manifest |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| `--builder string` | Override the configured builder instance |
|
||||||
| [`--dry-run`](#dry-run) | | | Show final image instead of pushing |
|
| [`--dry-run`](#dry-run) | Show final image instead of pushing |
|
||||||
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Read source descriptor from file |
|
| [`-f`](#file), [`--file stringArray`](#file) | Read source descriptor from file |
|
||||||
| [`-t`](#tag), [`--tag`](#tag) | `stringArray` | | Set reference for new image |
|
| [`-t`](#tag), [`--tag stringArray`](#tag) | Set reference for new image |
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
## Description
|
## Description
|
||||||
|
|
||||||
|
Imagetools contains commands for working with manifest lists in the registry.
|
||||||
|
These commands are useful for inspecting multi-platform build results.
|
||||||
|
|
||||||
Create a new manifest list based on source manifests. The source manifests can
|
Create a new manifest list based on source manifests. The source manifests can
|
||||||
be manifest lists or single platform distribution manifests and must already
|
be manifest lists or single platform distribution manifests and must already
|
||||||
exist in the registry where the new manifest is created. If only one source is
|
exist in the registry where the new manifest is created. If only one source is
|
||||||
@@ -34,10 +37,6 @@ specified, create performs a carbon copy.
|
|||||||
Use the `--append` flag to append the new sources to an existing manifest list
|
Use the `--append` flag to append the new sources to an existing manifest list
|
||||||
in the destination.
|
in the destination.
|
||||||
|
|
||||||
### <a name="builder"></a> Override the configured builder instance (--builder)
|
|
||||||
|
|
||||||
Same as [`buildx --builder`](buildx.md#builder).
|
|
||||||
|
|
||||||
### <a name="dry-run"></a> Show final image instead of pushing (--dry-run)
|
### <a name="dry-run"></a> Show final image instead of pushing (--dry-run)
|
||||||
|
|
||||||
Use the `--dry-run` flag to not push the image, just show it.
|
Use the `--dry-run` flag to not push the image, just show it.
|
||||||
@@ -54,15 +53,16 @@ or a JSON of OCI descriptor object.
|
|||||||
In order to define annotations or additional platform properties like `os.version` and
|
In order to define annotations or additional platform properties like `os.version` and
|
||||||
`os.features` you need to add them in the OCI descriptor object encoded in JSON.
|
`os.features` you need to add them in the OCI descriptor object encoded in JSON.
|
||||||
|
|
||||||
```console
|
```
|
||||||
$ docker buildx imagetools inspect --raw alpine | jq '.manifests[0] | .platform."os.version"="10.1"' > descr.json
|
docker buildx imagetools inspect --raw alpine | jq '.manifests[0] | .platform."os.version"="10.1"' > descr.json
|
||||||
$ docker buildx imagetools create -f descr.json myuser/image
|
docker buildx imagetools create -f descr.json myuser/image
|
||||||
```
|
```
|
||||||
|
|
||||||
The descriptor in the file is merged with existing descriptor in the registry if it exists.
|
The descriptor in the file is merged with existing descriptor in the registry if it exists.
|
||||||
|
|
||||||
The supported fields for the descriptor are defined in [OCI spec](https://github.com/opencontainers/image-spec/blob/master/descriptor.md#properties) .
|
The supported fields for the descriptor are defined in [OCI spec](https://github.com/opencontainers/image-spec/blob/master/descriptor.md#properties) .
|
||||||
|
|
||||||
|
|
||||||
### <a name="tag"></a> Set reference for new image (-t, --tag)
|
### <a name="tag"></a> Set reference for new image (-t, --tag)
|
||||||
|
|
||||||
```
|
```
|
||||||
@@ -71,7 +71,10 @@ The supported fields for the descriptor are defined in [OCI spec](https://github
|
|||||||
|
|
||||||
Use the `-t` or `--tag` flag to set the name of the image to be created.
|
Use the `-t` or `--tag` flag to set the name of the image to be created.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx imagetools create --dry-run alpine@sha256:5c40b3c27b9f13c873fefb2139765c56ce97fd50230f1f2d5c91e55dec171907 sha256:c4ba6347b0e4258ce6a6de2401619316f982b7bcc529f73d2a410d0097730204
|
$ docker buildx imagetools create --dry-run alpine@sha256:5c40b3c27b9f13c873fefb2139765c56ce97fd50230f1f2d5c91e55dec171907 sha256:c4ba6347b0e4258ce6a6de2401619316f982b7bcc529f73d2a410d0097730204
|
||||||
|
|
||||||
$ docker buildx imagetools create -t tonistiigi/myapp -f image1 -f image2
|
$ docker buildx imagetools create -t tonistiigi/myapp -f image1 -f image2
|
||||||
```
|
```
|
||||||
|
|||||||
@@ -5,628 +5,43 @@ docker buildx imagetools inspect [OPTIONS] NAME
|
|||||||
```
|
```
|
||||||
|
|
||||||
<!---MARKER_GEN_START-->
|
<!---MARKER_GEN_START-->
|
||||||
Show details of an image in the registry
|
Show details of image in the registry
|
||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| `--builder string` | Override the configured builder instance |
|
||||||
| [`--format`](#format) | `string` | `{{.Manifest}}` | Format the output using the given Go template |
|
| [`--raw`](#raw) | Show original JSON manifest |
|
||||||
| [`--raw`](#raw) | | | Show original, unformatted JSON manifest |
|
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
## Description
|
## Description
|
||||||
|
|
||||||
Show details of an image in the registry.
|
Show details of image in the registry.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx imagetools inspect alpine
|
$ docker buildx imagetools inspect alpine
|
||||||
|
|
||||||
Name: docker.io/library/alpine:latest
|
Name: docker.io/library/alpine:latest
|
||||||
MediaType: application/vnd.docker.distribution.manifest.list.v2+json
|
MediaType: application/vnd.docker.distribution.manifest.list.v2+json
|
||||||
Digest: sha256:21a3deaa0d32a8057914f36584b5288d2e5ecc984380bc0118285c70fa8c9300
|
Digest: sha256:28ef97b8686a0b5399129e9b763d5b7e5ff03576aa5580d6f4182a49c5fe1913
|
||||||
|
|
||||||
Manifests:
|
Manifests:
|
||||||
Name: docker.io/library/alpine:latest@sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3
|
Name: docker.io/library/alpine:latest@sha256:5c40b3c27b9f13c873fefb2139765c56ce97fd50230f1f2d5c91e55dec171907
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
||||||
Platform: linux/amd64
|
Platform: linux/amd64
|
||||||
|
|
||||||
Name: docker.io/library/alpine:latest@sha256:e047bc2af17934d38c5a7fa9f46d443f1de3a7675546402592ef805cfa929f9d
|
Name: docker.io/library/alpine:latest@sha256:c4ba6347b0e4258ce6a6de2401619316f982b7bcc529f73d2a410d0097730204
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
||||||
Platform: linux/arm/v6
|
Platform: linux/arm/v6
|
||||||
|
...
|
||||||
Name: docker.io/library/alpine:latest@sha256:8483ecd016885d8dba70426fda133c30466f661bb041490d525658f1aac73822
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/arm/v7
|
|
||||||
|
|
||||||
Name: docker.io/library/alpine:latest@sha256:c74f1b1166784193ea6c8f9440263b9be6cae07dfe35e32a5df7a31358ac2060
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/arm64/v8
|
|
||||||
|
|
||||||
Name: docker.io/library/alpine:latest@sha256:2689e157117d2da668ad4699549e55eba1ceb79cb7862368b30919f0488213f4
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/386
|
|
||||||
|
|
||||||
Name: docker.io/library/alpine:latest@sha256:2042a492bcdd847a01cd7f119cd48caa180da696ed2aedd085001a78664407d6
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/ppc64le
|
|
||||||
|
|
||||||
Name: docker.io/library/alpine:latest@sha256:49e322ab6690e73a4909f787bcbdb873631264ff4a108cddfd9f9c249ba1d58e
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/s390x
|
|
||||||
```
|
|
||||||
|
|
||||||
## Examples
|
|
||||||
|
|
||||||
### <a name="builder"></a> Override the configured builder instance (--builder)
|
|
||||||
|
|
||||||
Same as [`buildx --builder`](buildx.md#builder).
|
|
||||||
|
|
||||||
### <a name="format"></a> Format the output (--format)
|
|
||||||
|
|
||||||
Format the output using the given Go template. Defaults to `{{.Manifest}}` if
|
|
||||||
unset. Following fields are available:
|
|
||||||
|
|
||||||
* `.Name`: provides the reference of the image
|
|
||||||
* `.Manifest`: provides the manifest or manifest list
|
|
||||||
* `.Image`: provides the image config
|
|
||||||
* `.BuildInfo`: provides [build info from image config](https://github.com/moby/buildkit/blob/master/docs/build-repro.md#image-config)
|
|
||||||
|
|
||||||
#### `.Name`
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect alpine --format "{{.Name}}"
|
|
||||||
Name: docker.io/library/alpine:latest
|
|
||||||
```
|
|
||||||
|
|
||||||
#### `.Manifest`
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect crazymax/loop --format "{{.Manifest}}"
|
|
||||||
Name: docker.io/crazymax/loop:latest
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Digest: sha256:08602e7340970e92bde5e0a2e887c1fde4d9ae753d1e05efb4c8ef3b609f97f1
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect moby/buildkit:master --format "{{.Manifest}}"
|
|
||||||
Name: docker.io/moby/buildkit:master
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.list.v2+json
|
|
||||||
Digest: sha256:3183f7ce54d1efb44c34b84f428ae10aaf141e553c6b52a7ff44cc7083a05a66
|
|
||||||
|
|
||||||
Manifests:
|
|
||||||
Name: docker.io/moby/buildkit:master@sha256:667d28c9fb33820ce686887a717a148e89fa77f9097f9352996bbcce99d352b1
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/amd64
|
|
||||||
|
|
||||||
Name: docker.io/moby/buildkit:master@sha256:71789527b64ab3d7b3de01d364b449cd7f7a3da758218fbf73b9c9aae05a6775
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/arm/v7
|
|
||||||
|
|
||||||
Name: docker.io/moby/buildkit:master@sha256:fb64667e1ce6ab0d05478f3a8402af07b27737598dcf9a510fb1d792b13a66be
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/arm64
|
|
||||||
|
|
||||||
Name: docker.io/moby/buildkit:master@sha256:1c3ddf95a0788e23f72f25800c05abc4458946685e2b66788c3d978cde6da92b
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/s390x
|
|
||||||
|
|
||||||
Name: docker.io/moby/buildkit:master@sha256:05bcde6d460a284e5bc88026cd070277e8380355de3126cbc8fe8a452708c6b1
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/ppc64le
|
|
||||||
|
|
||||||
Name: docker.io/moby/buildkit:master@sha256:c04c57765304ab84f4f9807fff3e11605c3a60e16435c734b02c723680f6bd6e
|
|
||||||
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
|
||||||
Platform: linux/riscv64
|
|
||||||
```
|
|
||||||
|
|
||||||
#### `.BuildInfo`
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect crazymax/buildx:buildinfo --format "{{.BuildInfo}}"
|
|
||||||
Name: docker.io/crazymax/buildx:buildinfo
|
|
||||||
Frontend: dockerfile.v0
|
|
||||||
Attrs:
|
|
||||||
filename: Dockerfile
|
|
||||||
source: docker/dockerfile-upstream:master-labs
|
|
||||||
build-arg:bar: foo
|
|
||||||
build-arg:foo: bar
|
|
||||||
Sources:
|
|
||||||
Type: docker-image
|
|
||||||
Ref: docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0
|
|
||||||
Pin: sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0
|
|
||||||
|
|
||||||
Type: docker-image
|
|
||||||
Ref: docker.io/library/alpine:3.13
|
|
||||||
Pin: sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c
|
|
||||||
|
|
||||||
Type: docker-image
|
|
||||||
Ref: docker.io/moby/buildkit:v0.9.0
|
|
||||||
Pin: sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab
|
|
||||||
|
|
||||||
Type: docker-image
|
|
||||||
Ref: docker.io/tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04
|
|
||||||
Pin: sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04
|
|
||||||
|
|
||||||
Type: http
|
|
||||||
Ref: https://raw.githubusercontent.com/moby/moby/master/README.md
|
|
||||||
Pin: sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c
|
|
||||||
```
|
|
||||||
|
|
||||||
#### JSON output
|
|
||||||
|
|
||||||
A `json` go template func is also available if you want to render fields as
|
|
||||||
JSON bytes:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect crazymax/loop --format "{{json .Manifest}}"
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:08602e7340970e92bde5e0a2e887c1fde4d9ae753d1e05efb4c8ef3b609f97f1",
|
|
||||||
"size": 949
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect moby/buildkit:master --format "{{json .Manifest}}"
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"schemaVersion": 2,
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
|
|
||||||
"digest": "sha256:79d97f205e2799d99a3a8ae2a1ef17acb331e11784262c3faada847dc6972c52",
|
|
||||||
"size": 2010,
|
|
||||||
"manifests": [
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:bd1e78f06de26610fadf4eb9d04b1a45a545799d6342701726e952cc0c11c912",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "amd64",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:d37dcced63ec0965824fca644f0ac9efad8569434ec15b4c83adfcb3dcfc743b",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "arm",
|
|
||||||
"os": "linux",
|
|
||||||
"variant": "v7"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:ce142eb2255e6af46f2809e159fd03081697c7605a3de03b9cbe9a52ddb244bf",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "arm64",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:f59bfb5062fff76ce464bfa4e25ebaaaac887d6818238e119d68613c456d360c",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "s390x",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:cc96426e0c50a78105d5637d31356db5dd6ec594f21b24276e534a32da09645c",
|
|
||||||
"size": 1159,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "ppc64le",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:39f9c1e2878e6c333acb23187d6b205ce82ed934c60da326cb2c698192631478",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "riscv64",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect crazymax/buildx:buildinfo --format "{{json .BuildInfo}}"
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"frontend": "dockerfile.v0",
|
|
||||||
"attrs": {
|
|
||||||
"build-arg:bar": "foo",
|
|
||||||
"build-arg:foo": "bar",
|
|
||||||
"filename": "Dockerfile",
|
|
||||||
"source": "crazymax/dockerfile:buildattrs"
|
|
||||||
},
|
|
||||||
"sources": [
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
|
|
||||||
"pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/library/alpine:3.13@sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c",
|
|
||||||
"pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/moby/buildkit:v0.9.0@sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab",
|
|
||||||
"pin": "sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04",
|
|
||||||
"pin": "sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "http",
|
|
||||||
"ref": "https://raw.githubusercontent.com/moby/moby/master/README.md",
|
|
||||||
"pin": "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect crazymax/buildx:buildinfo --format "{{json .}}"
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"name": "crazymax/buildx:buildinfo",
|
|
||||||
"manifest": {
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:899d2c7acbc124d406820857bb51d9089717bbe4e22b97eb4bc5789e99f09f83",
|
|
||||||
"size": 2628
|
|
||||||
},
|
|
||||||
"image": {
|
|
||||||
"created": "2022-02-24T12:27:43.627154558Z",
|
|
||||||
"architecture": "amd64",
|
|
||||||
"os": "linux",
|
|
||||||
"config": {
|
|
||||||
"Env": [
|
|
||||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
|
|
||||||
"DOCKER_TLS_CERTDIR=/certs",
|
|
||||||
"DOCKER_CLI_EXPERIMENTAL=enabled"
|
|
||||||
],
|
|
||||||
"Entrypoint": [
|
|
||||||
"docker-entrypoint.sh"
|
|
||||||
],
|
|
||||||
"Cmd": [
|
|
||||||
"sh"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"rootfs": {
|
|
||||||
"type": "layers",
|
|
||||||
"diff_ids": [
|
|
||||||
"sha256:7fcb75871b2101082203959c83514ac8a9f4ecfee77a0fe9aa73bbe56afdf1b4",
|
|
||||||
"sha256:d3c0b963ff5684160641f936d6a4aa14efc8ff27b6edac255c07f2d03ff92e82",
|
|
||||||
"sha256:3f8d78f13fa9b1f35d3bc3f1351d03a027c38018c37baca73f93eecdea17f244",
|
|
||||||
"sha256:8e6eb1137b182ae0c3f5d40ca46341fda2eaeeeb5fa516a9a2bf96171238e2e0",
|
|
||||||
"sha256:fde4c869a56b54dd76d7352ddaa813fd96202bda30b9dceb2c2f2ad22fa2e6ce",
|
|
||||||
"sha256:52025823edb284321af7846419899234b3c66219bf06061692b709875ed0760f",
|
|
||||||
"sha256:50adb5982dbf6126c7cf279ac3181d1e39fc9116b610b947a3dadae6f7e7c5bc",
|
|
||||||
"sha256:9801c319e1c66c5d295e78b2d3e80547e73c7e3c63a4b71e97c8ca357224af24",
|
|
||||||
"sha256:dfbfac44d5d228c49b42194c8a2f470abd6916d072f612a6fb14318e94fde8ae",
|
|
||||||
"sha256:3dfb74e19dedf61568b917c19b0fd3ee4580870027ca0b6054baf239855d1322",
|
|
||||||
"sha256:b182e707c23e4f19be73f9022a99d2d1ca7bf1ca8f280d40e4d1c10a6f51550e"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"history": [
|
|
||||||
{
|
|
||||||
"created": "2021-11-12T17:19:58.698676655Z",
|
|
||||||
"created_by": "/bin/sh -c #(nop) ADD file:5a707b9d6cb5fff532e4c2141bc35707593f21da5528c9e71ae2ddb6ba4a4eb6 in / "
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2021-11-12T17:19:58.948920855Z",
|
|
||||||
"created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\"]",
|
|
||||||
"empty_layer": true
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:38.285594601Z",
|
|
||||||
"created_by": "RUN /bin/sh -c apk --update --no-cache add bash ca-certificates openssh-client \u0026\u0026 rm -rf /tmp/* /var/cache/apk/* # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:41.061874167Z",
|
|
||||||
"created_by": "COPY /opt/docker/ /usr/local/bin/ # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:41.174098947Z",
|
|
||||||
"created_by": "COPY /usr/bin/buildctl /usr/local/bin/buildctl # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:41.320343683Z",
|
|
||||||
"created_by": "COPY /usr/bin/buildkit* /usr/local/bin/ # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:41.447149933Z",
|
|
||||||
"created_by": "COPY /buildx /usr/libexec/docker/cli-plugins/docker-buildx # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:43.057722191Z",
|
|
||||||
"created_by": "COPY /opt/docker-compose /usr/libexec/docker/cli-plugins/docker-compose # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:43.145224134Z",
|
|
||||||
"created_by": "ADD https://raw.githubusercontent.com/moby/moby/master/README.md / # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:43.422212427Z",
|
|
||||||
"created_by": "ENV DOCKER_TLS_CERTDIR=/certs",
|
|
||||||
"comment": "buildkit.dockerfile.v0",
|
|
||||||
"empty_layer": true
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:43.422212427Z",
|
|
||||||
"created_by": "ENV DOCKER_CLI_EXPERIMENTAL=enabled",
|
|
||||||
"comment": "buildkit.dockerfile.v0",
|
|
||||||
"empty_layer": true
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:43.422212427Z",
|
|
||||||
"created_by": "RUN /bin/sh -c docker --version \u0026\u0026 buildkitd --version \u0026\u0026 buildctl --version \u0026\u0026 docker buildx version \u0026\u0026 docker compose version \u0026\u0026 mkdir /certs /certs/client \u0026\u0026 chmod 1777 /certs /certs/client # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:43.514320155Z",
|
|
||||||
"created_by": "COPY rootfs/modprobe.sh /usr/local/bin/modprobe # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:43.627154558Z",
|
|
||||||
"created_by": "COPY rootfs/docker-entrypoint.sh /usr/local/bin/ # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:43.627154558Z",
|
|
||||||
"created_by": "ENTRYPOINT [\"docker-entrypoint.sh\"]",
|
|
||||||
"comment": "buildkit.dockerfile.v0",
|
|
||||||
"empty_layer": true
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T12:27:43.627154558Z",
|
|
||||||
"created_by": "CMD [\"sh\"]",
|
|
||||||
"comment": "buildkit.dockerfile.v0",
|
|
||||||
"empty_layer": true
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"buildinfo": {
|
|
||||||
"frontend": "dockerfile.v0",
|
|
||||||
"attrs": {
|
|
||||||
"build-arg:bar": "foo",
|
|
||||||
"build-arg:foo": "bar",
|
|
||||||
"filename": "Dockerfile",
|
|
||||||
"source": "docker/dockerfile-upstream:master-labs"
|
|
||||||
},
|
|
||||||
"sources": [
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
|
|
||||||
"pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/library/alpine:3.13",
|
|
||||||
"pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/moby/buildkit:v0.9.0",
|
|
||||||
"pin": "sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "docker-image",
|
|
||||||
"ref": "docker.io/tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04",
|
|
||||||
"pin": "sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"type": "http",
|
|
||||||
"ref": "https://raw.githubusercontent.com/moby/moby/master/README.md",
|
|
||||||
"pin": "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Multi-platform
|
|
||||||
|
|
||||||
Multi-platform images are supported for `.Image` and `.BuildInfo` fields. If
|
|
||||||
you want to pick up a specific platform, you can specify it using the `index`
|
|
||||||
go template function:
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect --format '{{json (index .Image "linux/s390x")}}' moby/buildkit:master
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"created": "2022-02-25T17:13:27.89891722Z",
|
|
||||||
"architecture": "s390x",
|
|
||||||
"os": "linux",
|
|
||||||
"config": {
|
|
||||||
"Env": [
|
|
||||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
|
||||||
],
|
|
||||||
"Entrypoint": [
|
|
||||||
"buildkitd"
|
|
||||||
],
|
|
||||||
"Volumes": {
|
|
||||||
"/var/lib/buildkit": {}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
"rootfs": {
|
|
||||||
"type": "layers",
|
|
||||||
"diff_ids": [
|
|
||||||
"sha256:41048e32d0684349141cf05f629c5fc3c5915d1f3426b66dbb8953a540e01e1e",
|
|
||||||
"sha256:2651209b9208fff6c053bc3c17353cb07874e50f1a9bc96d6afd03aef63de76a",
|
|
||||||
"sha256:6741ed7e73039d853fa8902246a4c7e8bf9dd09652fd1b08251bc5f9e8876a7f",
|
|
||||||
"sha256:92ac046adeeb65c86ae3f0b458dee04ad4a462e417661c04d77642c66494f69b"
|
|
||||||
]
|
|
||||||
},
|
|
||||||
"history": [
|
|
||||||
{
|
|
||||||
"created": "2021-11-24T20:41:23.709681315Z",
|
|
||||||
"created_by": "/bin/sh -c #(nop) ADD file:cd24c711a2ef431b3ff94f9a02bfc42f159bc60de1d0eceecafea4e8af02441d in / "
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2021-11-24T20:41:23.94211262Z",
|
|
||||||
"created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\"]",
|
|
||||||
"empty_layer": true
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-01-26T18:15:21.449825391Z",
|
|
||||||
"created_by": "RUN /bin/sh -c apk add --no-cache fuse3 git openssh pigz xz \u0026\u0026 ln -s fusermount3 /usr/bin/fusermount # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-24T00:34:00.924540012Z",
|
|
||||||
"created_by": "COPY examples/buildctl-daemonless/buildctl-daemonless.sh /usr/bin/ # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-25T17:13:27.89891722Z",
|
|
||||||
"created_by": "VOLUME [/var/lib/buildkit]",
|
|
||||||
"comment": "buildkit.dockerfile.v0",
|
|
||||||
"empty_layer": true
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-25T17:13:27.89891722Z",
|
|
||||||
"created_by": "COPY / /usr/bin/ # buildkit",
|
|
||||||
"comment": "buildkit.dockerfile.v0"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"created": "2022-02-25T17:13:27.89891722Z",
|
|
||||||
"created_by": "ENTRYPOINT [\"buildkitd\"]",
|
|
||||||
"comment": "buildkit.dockerfile.v0",
|
|
||||||
"empty_layer": true
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### <a name="raw"></a> Show original, unformatted JSON manifest (--raw)
|
### <a name="raw"></a> Show original, unformatted JSON manifest (--raw)
|
||||||
|
|
||||||
Use the `--raw` option to print the unformatted JSON manifest bytes.
|
Use the `--raw` option to print the original JSON bytes instead of the formatted
|
||||||
|
output.
|
||||||
> `jq` is used here to get a better rendering of the output result.
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect --raw crazymax/loop | jq
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"schemaVersion": 2,
|
|
||||||
"config": {
|
|
||||||
"mediaType": "application/vnd.docker.container.image.v1+json",
|
|
||||||
"digest": "sha256:7ace7d324e79b360b2db8b820d83081863d96d22e734cdf297a8e7fd83f6ceb3",
|
|
||||||
"size": 2298
|
|
||||||
},
|
|
||||||
"layers": [
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
|
||||||
"digest": "sha256:5843afab387455b37944e709ee8c78d7520df80f8d01cf7f861aae63beeddb6b",
|
|
||||||
"size": 2811478
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
|
||||||
"digest": "sha256:726d3732a87e1c430d67e8969de6b222a889d45e045ebae1a008a37ba38f3b1f",
|
|
||||||
"size": 1776812
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
|
||||||
"digest": "sha256:5d7cf9b33148a8f220c84f27dd2cfae46aca019a3ea3fbf7274f6d6dbfae8f3b",
|
|
||||||
"size": 382855
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx imagetools inspect --raw moby/buildkit:master | jq
|
|
||||||
```
|
|
||||||
```json
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
|
|
||||||
"schemaVersion": 2,
|
|
||||||
"manifests": [
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:667d28c9fb33820ce686887a717a148e89fa77f9097f9352996bbcce99d352b1",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "amd64",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:71789527b64ab3d7b3de01d364b449cd7f7a3da758218fbf73b9c9aae05a6775",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "arm",
|
|
||||||
"os": "linux",
|
|
||||||
"variant": "v7"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:fb64667e1ce6ab0d05478f3a8402af07b27737598dcf9a510fb1d792b13a66be",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "arm64",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:1c3ddf95a0788e23f72f25800c05abc4458946685e2b66788c3d978cde6da92b",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "s390x",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:05bcde6d460a284e5bc88026cd070277e8380355de3126cbc8fe8a452708c6b1",
|
|
||||||
"size": 1159,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "ppc64le",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
|
||||||
"digest": "sha256:c04c57765304ab84f4f9807fff3e11605c3a60e16435c734b02c723680f6bd6e",
|
|
||||||
"size": 1158,
|
|
||||||
"platform": {
|
|
||||||
"architecture": "riscv64",
|
|
||||||
"os": "linux"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|||||||
@@ -9,10 +9,10 @@ Inspect current builder instance
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--bootstrap`](#bootstrap) | | | Ensure builder has booted before inspecting |
|
| [`--bootstrap`](#bootstrap) | Ensure builder has booted before inspecting |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| `--builder string` | Override the configured builder instance |
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
@@ -23,19 +23,6 @@ Shows information about the current or specified builder.
|
|||||||
|
|
||||||
## Examples
|
## Examples
|
||||||
|
|
||||||
### <a name="bootstrap"></a> Ensure that the builder is running before inspecting (--bootstrap)
|
|
||||||
|
|
||||||
Use the `--bootstrap` option to ensure that the builder is running before
|
|
||||||
inspecting it. If the driver is `docker-container`, then `--bootstrap` starts
|
|
||||||
the buildkit container and waits until it is operational. Bootstrapping is
|
|
||||||
automatically done during build, and therefore not necessary. The same BuildKit
|
|
||||||
container is used during the lifetime of the associated builder node (as
|
|
||||||
displayed in `buildx ls`).
|
|
||||||
|
|
||||||
### <a name="builder"></a> Override the configured builder instance (--builder)
|
|
||||||
|
|
||||||
Same as [`buildx --builder`](buildx.md#builder).
|
|
||||||
|
|
||||||
### Get information about a builder instance
|
### Get information about a builder instance
|
||||||
|
|
||||||
By default, `inspect` shows information about the current builder. Specify the
|
By default, `inspect` shows information about the current builder. Specify the
|
||||||
@@ -60,3 +47,12 @@ Endpoint: ssh://ubuntu@1.2.3.4
|
|||||||
Status: running
|
Status: running
|
||||||
Platforms: linux/arm64, linux/arm/v7, linux/arm/v6
|
Platforms: linux/arm64, linux/arm/v7, linux/arm/v6
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### <a name="bootstrap"></a> Ensure that the builder is running before inspecting (--bootstrap)
|
||||||
|
|
||||||
|
Use the `--bootstrap` option to ensure that the builder is running before
|
||||||
|
inspecting it. If the driver is `docker-container`, then `--bootstrap` starts
|
||||||
|
the buildkit container and waits until it is operational. Bootstrapping is
|
||||||
|
automatically done during build, and therefore not necessary. The same BuildKit
|
||||||
|
container is used during the lifetime of the associated builder node (as
|
||||||
|
displayed in `buildx ls`).
|
||||||
|
|||||||
@@ -14,17 +14,18 @@ List builder instances
|
|||||||
|
|
||||||
Lists all builder instances and the nodes for each instance
|
Lists all builder instances and the nodes for each instance
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx ls
|
$ docker buildx ls
|
||||||
|
|
||||||
NAME/NODE DRIVER/ENDPOINT STATUS PLATFORMS
|
NAME/NODE DRIVER/ENDPOINT STATUS PLATFORMS
|
||||||
elated_tesla * docker-container
|
elated_tesla * docker-container
|
||||||
elated_tesla0 unix:///var/run/docker.sock running linux/amd64
|
elated_tesla0 unix:///var/run/docker.sock running linux/amd64
|
||||||
elated_tesla1 ssh://ubuntu@1.2.3.4 running linux/arm64*, linux/arm/v7, linux/arm/v6
|
elated_tesla1 ssh://ubuntu@1.2.3.4 running linux/arm64, linux/arm/v7, linux/arm/v6
|
||||||
default docker
|
default docker
|
||||||
default default running linux/amd64
|
default default running linux/amd64
|
||||||
```
|
```
|
||||||
|
|
||||||
Each builder has one or more nodes associated with it. The current builder's
|
Each builder has one or more nodes associated with it. The current builder's
|
||||||
name is marked with a `*` in `NAME/NODE` and explicit node to build against for
|
name is marked with a `*`.
|
||||||
the target platform marked with a `*` in the `PLATFORMS` column.
|
|
||||||
|
|||||||
@@ -9,20 +9,15 @@ Remove build cache
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| `-a`, `--all` | | | Remove all unused images, not just dangling ones |
|
| `-a`, `--all` | Remove all unused images, not just dangling ones |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| `--builder string` | Override the configured builder instance |
|
||||||
| `--filter` | `filter` | | Provide filter values (e.g., `until=24h`) |
|
| `--filter filter` | Provide filter values (e.g. 'until=24h') |
|
||||||
| `-f`, `--force` | | | Do not prompt for confirmation |
|
| `-f`, `--force` | Do not prompt for confirmation |
|
||||||
| `--keep-storage` | `bytes` | `0` | Amount of disk space to keep for cache |
|
| `--keep-storage bytes` | Amount of disk space to keep for cache |
|
||||||
| `--verbose` | | | Provide a more verbose output |
|
| `--verbose` | Provide a more verbose output |
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
## Examples
|
|
||||||
|
|
||||||
### <a name="builder"></a> Override the configured builder instance (--builder)
|
|
||||||
|
|
||||||
Same as [`buildx --builder`](buildx.md#builder).
|
|
||||||
|
|||||||
@@ -9,13 +9,10 @@ Remove a builder instance
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--all-inactive`](#all-inactive) | | | Remove all inactive builders |
|
| `--builder string` | Override the configured builder instance |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| [`--keep-state`](#keep-state) | Keep BuildKit state |
|
||||||
| [`-f`](#force), [`--force`](#force) | | | Do not prompt for confirmation |
|
|
||||||
| [`--keep-daemon`](#keep-daemon) | | | Keep the buildkitd daemon running |
|
|
||||||
| [`--keep-state`](#keep-state) | | | Keep BuildKit state |
|
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
@@ -27,32 +24,6 @@ default builder.
|
|||||||
|
|
||||||
## Examples
|
## Examples
|
||||||
|
|
||||||
### <a name="all-inactive"></a> Remove all inactive builders (--all-inactive)
|
|
||||||
|
|
||||||
Remove builders that are not in running state.
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx rm --all-inactive
|
|
||||||
WARNING! This will remove all builders that are not in running state. Are you sure you want to continue? [y/N] y
|
|
||||||
```
|
|
||||||
|
|
||||||
### <a name="builder"></a> Override the configured builder instance (--builder)
|
|
||||||
|
|
||||||
Same as [`buildx --builder`](buildx.md#builder).
|
|
||||||
|
|
||||||
### <a name="force"></a> Do not prompt for confirmation (--force)
|
|
||||||
|
|
||||||
Do not prompt for confirmation before removing inactive builders.
|
|
||||||
|
|
||||||
```console
|
|
||||||
$ docker buildx rm --all-inactive --force
|
|
||||||
```
|
|
||||||
|
|
||||||
### <a name="keep-daemon"></a> Keep the buildkitd daemon running (--keep-daemon)
|
|
||||||
|
|
||||||
Keep the buildkitd daemon running after the buildx context is removed. This is useful when you manage buildkitd daemons and buildx contexts independently.
|
|
||||||
Currently, only supported by the [`docker-container` and `kubernetes` drivers](buildx_create.md#driver).
|
|
||||||
|
|
||||||
### <a name="keep-state"></a> Keep BuildKit state (--keep-state)
|
### <a name="keep-state"></a> Keep BuildKit state (--keep-state)
|
||||||
|
|
||||||
Keep BuildKit state, so it can be reused by a new builder with the same name.
|
Keep BuildKit state, so it can be reused by a new builder with the same name.
|
||||||
|
|||||||
@@ -7,12 +7,6 @@ docker buildx stop [NAME]
|
|||||||
<!---MARKER_GEN_START-->
|
<!---MARKER_GEN_START-->
|
||||||
Stop builder instance
|
Stop builder instance
|
||||||
|
|
||||||
### Options
|
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
|
||||||
| --- | --- | --- | --- |
|
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
@@ -20,9 +14,3 @@ Stop builder instance
|
|||||||
|
|
||||||
Stops the specified or current builder. This will not prevent buildx build to
|
Stops the specified or current builder. This will not prevent buildx build to
|
||||||
restart the builder. The implementation of stop depends on the driver.
|
restart the builder. The implementation of stop depends on the driver.
|
||||||
|
|
||||||
## Examples
|
|
||||||
|
|
||||||
### <a name="builder"></a> Override the configured builder instance (--builder)
|
|
||||||
|
|
||||||
Same as [`buildx --builder`](buildx.md#builder).
|
|
||||||
|
|||||||
@@ -9,11 +9,11 @@ Set the current builder instance
|
|||||||
|
|
||||||
### Options
|
### Options
|
||||||
|
|
||||||
| Name | Type | Default | Description |
|
| Name | Description |
|
||||||
| --- | --- | --- | --- |
|
| --- | --- |
|
||||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
| `--builder string` | Override the configured builder instance |
|
||||||
| `--default` | | | Set builder as default for current context |
|
| `--default` | Set builder as default for current context |
|
||||||
| `--global` | | | Builder persists context changes |
|
| `--global` | Builder persists context changes |
|
||||||
|
|
||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
@@ -23,9 +23,3 @@ Set the current builder instance
|
|||||||
Switches the current builder instance. Build commands invoked after this command
|
Switches the current builder instance. Build commands invoked after this command
|
||||||
will run on a specified builder. Alternatively, a context name can be used to
|
will run on a specified builder. Alternatively, a context name can be used to
|
||||||
switch to the default builder of that context.
|
switch to the default builder of that context.
|
||||||
|
|
||||||
## Examples
|
|
||||||
|
|
||||||
### <a name="builder"></a> Override the configured builder instance (--builder)
|
|
||||||
|
|
||||||
Same as [`buildx --builder`](buildx.md#builder).
|
|
||||||
|
|||||||
@@ -10,9 +10,10 @@ Show buildx version information
|
|||||||
|
|
||||||
<!---MARKER_GEN_END-->
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
## Description
|
## Examples
|
||||||
|
|
||||||
|
### View version information
|
||||||
|
|
||||||
View version information
|
|
||||||
|
|
||||||
```console
|
```console
|
||||||
$ docker buildx version
|
$ docker buildx version
|
||||||
|
|||||||
@@ -2,6 +2,5 @@ package bkimage
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
DefaultImage = "moby/buildkit:buildx-stable-1" // TODO: make this verified
|
DefaultImage = "moby/buildkit:buildx-stable-1" // TODO: make this verified
|
||||||
QemuImage = "tonistiigi/binfmt:latest" // TODO: make this verified
|
|
||||||
DefaultRootlessImage = DefaultImage + "-rootless"
|
DefaultRootlessImage = DefaultImage + "-rootless"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,19 +1,17 @@
|
|||||||
package docker
|
package docker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"archive/tar"
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
"github.com/docker/buildx/driver/bkimage"
|
"github.com/docker/buildx/driver/bkimage"
|
||||||
"github.com/docker/buildx/util/confutil"
|
|
||||||
"github.com/docker/buildx/util/imagetools"
|
"github.com/docker/buildx/util/imagetools"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
@@ -22,8 +20,6 @@ import (
|
|||||||
"github.com/docker/docker/api/types/mount"
|
"github.com/docker/docker/api/types/mount"
|
||||||
"github.com/docker/docker/api/types/network"
|
"github.com/docker/docker/api/types/network"
|
||||||
dockerclient "github.com/docker/docker/client"
|
dockerclient "github.com/docker/docker/client"
|
||||||
dockerarchive "github.com/docker/docker/pkg/archive"
|
|
||||||
"github.com/docker/docker/pkg/idtools"
|
|
||||||
"github.com/docker/docker/pkg/stdcopy"
|
"github.com/docker/docker/pkg/stdcopy"
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/util/tracing/detect"
|
"github.com/moby/buildkit/util/tracing/detect"
|
||||||
@@ -32,16 +28,20 @@ import (
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
volumeStateSuffix = "_state"
|
volumeStateSuffix = "_state"
|
||||||
|
|
||||||
|
// containerStateDir is the location where buildkitd inside the container
|
||||||
|
// stores its state. The container driver creates a Linux container, so
|
||||||
|
// this should match the location for Linux, as defined in:
|
||||||
|
// https://github.com/moby/buildkit/blob/v0.9.0/util/appdefaults/appdefaults_unix.go#L11-L15
|
||||||
|
containerBuildKitRootDir = "/var/lib/buildkit"
|
||||||
)
|
)
|
||||||
|
|
||||||
type Driver struct {
|
type Driver struct {
|
||||||
driver.InitConfig
|
driver.InitConfig
|
||||||
factory driver.Factory
|
factory driver.Factory
|
||||||
userNSRemap bool // true if dockerd is running with userns-remap mode
|
netMode string
|
||||||
netMode string
|
image string
|
||||||
image string
|
env []string
|
||||||
cgroupParent string
|
|
||||||
env []string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) IsMobyDriver() bool {
|
func (d *Driver) IsMobyDriver() bool {
|
||||||
@@ -113,34 +113,30 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
|
|||||||
if err := l.Wrap("creating container "+d.Name, func() error {
|
if err := l.Wrap("creating container "+d.Name, func() error {
|
||||||
hc := &container.HostConfig{
|
hc := &container.HostConfig{
|
||||||
Privileged: true,
|
Privileged: true,
|
||||||
|
UsernsMode: "host",
|
||||||
Mounts: []mount.Mount{
|
Mounts: []mount.Mount{
|
||||||
{
|
{
|
||||||
Type: mount.TypeVolume,
|
Type: mount.TypeVolume,
|
||||||
Source: d.Name + volumeStateSuffix,
|
Source: d.Name + volumeStateSuffix,
|
||||||
Target: confutil.DefaultBuildKitStateDir,
|
Target: containerBuildKitRootDir,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
if d.userNSRemap {
|
|
||||||
hc.UsernsMode = "host"
|
|
||||||
}
|
|
||||||
if d.netMode != "" {
|
if d.netMode != "" {
|
||||||
hc.NetworkMode = container.NetworkMode(d.netMode)
|
hc.NetworkMode = container.NetworkMode(d.netMode)
|
||||||
}
|
}
|
||||||
if info, err := d.DockerAPI.Info(ctx); err == nil && info.CgroupDriver == "cgroupfs" {
|
|
||||||
// Place all buildkit containers inside this cgroup by default so limits can be attached
|
|
||||||
// to all build activity on the host.
|
|
||||||
hc.CgroupParent = "/docker/buildx"
|
|
||||||
if d.cgroupParent != "" {
|
|
||||||
hc.CgroupParent = d.cgroupParent
|
|
||||||
}
|
|
||||||
}
|
|
||||||
_, err := d.DockerAPI.ContainerCreate(ctx, cfg, hc, &network.NetworkingConfig{}, nil, d.Name)
|
_, err := d.DockerAPI.ContainerCreate(ctx, cfg, hc, &network.NetworkingConfig{}, nil, d.Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := d.copyToContainer(ctx, d.InitConfig.Files); err != nil {
|
if f := d.InitConfig.ConfigFile; f != "" {
|
||||||
return err
|
buf, err := readFileToTar(f)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := d.DockerAPI.CopyToContainer(ctx, d.Name, "/", buf, dockertypes.CopyToContainerOptions{}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if err := d.start(ctx, l); err != nil {
|
if err := d.start(ctx, l); err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -200,24 +196,6 @@ func (d *Driver) copyLogs(ctx context.Context, l progress.SubLogger) error {
|
|||||||
return rc.Close()
|
return rc.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) copyToContainer(ctx context.Context, files map[string][]byte) error {
|
|
||||||
srcPath, err := writeConfigFiles(files)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if srcPath != "" {
|
|
||||||
defer os.RemoveAll(srcPath)
|
|
||||||
}
|
|
||||||
srcArchive, err := dockerarchive.TarWithOptions(srcPath, &dockerarchive.TarOptions{
|
|
||||||
ChownOpts: &idtools.Identity{UID: 0, GID: 0},
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer srcArchive.Close()
|
|
||||||
return d.DockerAPI.CopyToContainer(ctx, d.Name, "/", srcArchive, dockertypes.CopyToContainerOptions{})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *Driver) exec(ctx context.Context, cmd []string) (string, net.Conn, error) {
|
func (d *Driver) exec(ctx context.Context, cmd []string) (string, net.Conn, error) {
|
||||||
execConfig := types.ExecConfig{
|
execConfig := types.ExecConfig{
|
||||||
Cmd: cmd,
|
Cmd: cmd,
|
||||||
@@ -298,7 +276,7 @@ func (d *Driver) Stop(ctx context.Context, force bool) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
|
func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
|
||||||
info, err := d.Info(ctx)
|
info, err := d.Info(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -308,22 +286,20 @@ func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if rmDaemon {
|
if err := d.DockerAPI.ContainerRemove(ctx, d.Name, dockertypes.ContainerRemoveOptions{
|
||||||
if err := d.DockerAPI.ContainerRemove(ctx, d.Name, dockertypes.ContainerRemoveOptions{
|
RemoveVolumes: true,
|
||||||
RemoveVolumes: true,
|
Force: force,
|
||||||
Force: force,
|
}); err != nil {
|
||||||
}); err != nil {
|
return err
|
||||||
return err
|
}
|
||||||
}
|
for _, v := range container.Mounts {
|
||||||
for _, v := range container.Mounts {
|
if v.Name == d.Name+volumeStateSuffix {
|
||||||
if v.Name != d.Name+volumeStateSuffix {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if rmVolume {
|
if rmVolume {
|
||||||
return d.DockerAPI.VolumeRemove(ctx, d.Name+volumeStateSuffix, false)
|
return d.DockerAPI.VolumeRemove(ctx, d.Name+volumeStateSuffix, false)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -381,6 +357,29 @@ func (d *demux) Read(dt []byte) (int, error) {
|
|||||||
return d.Reader.Read(dt)
|
return d.Reader.Read(dt)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func readFileToTar(fn string) (*bytes.Buffer, error) {
|
||||||
|
buf := bytes.NewBuffer(nil)
|
||||||
|
tw := tar.NewWriter(buf)
|
||||||
|
dt, err := ioutil.ReadFile(fn)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := tw.WriteHeader(&tar.Header{
|
||||||
|
Name: "/etc/buildkit/buildkitd.toml",
|
||||||
|
Size: int64(len(dt)),
|
||||||
|
Mode: 0644,
|
||||||
|
}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if _, err := tw.Write(dt); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if err := tw.Close(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return buf, nil
|
||||||
|
}
|
||||||
|
|
||||||
type logWriter struct {
|
type logWriter struct {
|
||||||
logger progress.SubLogger
|
logger progress.SubLogger
|
||||||
stream int
|
stream int
|
||||||
@@ -390,27 +389,3 @@ func (l *logWriter) Write(dt []byte) (int, error) {
|
|||||||
l.logger.Log(l.stream, dt)
|
l.logger.Log(l.stream, dt)
|
||||||
return len(dt), nil
|
return len(dt), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeConfigFiles(m map[string][]byte) (_ string, err error) {
|
|
||||||
// Temp dir that will be copied to the container
|
|
||||||
tmpDir, err := os.MkdirTemp("", "buildkitd-config")
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
os.RemoveAll(tmpDir)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
for f, dt := range m {
|
|
||||||
f = path.Join(confutil.DefaultBuildKitConfigDir, f)
|
|
||||||
p := filepath.Join(tmpDir, f)
|
|
||||||
if err := os.MkdirAll(filepath.Dir(p), 0700); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if err := os.WriteFile(p, dt, 0600); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return tmpDir, nil
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -6,7 +6,6 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
dockertypes "github.com/docker/docker/api/types"
|
|
||||||
dockerclient "github.com/docker/docker/client"
|
dockerclient "github.com/docker/docker/client"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
@@ -41,20 +40,6 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
|
|||||||
return nil, errors.Errorf("%s driver requires docker API access", f.Name())
|
return nil, errors.Errorf("%s driver requires docker API access", f.Name())
|
||||||
}
|
}
|
||||||
d := &Driver{factory: f, InitConfig: cfg}
|
d := &Driver{factory: f, InitConfig: cfg}
|
||||||
dockerInfo, err := cfg.DockerAPI.Info(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
secOpts, err := dockertypes.DecodeSecurityOptions(dockerInfo.SecurityOptions)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for _, f := range secOpts {
|
|
||||||
if f.Name == "userns" {
|
|
||||||
d.userNSRemap = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for k, v := range cfg.DriverOpts {
|
for k, v := range cfg.DriverOpts {
|
||||||
switch {
|
switch {
|
||||||
case k == "network":
|
case k == "network":
|
||||||
@@ -64,8 +49,6 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
|
|||||||
}
|
}
|
||||||
case k == "image":
|
case k == "image":
|
||||||
d.image = v
|
d.image = v
|
||||||
case k == "cgroup-parent":
|
|
||||||
d.cgroupParent = v
|
|
||||||
case strings.HasPrefix(k, "env."):
|
case strings.HasPrefix(k, "env."):
|
||||||
envName := strings.TrimPrefix(k, "env.")
|
envName := strings.TrimPrefix(k, "env.")
|
||||||
if envName == "" {
|
if envName == "" {
|
||||||
|
|||||||
@@ -33,15 +33,13 @@ func (d *Driver) Stop(ctx context.Context, force bool) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
|
func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Client(ctx context.Context) (*client.Client, error) {
|
func (d *Driver) Client(ctx context.Context) (*client.Client, error) {
|
||||||
return client.New(ctx, "", client.WithContextDialer(func(context.Context, string) (net.Conn, error) {
|
return client.New(ctx, "", client.WithContextDialer(func(context.Context, string) (net.Conn, error) {
|
||||||
return d.DockerAPI.DialHijack(ctx, "/grpc", "h2c", nil)
|
return d.DockerAPI.DialHijack(ctx, "/grpc", "h2c", nil)
|
||||||
}), client.WithSessionDialer(func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
|
|
||||||
return d.DockerAPI.DialHijack(ctx, "/session", proto, meta)
|
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -44,7 +44,7 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
|
|||||||
if cfg.DockerAPI == nil {
|
if cfg.DockerAPI == nil {
|
||||||
return nil, errors.Errorf("docker driver requires docker API access")
|
return nil, errors.Errorf("docker driver requires docker API access")
|
||||||
}
|
}
|
||||||
if len(cfg.Files) > 0 {
|
if cfg.ConfigFile != "" {
|
||||||
return nil, errors.Errorf("setting config file is not supported for docker driver, use dockerd configuration file")
|
return nil, errors.Errorf("setting config file is not supported for docker driver, use dockerd configuration file")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -54,7 +54,7 @@ type Driver interface {
|
|||||||
Bootstrap(context.Context, progress.Logger) error
|
Bootstrap(context.Context, progress.Logger) error
|
||||||
Info(context.Context) (*Info, error)
|
Info(context.Context) (*Info, error)
|
||||||
Stop(ctx context.Context, force bool) error
|
Stop(ctx context.Context, force bool) error
|
||||||
Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error
|
Rm(ctx context.Context, force bool, rmVolume bool) error
|
||||||
Client(ctx context.Context) (*client.Client, error)
|
Client(ctx context.Context) (*client.Client, error)
|
||||||
Features() map[Feature]bool
|
Features() map[Feature]bool
|
||||||
IsMobyDriver() bool
|
IsMobyDriver() bool
|
||||||
|
|||||||
@@ -1,225 +0,0 @@
|
|||||||
package context
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/docker/cli/cli/context"
|
|
||||||
"github.com/docker/cli/cli/context/store"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"k8s.io/client-go/tools/clientcmd"
|
|
||||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
|
||||||
)
|
|
||||||
|
|
||||||
func testEndpoint(server, defaultNamespace string, ca, cert, key []byte, skipTLSVerify bool) Endpoint {
|
|
||||||
var tlsData *context.TLSData
|
|
||||||
if ca != nil || cert != nil || key != nil {
|
|
||||||
tlsData = &context.TLSData{
|
|
||||||
CA: ca,
|
|
||||||
Cert: cert,
|
|
||||||
Key: key,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return Endpoint{
|
|
||||||
EndpointMeta: EndpointMeta{
|
|
||||||
EndpointMetaBase: context.EndpointMetaBase{
|
|
||||||
Host: server,
|
|
||||||
SkipTLSVerify: skipTLSVerify,
|
|
||||||
},
|
|
||||||
DefaultNamespace: defaultNamespace,
|
|
||||||
},
|
|
||||||
TLSData: tlsData,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var testStoreCfg = store.NewConfig(
|
|
||||||
func() interface{} {
|
|
||||||
return &map[string]interface{}{}
|
|
||||||
},
|
|
||||||
store.EndpointTypeGetter(KubernetesEndpoint, func() interface{} { return &EndpointMeta{} }),
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestSaveLoadContexts(t *testing.T) {
|
|
||||||
storeDir, err := ioutil.TempDir("", "test-load-save-k8-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer os.RemoveAll(storeDir)
|
|
||||||
store := store.New(storeDir, testStoreCfg)
|
|
||||||
require.NoError(t, save(store, testEndpoint("https://test", "test", nil, nil, nil, false), "raw-notls"))
|
|
||||||
require.NoError(t, save(store, testEndpoint("https://test", "test", nil, nil, nil, true), "raw-notls-skip"))
|
|
||||||
require.NoError(t, save(store, testEndpoint("https://test", "test", []byte("ca"), []byte("cert"), []byte("key"), true), "raw-tls"))
|
|
||||||
|
|
||||||
kcFile, err := ioutil.TempFile(os.TempDir(), "test-load-save-k8-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer os.Remove(kcFile.Name())
|
|
||||||
defer kcFile.Close()
|
|
||||||
cfg := clientcmdapi.NewConfig()
|
|
||||||
cfg.AuthInfos["user"] = clientcmdapi.NewAuthInfo()
|
|
||||||
cfg.Contexts["context1"] = clientcmdapi.NewContext()
|
|
||||||
cfg.Clusters["cluster1"] = clientcmdapi.NewCluster()
|
|
||||||
cfg.Contexts["context2"] = clientcmdapi.NewContext()
|
|
||||||
cfg.Clusters["cluster2"] = clientcmdapi.NewCluster()
|
|
||||||
cfg.AuthInfos["user"].ClientCertificateData = []byte("cert")
|
|
||||||
cfg.AuthInfos["user"].ClientKeyData = []byte("key")
|
|
||||||
cfg.Clusters["cluster1"].Server = "https://server1"
|
|
||||||
cfg.Clusters["cluster1"].InsecureSkipTLSVerify = true
|
|
||||||
cfg.Clusters["cluster2"].Server = "https://server2"
|
|
||||||
cfg.Clusters["cluster2"].CertificateAuthorityData = []byte("ca")
|
|
||||||
cfg.Contexts["context1"].AuthInfo = "user"
|
|
||||||
cfg.Contexts["context1"].Cluster = "cluster1"
|
|
||||||
cfg.Contexts["context1"].Namespace = "namespace1"
|
|
||||||
cfg.Contexts["context2"].AuthInfo = "user"
|
|
||||||
cfg.Contexts["context2"].Cluster = "cluster2"
|
|
||||||
cfg.Contexts["context2"].Namespace = "namespace2"
|
|
||||||
cfg.CurrentContext = "context1"
|
|
||||||
cfgData, err := clientcmd.Write(*cfg)
|
|
||||||
require.NoError(t, err)
|
|
||||||
_, err = kcFile.Write(cfgData)
|
|
||||||
require.NoError(t, err)
|
|
||||||
kcFile.Close()
|
|
||||||
|
|
||||||
epDefault, err := FromKubeConfig(kcFile.Name(), "", "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
epContext2, err := FromKubeConfig(kcFile.Name(), "context2", "namespace-override")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, save(store, epDefault, "embed-default-context"))
|
|
||||||
require.NoError(t, save(store, epContext2, "embed-context2"))
|
|
||||||
|
|
||||||
rawNoTLSMeta, err := store.GetMetadata("raw-notls")
|
|
||||||
require.NoError(t, err)
|
|
||||||
rawNoTLSSkipMeta, err := store.GetMetadata("raw-notls-skip")
|
|
||||||
require.NoError(t, err)
|
|
||||||
rawTLSMeta, err := store.GetMetadata("raw-tls")
|
|
||||||
require.NoError(t, err)
|
|
||||||
embededDefaultMeta, err := store.GetMetadata("embed-default-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
embededContext2Meta, err := store.GetMetadata("embed-context2")
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
rawNoTLS := EndpointFromContext(rawNoTLSMeta)
|
|
||||||
rawNoTLSSkip := EndpointFromContext(rawNoTLSSkipMeta)
|
|
||||||
rawTLS := EndpointFromContext(rawTLSMeta)
|
|
||||||
embededDefault := EndpointFromContext(embededDefaultMeta)
|
|
||||||
embededContext2 := EndpointFromContext(embededContext2Meta)
|
|
||||||
|
|
||||||
rawNoTLSEP, err := rawNoTLS.WithTLSData(store, "raw-notls")
|
|
||||||
require.NoError(t, err)
|
|
||||||
checkClientConfig(t, rawNoTLSEP, "https://test", "test", nil, nil, nil, false)
|
|
||||||
rawNoTLSSkipEP, err := rawNoTLSSkip.WithTLSData(store, "raw-notls-skip")
|
|
||||||
require.NoError(t, err)
|
|
||||||
checkClientConfig(t, rawNoTLSSkipEP, "https://test", "test", nil, nil, nil, true)
|
|
||||||
rawTLSEP, err := rawTLS.WithTLSData(store, "raw-tls")
|
|
||||||
require.NoError(t, err)
|
|
||||||
checkClientConfig(t, rawTLSEP, "https://test", "test", []byte("ca"), []byte("cert"), []byte("key"), true)
|
|
||||||
embededDefaultEP, err := embededDefault.WithTLSData(store, "embed-default-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
checkClientConfig(t, embededDefaultEP, "https://server1", "namespace1", nil, []byte("cert"), []byte("key"), true)
|
|
||||||
embededContext2EP, err := embededContext2.WithTLSData(store, "embed-context2")
|
|
||||||
require.NoError(t, err)
|
|
||||||
checkClientConfig(t, embededContext2EP, "https://server2", "namespace-override", []byte("ca"), []byte("cert"), []byte("key"), false)
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkClientConfig(t *testing.T, ep Endpoint, server, namespace string, ca, cert, key []byte, skipTLSVerify bool) {
|
|
||||||
config := ep.KubernetesConfig()
|
|
||||||
cfg, err := config.ClientConfig()
|
|
||||||
require.NoError(t, err)
|
|
||||||
ns, _, _ := config.Namespace()
|
|
||||||
assert.Equal(t, server, cfg.Host)
|
|
||||||
assert.Equal(t, namespace, ns)
|
|
||||||
assert.Equal(t, ca, cfg.CAData)
|
|
||||||
assert.Equal(t, cert, cfg.CertData)
|
|
||||||
assert.Equal(t, key, cfg.KeyData)
|
|
||||||
assert.Equal(t, skipTLSVerify, cfg.Insecure)
|
|
||||||
}
|
|
||||||
|
|
||||||
func save(s store.Writer, ep Endpoint, name string) error {
|
|
||||||
meta := store.Metadata{
|
|
||||||
Endpoints: map[string]interface{}{
|
|
||||||
KubernetesEndpoint: ep.EndpointMeta,
|
|
||||||
},
|
|
||||||
Name: name,
|
|
||||||
}
|
|
||||||
if err := s.CreateOrUpdate(meta); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return s.ResetEndpointTLSMaterial(name, KubernetesEndpoint, ep.TLSData.ToStoreTLSData())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSaveLoadGKEConfig(t *testing.T) {
|
|
||||||
storeDir, err := ioutil.TempDir("", t.Name())
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer os.RemoveAll(storeDir)
|
|
||||||
store := store.New(storeDir, testStoreCfg)
|
|
||||||
cfg, err := clientcmd.LoadFromFile("fixtures/gke-kubeconfig")
|
|
||||||
require.NoError(t, err)
|
|
||||||
clientCfg := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
|
|
||||||
expectedCfg, err := clientCfg.ClientConfig()
|
|
||||||
require.NoError(t, err)
|
|
||||||
ep, err := FromKubeConfig("fixtures/gke-kubeconfig", "", "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, save(store, ep, "gke-context"))
|
|
||||||
persistedMetadata, err := store.GetMetadata("gke-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
persistedEPMeta := EndpointFromContext(persistedMetadata)
|
|
||||||
assert.True(t, persistedEPMeta != nil)
|
|
||||||
persistedEP, err := persistedEPMeta.WithTLSData(store, "gke-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
persistedCfg := persistedEP.KubernetesConfig()
|
|
||||||
actualCfg, err := persistedCfg.ClientConfig()
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, expectedCfg.AuthProvider, actualCfg.AuthProvider)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSaveLoadEKSConfig(t *testing.T) {
|
|
||||||
storeDir, err := ioutil.TempDir("", t.Name())
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer os.RemoveAll(storeDir)
|
|
||||||
store := store.New(storeDir, testStoreCfg)
|
|
||||||
cfg, err := clientcmd.LoadFromFile("fixtures/eks-kubeconfig")
|
|
||||||
require.NoError(t, err)
|
|
||||||
clientCfg := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
|
|
||||||
expectedCfg, err := clientCfg.ClientConfig()
|
|
||||||
require.NoError(t, err)
|
|
||||||
ep, err := FromKubeConfig("fixtures/eks-kubeconfig", "", "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, save(store, ep, "eks-context"))
|
|
||||||
persistedMetadata, err := store.GetMetadata("eks-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
persistedEPMeta := EndpointFromContext(persistedMetadata)
|
|
||||||
assert.True(t, persistedEPMeta != nil)
|
|
||||||
persistedEP, err := persistedEPMeta.WithTLSData(store, "eks-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
persistedCfg := persistedEP.KubernetesConfig()
|
|
||||||
actualCfg, err := persistedCfg.ClientConfig()
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, expectedCfg.ExecProvider, actualCfg.ExecProvider)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSaveLoadK3SConfig(t *testing.T) {
|
|
||||||
storeDir, err := ioutil.TempDir("", t.Name())
|
|
||||||
require.NoError(t, err)
|
|
||||||
defer os.RemoveAll(storeDir)
|
|
||||||
store := store.New(storeDir, testStoreCfg)
|
|
||||||
cfg, err := clientcmd.LoadFromFile("fixtures/k3s-kubeconfig")
|
|
||||||
require.NoError(t, err)
|
|
||||||
clientCfg := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
|
|
||||||
expectedCfg, err := clientCfg.ClientConfig()
|
|
||||||
require.NoError(t, err)
|
|
||||||
ep, err := FromKubeConfig("fixtures/k3s-kubeconfig", "", "")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.NoError(t, save(store, ep, "k3s-context"))
|
|
||||||
persistedMetadata, err := store.GetMetadata("k3s-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
persistedEPMeta := EndpointFromContext(persistedMetadata)
|
|
||||||
assert.True(t, persistedEPMeta != nil)
|
|
||||||
persistedEP, err := persistedEPMeta.WithTLSData(store, "k3s-context")
|
|
||||||
require.NoError(t, err)
|
|
||||||
persistedCfg := persistedEP.KubernetesConfig()
|
|
||||||
actualCfg, err := persistedCfg.ClientConfig()
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.True(t, len(actualCfg.Username) > 0)
|
|
||||||
assert.True(t, len(actualCfg.Password) > 0)
|
|
||||||
assert.Equal(t, expectedCfg.Username, actualCfg.Username)
|
|
||||||
assert.Equal(t, expectedCfg.Password, actualCfg.Password)
|
|
||||||
}
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
clusters:
|
|
||||||
- cluster:
|
|
||||||
server: https://some-server
|
|
||||||
name: kubernetes
|
|
||||||
contexts:
|
|
||||||
- context:
|
|
||||||
cluster: kubernetes
|
|
||||||
user: aws
|
|
||||||
name: aws
|
|
||||||
current-context: aws
|
|
||||||
kind: Config
|
|
||||||
preferences: {}
|
|
||||||
users:
|
|
||||||
- name: aws
|
|
||||||
user:
|
|
||||||
exec:
|
|
||||||
apiVersion: client.authentication.k8s.io/v1alpha1
|
|
||||||
command: heptio-authenticator-aws
|
|
||||||
args:
|
|
||||||
- "token"
|
|
||||||
- "-i"
|
|
||||||
- "eks-cf"
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
clusters:
|
|
||||||
- cluster:
|
|
||||||
server: https://some-server
|
|
||||||
name: gke_sample
|
|
||||||
contexts:
|
|
||||||
- context:
|
|
||||||
cluster: gke_sample
|
|
||||||
user: gke_sample
|
|
||||||
name: gke_sample
|
|
||||||
current-context: gke_sample
|
|
||||||
kind: Config
|
|
||||||
preferences: {}
|
|
||||||
users:
|
|
||||||
- name: gke_sample
|
|
||||||
user:
|
|
||||||
auth-provider:
|
|
||||||
config:
|
|
||||||
cmd-args: config config-helper --format=json
|
|
||||||
cmd-path: /google/google-cloud-sdk/bin/gcloud
|
|
||||||
expiry-key: '{.credential.token_expiry}'
|
|
||||||
token-key: '{.credential.access_token}'
|
|
||||||
name: gcp
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
clusters:
|
|
||||||
- cluster:
|
|
||||||
certificate-authority-data: dGhlLWNh
|
|
||||||
server: https://someserver
|
|
||||||
name: test-cluster
|
|
||||||
contexts:
|
|
||||||
- context:
|
|
||||||
cluster: test-cluster
|
|
||||||
user: test-user
|
|
||||||
namespace: zoinx
|
|
||||||
name: test
|
|
||||||
current-context: test
|
|
||||||
kind: Config
|
|
||||||
preferences: {}
|
|
||||||
users:
|
|
||||||
- name: test-user
|
|
||||||
user:
|
|
||||||
username: admin
|
|
||||||
password: testpwd
|
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
apiVersion: v1
|
|
||||||
clusters:
|
|
||||||
- cluster:
|
|
||||||
certificate-authority-data: dGhlLWNh
|
|
||||||
server: https://someserver
|
|
||||||
name: test-cluster
|
|
||||||
contexts:
|
|
||||||
- context:
|
|
||||||
cluster: test-cluster
|
|
||||||
user: test-user
|
|
||||||
namespace: zoinx
|
|
||||||
name: test
|
|
||||||
current-context: test
|
|
||||||
kind: Config
|
|
||||||
preferences: {}
|
|
||||||
users:
|
|
||||||
- name: test-user
|
|
||||||
user:
|
|
||||||
client-certificate-data: dGhlLWNlcnQ=
|
|
||||||
client-key-data: dGhlLWtleQ==
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
package context
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/docker/cli/cli/command"
|
|
||||||
"github.com/docker/cli/cli/config/configfile"
|
|
||||||
cliflags "github.com/docker/cli/cli/flags"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestDefaultContextInitializer(t *testing.T) {
|
|
||||||
cli, err := command.NewDockerCli()
|
|
||||||
require.NoError(t, err)
|
|
||||||
os.Setenv("KUBECONFIG", "./fixtures/test-kubeconfig")
|
|
||||||
defer os.Unsetenv("KUBECONFIG")
|
|
||||||
ctx, err := command.ResolveDefaultContext(&cliflags.CommonOptions{}, &configfile.ConfigFile{}, command.DefaultContextStoreConfig(), cli.Err())
|
|
||||||
require.NoError(t, err)
|
|
||||||
assert.Equal(t, "default", ctx.Meta.Name)
|
|
||||||
assert.Equal(t, "zoinx", ctx.Meta.Endpoints[KubernetesEndpoint].(EndpointMeta).DefaultNamespace)
|
|
||||||
}
|
|
||||||
@@ -18,8 +18,6 @@ import (
|
|||||||
"github.com/moby/buildkit/util/tracing/detect"
|
"github.com/moby/buildkit/util/tracing/detect"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
appsv1 "k8s.io/api/apps/v1"
|
appsv1 "k8s.io/api/apps/v1"
|
||||||
corev1 "k8s.io/api/core/v1"
|
|
||||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/client-go/kubernetes"
|
"k8s.io/client-go/kubernetes"
|
||||||
clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
|
clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
|
||||||
@@ -41,18 +39,15 @@ type Driver struct {
|
|||||||
factory driver.Factory
|
factory driver.Factory
|
||||||
minReplicas int
|
minReplicas int
|
||||||
deployment *appsv1.Deployment
|
deployment *appsv1.Deployment
|
||||||
configMaps []*corev1.ConfigMap
|
|
||||||
clientset *kubernetes.Clientset
|
clientset *kubernetes.Clientset
|
||||||
deploymentClient clientappsv1.DeploymentInterface
|
deploymentClient clientappsv1.DeploymentInterface
|
||||||
podClient clientcorev1.PodInterface
|
podClient clientcorev1.PodInterface
|
||||||
configMapClient clientcorev1.ConfigMapInterface
|
|
||||||
podChooser podchooser.PodChooser
|
podChooser podchooser.PodChooser
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) IsMobyDriver() bool {
|
func (d *Driver) IsMobyDriver() bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Config() driver.InitConfig {
|
func (d *Driver) Config() driver.InitConfig {
|
||||||
return d.InitConfig
|
return d.InitConfig
|
||||||
}
|
}
|
||||||
@@ -61,24 +56,7 @@ func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
|
|||||||
return progress.Wrap("[internal] booting buildkit", l, func(sub progress.SubLogger) error {
|
return progress.Wrap("[internal] booting buildkit", l, func(sub progress.SubLogger) error {
|
||||||
_, err := d.deploymentClient.Get(ctx, d.deployment.Name, metav1.GetOptions{})
|
_, err := d.deploymentClient.Get(ctx, d.deployment.Name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if !apierrors.IsNotFound(err) {
|
// TODO: return err if err != ErrNotFound
|
||||||
return errors.Wrapf(err, "error for bootstrap %q", d.deployment.Name)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, cfg := range d.configMaps {
|
|
||||||
// create ConfigMap first if exists
|
|
||||||
_, err = d.configMapClient.Create(ctx, cfg, metav1.CreateOptions{})
|
|
||||||
if err != nil {
|
|
||||||
if !apierrors.IsAlreadyExists(err) {
|
|
||||||
return errors.Wrapf(err, "error while calling configMapClient.Create for %q", cfg.Name)
|
|
||||||
}
|
|
||||||
_, err = d.configMapClient.Update(ctx, cfg, metav1.UpdateOptions{})
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrapf(err, "error while calling configMapClient.Update for %q", cfg.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = d.deploymentClient.Create(ctx, d.deployment, metav1.CreateOptions{})
|
_, err = d.deploymentClient.Create(ctx, d.deployment, metav1.CreateOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "error while calling deploymentClient.Create for %q", d.deployment.Name)
|
return errors.Wrapf(err, "error while calling deploymentClient.Create for %q", d.deployment.Name)
|
||||||
@@ -165,22 +143,9 @@ func (d *Driver) Stop(ctx context.Context, force bool) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
|
func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
|
||||||
if !rmDaemon {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := d.deploymentClient.Delete(ctx, d.deployment.Name, metav1.DeleteOptions{}); err != nil {
|
if err := d.deploymentClient.Delete(ctx, d.deployment.Name, metav1.DeleteOptions{}); err != nil {
|
||||||
if !apierrors.IsNotFound(err) {
|
return errors.Wrapf(err, "error while calling deploymentClient.Delete for %q", d.deployment.Name)
|
||||||
return errors.Wrapf(err, "error while calling deploymentClient.Delete for %q", d.deployment.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, cfg := range d.configMaps {
|
|
||||||
if err := d.configMapClient.Delete(ctx, cfg.Name, metav1.DeleteOptions{}); err != nil {
|
|
||||||
if !apierrors.IsNotFound(err) {
|
|
||||||
return errors.Wrapf(err, "error while calling configMapClient.Delete for %q", cfg.Name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -59,13 +59,11 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
d := &Driver{
|
d := &Driver{
|
||||||
factory: f,
|
factory: f,
|
||||||
InitConfig: cfg,
|
InitConfig: cfg,
|
||||||
clientset: clientset,
|
clientset: clientset,
|
||||||
}
|
}
|
||||||
|
|
||||||
deploymentOpt := &manifest.DeploymentOpt{
|
deploymentOpt := &manifest.DeploymentOpt{
|
||||||
Name: deploymentName,
|
Name: deploymentName,
|
||||||
Image: bkimage.DefaultImage,
|
Image: bkimage.DefaultImage,
|
||||||
@@ -73,19 +71,13 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
|
|||||||
BuildkitFlags: cfg.BuildkitFlags,
|
BuildkitFlags: cfg.BuildkitFlags,
|
||||||
Rootless: false,
|
Rootless: false,
|
||||||
Platforms: cfg.Platforms,
|
Platforms: cfg.Platforms,
|
||||||
ConfigFiles: cfg.Files,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
deploymentOpt.Qemu.Image = bkimage.QemuImage
|
|
||||||
|
|
||||||
loadbalance := LoadbalanceSticky
|
loadbalance := LoadbalanceSticky
|
||||||
|
imageOverride := ""
|
||||||
for k, v := range cfg.DriverOpts {
|
for k, v := range cfg.DriverOpts {
|
||||||
switch k {
|
switch k {
|
||||||
case "image":
|
case "image":
|
||||||
if v != "" {
|
imageOverride = v
|
||||||
deploymentOpt.Image = v
|
|
||||||
}
|
|
||||||
case "namespace":
|
case "namespace":
|
||||||
namespace = v
|
namespace = v
|
||||||
case "replicas":
|
case "replicas":
|
||||||
@@ -125,31 +117,20 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
|
|||||||
return nil, errors.Errorf("invalid loadbalance %q", v)
|
return nil, errors.Errorf("invalid loadbalance %q", v)
|
||||||
}
|
}
|
||||||
loadbalance = v
|
loadbalance = v
|
||||||
case "qemu.install":
|
|
||||||
deploymentOpt.Qemu.Install, err = strconv.ParseBool(v)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
case "qemu.image":
|
|
||||||
if v != "" {
|
|
||||||
deploymentOpt.Qemu.Image = v
|
|
||||||
}
|
|
||||||
default:
|
default:
|
||||||
return nil, errors.Errorf("invalid driver option %s for driver %s", k, DriverName)
|
return nil, errors.Errorf("invalid driver option %s for driver %s", k, DriverName)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if imageOverride != "" {
|
||||||
d.deployment, d.configMaps, err = manifest.NewDeployment(deploymentOpt)
|
deploymentOpt.Image = imageOverride
|
||||||
|
}
|
||||||
|
d.deployment, err = manifest.NewDeployment(deploymentOpt)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
d.minReplicas = deploymentOpt.Replicas
|
d.minReplicas = deploymentOpt.Replicas
|
||||||
|
|
||||||
d.deploymentClient = clientset.AppsV1().Deployments(namespace)
|
d.deploymentClient = clientset.AppsV1().Deployments(namespace)
|
||||||
d.podClient = clientset.CoreV1().Pods(namespace)
|
d.podClient = clientset.CoreV1().Pods(namespace)
|
||||||
d.configMapClient = clientset.CoreV1().ConfigMaps(namespace)
|
|
||||||
|
|
||||||
switch loadbalance {
|
switch loadbalance {
|
||||||
case LoadbalanceSticky:
|
case LoadbalanceSticky:
|
||||||
d.podChooser = &podchooser.StickyPodChooser{
|
d.podChooser = &podchooser.StickyPodChooser{
|
||||||
|
|||||||
@@ -1,8 +1,6 @@
|
|||||||
package manifest
|
package manifest
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"path"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
@@ -14,22 +12,11 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type DeploymentOpt struct {
|
type DeploymentOpt struct {
|
||||||
Namespace string
|
Namespace string
|
||||||
Name string
|
Name string
|
||||||
Image string
|
Image string
|
||||||
Replicas int
|
Replicas int
|
||||||
|
BuildkitFlags []string
|
||||||
// Qemu
|
|
||||||
Qemu struct {
|
|
||||||
// when true, will install binfmt
|
|
||||||
Install bool
|
|
||||||
Image string
|
|
||||||
}
|
|
||||||
|
|
||||||
BuildkitFlags []string
|
|
||||||
// files mounted at /etc/buildkitd
|
|
||||||
ConfigFiles map[string][]byte
|
|
||||||
|
|
||||||
Rootless bool
|
Rootless bool
|
||||||
NodeSelector map[string]string
|
NodeSelector map[string]string
|
||||||
RequestsCPU string
|
RequestsCPU string
|
||||||
@@ -44,7 +31,7 @@ const (
|
|||||||
AnnotationPlatform = "buildx.docker.com/platform"
|
AnnotationPlatform = "buildx.docker.com/platform"
|
||||||
)
|
)
|
||||||
|
|
||||||
func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.ConfigMap, err error) {
|
func NewDeployment(opt *DeploymentOpt) (*appsv1.Deployment, error) {
|
||||||
labels := map[string]string{
|
labels := map[string]string{
|
||||||
"app": opt.Name,
|
"app": opt.Name,
|
||||||
}
|
}
|
||||||
@@ -57,7 +44,7 @@ func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.Config
|
|||||||
annotations[AnnotationPlatform] = strings.Join(platformutil.Format(opt.Platforms), ",")
|
annotations[AnnotationPlatform] = strings.Join(platformutil.Format(opt.Platforms), ",")
|
||||||
}
|
}
|
||||||
|
|
||||||
d = &appsv1.Deployment{
|
d := &appsv1.Deployment{
|
||||||
TypeMeta: metav1.TypeMeta{
|
TypeMeta: metav1.TypeMeta{
|
||||||
APIVersion: appsv1.SchemeGroupVersion.String(),
|
APIVersion: appsv1.SchemeGroupVersion.String(),
|
||||||
Kind: "Deployment",
|
Kind: "Deployment",
|
||||||
@@ -104,54 +91,9 @@ func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.Config
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
for _, cfg := range splitConfigFiles(opt.ConfigFiles) {
|
|
||||||
cc := &corev1.ConfigMap{
|
|
||||||
TypeMeta: metav1.TypeMeta{
|
|
||||||
APIVersion: corev1.SchemeGroupVersion.String(),
|
|
||||||
Kind: "ConfigMap",
|
|
||||||
},
|
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
|
||||||
Namespace: opt.Namespace,
|
|
||||||
Name: opt.Name + "-" + cfg.name,
|
|
||||||
Annotations: annotations,
|
|
||||||
},
|
|
||||||
Data: cfg.files,
|
|
||||||
}
|
|
||||||
|
|
||||||
d.Spec.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{{
|
|
||||||
Name: cfg.name,
|
|
||||||
MountPath: path.Join("/etc/buildkit", cfg.path),
|
|
||||||
}}
|
|
||||||
|
|
||||||
d.Spec.Template.Spec.Volumes = []corev1.Volume{{
|
|
||||||
Name: "config",
|
|
||||||
VolumeSource: corev1.VolumeSource{
|
|
||||||
ConfigMap: &corev1.ConfigMapVolumeSource{
|
|
||||||
LocalObjectReference: corev1.LocalObjectReference{
|
|
||||||
Name: cc.Name,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}}
|
|
||||||
c = append(c, cc)
|
|
||||||
}
|
|
||||||
|
|
||||||
if opt.Qemu.Install {
|
|
||||||
d.Spec.Template.Spec.InitContainers = []corev1.Container{
|
|
||||||
{
|
|
||||||
Name: "qemu",
|
|
||||||
Image: opt.Qemu.Image,
|
|
||||||
Args: []string{"--install", "all"},
|
|
||||||
SecurityContext: &corev1.SecurityContext{
|
|
||||||
Privileged: &privileged,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if opt.Rootless {
|
if opt.Rootless {
|
||||||
if err := toRootless(d); err != nil {
|
if err := toRootless(d); err != nil {
|
||||||
return nil, nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -162,7 +104,7 @@ func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.Config
|
|||||||
if opt.RequestsCPU != "" {
|
if opt.RequestsCPU != "" {
|
||||||
reqCPU, err := resource.ParseQuantity(opt.RequestsCPU)
|
reqCPU, err := resource.ParseQuantity(opt.RequestsCPU)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
d.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceCPU] = reqCPU
|
d.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceCPU] = reqCPU
|
||||||
}
|
}
|
||||||
@@ -170,7 +112,7 @@ func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.Config
|
|||||||
if opt.RequestsMemory != "" {
|
if opt.RequestsMemory != "" {
|
||||||
reqMemory, err := resource.ParseQuantity(opt.RequestsMemory)
|
reqMemory, err := resource.ParseQuantity(opt.RequestsMemory)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
d.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceMemory] = reqMemory
|
d.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceMemory] = reqMemory
|
||||||
}
|
}
|
||||||
@@ -178,7 +120,7 @@ func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.Config
|
|||||||
if opt.LimitsCPU != "" {
|
if opt.LimitsCPU != "" {
|
||||||
limCPU, err := resource.ParseQuantity(opt.LimitsCPU)
|
limCPU, err := resource.ParseQuantity(opt.LimitsCPU)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
d.Spec.Template.Spec.Containers[0].Resources.Limits[corev1.ResourceCPU] = limCPU
|
d.Spec.Template.Spec.Containers[0].Resources.Limits[corev1.ResourceCPU] = limCPU
|
||||||
}
|
}
|
||||||
@@ -186,12 +128,12 @@ func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.Config
|
|||||||
if opt.LimitsMemory != "" {
|
if opt.LimitsMemory != "" {
|
||||||
limMemory, err := resource.ParseQuantity(opt.LimitsMemory)
|
limMemory, err := resource.ParseQuantity(opt.LimitsMemory)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
d.Spec.Template.Spec.Containers[0].Resources.Limits[corev1.ResourceMemory] = limMemory
|
d.Spec.Template.Spec.Containers[0].Resources.Limits[corev1.ResourceMemory] = limMemory
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return d, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func toRootless(d *appsv1.Deployment) error {
|
func toRootless(d *appsv1.Deployment) error {
|
||||||
@@ -207,35 +149,3 @@ func toRootless(d *appsv1.Deployment) error {
|
|||||||
d.Spec.Template.ObjectMeta.Annotations["container.seccomp.security.alpha.kubernetes.io/"+containerName] = "unconfined"
|
d.Spec.Template.ObjectMeta.Annotations["container.seccomp.security.alpha.kubernetes.io/"+containerName] = "unconfined"
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type config struct {
|
|
||||||
name string
|
|
||||||
path string
|
|
||||||
files map[string]string
|
|
||||||
}
|
|
||||||
|
|
||||||
func splitConfigFiles(m map[string][]byte) []config {
|
|
||||||
var c []config
|
|
||||||
idx := map[string]int{}
|
|
||||||
nameIdx := 0
|
|
||||||
for k, v := range m {
|
|
||||||
dir := path.Dir(k)
|
|
||||||
i, ok := idx[dir]
|
|
||||||
if !ok {
|
|
||||||
idx[dir] = len(c)
|
|
||||||
i = len(c)
|
|
||||||
name := "config"
|
|
||||||
if dir != "." {
|
|
||||||
nameIdx++
|
|
||||||
name = fmt.Sprintf("%s-%d", name, nameIdx)
|
|
||||||
}
|
|
||||||
c = append(c, config{
|
|
||||||
path: dir,
|
|
||||||
name: name,
|
|
||||||
files: map[string]string{},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
c[i].files[path.Base(k)] = string(v)
|
|
||||||
}
|
|
||||||
return c
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -53,7 +53,7 @@ type InitConfig struct {
|
|||||||
DockerAPI dockerclient.APIClient
|
DockerAPI dockerclient.APIClient
|
||||||
KubeClientConfig KubeClientConfig
|
KubeClientConfig KubeClientConfig
|
||||||
BuildkitFlags []string
|
BuildkitFlags []string
|
||||||
Files map[string][]byte
|
ConfigFile string
|
||||||
DriverOpts map[string]string
|
DriverOpts map[string]string
|
||||||
Auth Auth
|
Auth Auth
|
||||||
Platforms []specs.Platform
|
Platforms []specs.Platform
|
||||||
@@ -103,17 +103,17 @@ func GetFactory(name string, instanceRequired bool) Factory {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetDriver(ctx context.Context, name string, f Factory, api dockerclient.APIClient, auth Auth, kcc KubeClientConfig, flags []string, files map[string][]byte, do map[string]string, platforms []specs.Platform, contextPathHash string) (Driver, error) {
|
func GetDriver(ctx context.Context, name string, f Factory, api dockerclient.APIClient, auth Auth, kcc KubeClientConfig, flags []string, config string, do map[string]string, platforms []specs.Platform, contextPathHash string) (Driver, error) {
|
||||||
ic := InitConfig{
|
ic := InitConfig{
|
||||||
DockerAPI: api,
|
DockerAPI: api,
|
||||||
KubeClientConfig: kcc,
|
KubeClientConfig: kcc,
|
||||||
Name: name,
|
Name: name,
|
||||||
BuildkitFlags: flags,
|
BuildkitFlags: flags,
|
||||||
|
ConfigFile: config,
|
||||||
DriverOpts: do,
|
DriverOpts: do,
|
||||||
Auth: auth,
|
Auth: auth,
|
||||||
Platforms: platforms,
|
Platforms: platforms,
|
||||||
ContextPathHash: contextPathHash,
|
ContextPathHash: contextPathHash,
|
||||||
Files: files,
|
|
||||||
}
|
}
|
||||||
if f == nil {
|
if f == nil {
|
||||||
var err error
|
var err error
|
||||||
@@ -129,15 +129,8 @@ func GetDriver(ctx context.Context, name string, f Factory, api dockerclient.API
|
|||||||
return &cachedDriver{Driver: d}, nil
|
return &cachedDriver{Driver: d}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetFactories() []Factory {
|
func GetFactories() map[string]Factory {
|
||||||
ds := make([]Factory, 0, len(drivers))
|
return drivers
|
||||||
for _, d := range drivers {
|
|
||||||
ds = append(ds, d)
|
|
||||||
}
|
|
||||||
sort.Slice(ds, func(i, j int) bool {
|
|
||||||
return ds[i].Name() < ds[j].Name()
|
|
||||||
})
|
|
||||||
return ds
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type cachedDriver struct {
|
type cachedDriver struct {
|
||||||
|
|||||||
53
go.mod
53
go.mod
@@ -1,24 +1,28 @@
|
|||||||
module github.com/docker/buildx
|
module github.com/docker/buildx
|
||||||
|
|
||||||
go 1.16
|
go 1.13
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect
|
github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect
|
||||||
|
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 // indirect
|
||||||
github.com/bugsnag/bugsnag-go v1.4.1 // indirect
|
github.com/bugsnag/bugsnag-go v1.4.1 // indirect
|
||||||
github.com/bugsnag/panicwrap v1.2.0 // indirect
|
github.com/bugsnag/panicwrap v1.2.0 // indirect
|
||||||
github.com/cenkalti/backoff v2.1.1+incompatible // indirect
|
github.com/cenkalti/backoff v2.1.1+incompatible // indirect
|
||||||
github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e // indirect
|
github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e // indirect
|
||||||
github.com/compose-spec/compose-go v1.2.1
|
github.com/compose-spec/compose-go v0.0.0-20210729195839-de56f4f0cb3c
|
||||||
github.com/containerd/console v1.0.3
|
github.com/containerd/console v1.0.2
|
||||||
github.com/containerd/containerd v1.6.1
|
github.com/containerd/containerd v1.5.4
|
||||||
github.com/docker/cli v20.10.12+incompatible
|
github.com/denisenkom/go-mssqldb v0.0.0-20190315220205-a8ed825ac853 // indirect
|
||||||
github.com/docker/cli-docs-tool v0.4.0
|
github.com/docker/cli v20.10.7+incompatible
|
||||||
github.com/docker/distribution v2.8.0+incompatible
|
github.com/docker/compose-on-kubernetes v0.4.19-0.20190128150448-356b2919c496 // indirect
|
||||||
|
github.com/docker/distribution v2.7.1+incompatible
|
||||||
github.com/docker/docker v20.10.7+incompatible
|
github.com/docker/docker v20.10.7+incompatible
|
||||||
|
github.com/docker/docker-credential-helpers v0.6.4-0.20210125172408-38bea2ce277a // indirect
|
||||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
|
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
|
||||||
github.com/docker/go-units v0.4.0
|
|
||||||
github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4 // indirect
|
github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4 // indirect
|
||||||
|
github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect
|
||||||
github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 // indirect
|
github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 // indirect
|
||||||
|
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 // indirect
|
||||||
github.com/fvbommel/sortorder v1.0.1 // indirect
|
github.com/fvbommel/sortorder v1.0.1 // indirect
|
||||||
github.com/gofrs/flock v0.7.3
|
github.com/gofrs/flock v0.7.3
|
||||||
github.com/gofrs/uuid v3.3.0+incompatible // indirect
|
github.com/gofrs/uuid v3.3.0+incompatible // indirect
|
||||||
@@ -29,37 +33,34 @@ require (
|
|||||||
github.com/hashicorp/hcl/v2 v2.8.2
|
github.com/hashicorp/hcl/v2 v2.8.2
|
||||||
github.com/jinzhu/gorm v1.9.2 // indirect
|
github.com/jinzhu/gorm v1.9.2 // indirect
|
||||||
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
|
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
|
||||||
|
github.com/jinzhu/now v1.0.0 // indirect
|
||||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
|
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
|
||||||
github.com/moby/buildkit v0.10.1-0.20220403220257-10e6f94bf90d
|
github.com/lib/pq v1.10.0 // indirect
|
||||||
github.com/morikuni/aec v1.0.0
|
github.com/mattn/go-sqlite3 v1.10.0 // indirect
|
||||||
|
github.com/moby/buildkit v0.8.2-0.20210702160134-1a7543a10527
|
||||||
github.com/opencontainers/go-digest v1.0.0
|
github.com/opencontainers/go-digest v1.0.0
|
||||||
github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5
|
github.com/opencontainers/image-spec v1.0.1
|
||||||
github.com/pelletier/go-toml v1.9.4
|
|
||||||
github.com/pkg/errors v0.9.1
|
github.com/pkg/errors v0.9.1
|
||||||
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
|
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
|
||||||
github.com/sirupsen/logrus v1.8.1
|
github.com/sirupsen/logrus v1.8.1
|
||||||
github.com/spf13/cobra v1.2.1
|
github.com/spf13/cobra v1.1.1
|
||||||
github.com/spf13/pflag v1.0.5
|
github.com/spf13/pflag v1.0.5
|
||||||
github.com/stretchr/testify v1.7.0
|
github.com/stretchr/testify v1.7.0
|
||||||
github.com/theupdateframework/notary v0.6.1 // indirect
|
github.com/theupdateframework/notary v0.6.1 // indirect
|
||||||
github.com/tonistiigi/fsutil v0.0.0-20220315205639-9ed612626da3 // indirect
|
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
|
||||||
github.com/zclconf/go-cty v1.10.0
|
github.com/zclconf/go-cty v1.7.1
|
||||||
go.opentelemetry.io/otel v1.4.1
|
go.opentelemetry.io/otel v1.0.0-RC1
|
||||||
go.opentelemetry.io/otel/trace v1.4.1
|
go.opentelemetry.io/otel/trace v1.0.0-RC1
|
||||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||||
google.golang.org/grpc v1.44.0
|
|
||||||
gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect
|
gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect
|
||||||
gopkg.in/fatih/pool.v2 v2.0.0 // indirect
|
gopkg.in/fatih/pool.v2 v2.0.0 // indirect
|
||||||
gopkg.in/gorethink/gorethink.v3 v3.0.5 // indirect
|
gopkg.in/gorethink/gorethink.v3 v3.0.5 // indirect
|
||||||
k8s.io/api v0.23.4
|
k8s.io/api v0.20.6
|
||||||
k8s.io/apimachinery v0.23.4
|
k8s.io/apimachinery v0.20.6
|
||||||
k8s.io/client-go v0.23.4
|
k8s.io/client-go v0.20.6
|
||||||
)
|
)
|
||||||
|
|
||||||
replace (
|
replace (
|
||||||
github.com/docker/cli => github.com/docker/cli v20.10.3-0.20220226190722-8667ccd1124c+incompatible
|
github.com/docker/cli => github.com/docker/cli v20.10.3-0.20210702143511-f782d1355eff+incompatible
|
||||||
github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220121014307-40bb9831756f+incompatible
|
github.com/docker/docker => github.com/docker/docker v20.10.3-0.20210609100121-ef4d47340142+incompatible
|
||||||
k8s.io/api => k8s.io/api v0.22.4
|
|
||||||
k8s.io/apimachinery => k8s.io/apimachinery v0.22.4
|
|
||||||
k8s.io/client-go => k8s.io/client-go v0.22.4
|
|
||||||
)
|
)
|
||||||
|
|||||||
16
hack/binaries
Executable file
16
hack/binaries
Executable file
@@ -0,0 +1,16 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
. $(dirname $0)/util
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
: ${TARGETPLATFORM=$CLI_PLATFORM}
|
||||||
|
|
||||||
|
platformFlag=""
|
||||||
|
if [ -n "$TARGETPLATFORM" ]; then
|
||||||
|
platformFlag="--platform $TARGETPLATFORM"
|
||||||
|
fi
|
||||||
|
|
||||||
|
buildxCmd build $platformFlag \
|
||||||
|
--target "binaries" \
|
||||||
|
--output "type=local,dest=./bin/" \
|
||||||
|
.
|
||||||
38
hack/build_ci_first_pass
Executable file
38
hack/build_ci_first_pass
Executable file
@@ -0,0 +1,38 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
TYP=$1
|
||||||
|
|
||||||
|
. $(dirname $0)/util
|
||||||
|
set -e
|
||||||
|
|
||||||
|
usage() {
|
||||||
|
echo "usage: ./hack/build_ci_first_pass <binaries>"
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
if [ -z "$TYP" ]; then
|
||||||
|
usage
|
||||||
|
fi
|
||||||
|
|
||||||
|
importCacheFlags=""
|
||||||
|
exportCacheFlags=""
|
||||||
|
if [ "$GITHUB_ACTIONS" = "true" ]; then
|
||||||
|
if [ -n "$cacheRefFrom" ]; then
|
||||||
|
importCacheFlags="--cache-from=type=local,src=$cacheRefFrom"
|
||||||
|
fi
|
||||||
|
if [ -n "$cacheRefTo" ]; then
|
||||||
|
exportCacheFlags="--cache-to=type=local,dest=$cacheRefTo"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
case $TYP in
|
||||||
|
"binaries")
|
||||||
|
buildxCmd build $importCacheFlags $exportCacheFlags \
|
||||||
|
--target "binaries" \
|
||||||
|
$currentcontext
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo >&2 "Unknown type $TYP"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
24
hack/cross
Executable file
24
hack/cross
Executable file
@@ -0,0 +1,24 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
. $(dirname $0)/util
|
||||||
|
set -e
|
||||||
|
|
||||||
|
: ${TARGETPLATFORM=linux/amd64,linux/arm/v7,linux/arm64,darwin/amd64,windows/amd64,linux/ppc64le,linux/s390x,linux/riscv64}
|
||||||
|
: ${EXPORT_LOCAL=}
|
||||||
|
|
||||||
|
importCacheFlags=""
|
||||||
|
if [ "$GITHUB_ACTIONS" = "true" ]; then
|
||||||
|
if [ -n "$cacheRefFrom" ]; then
|
||||||
|
importCacheFlags="--cache-from=type=local,src=$cacheRefFrom"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
exportFlag=""
|
||||||
|
if [ -n "$EXPORT_LOCAL" ]; then
|
||||||
|
exportFlag="--output=type=local,dest=$EXPORT_LOCAL"
|
||||||
|
fi
|
||||||
|
|
||||||
|
buildxCmd build $importCacheFlags $exportFlag \
|
||||||
|
--target "binaries" \
|
||||||
|
--platform "$TARGETPLATFORM" \
|
||||||
|
$currentcontext
|
||||||
@@ -13,3 +13,7 @@ else
|
|||||||
( $dockerdCmd &>/var/log/dockerd.log & )
|
( $dockerdCmd &>/var/log/dockerd.log & )
|
||||||
exec ash
|
exec ash
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,33 +0,0 @@
|
|||||||
# syntax=docker/dockerfile:1.3-labs
|
|
||||||
|
|
||||||
FROM alpine:3.14 AS gen
|
|
||||||
RUN apk add --no-cache git
|
|
||||||
WORKDIR /src
|
|
||||||
RUN --mount=type=bind,target=. <<EOT
|
|
||||||
#!/usr/bin/env bash
|
|
||||||
set -e
|
|
||||||
mkdir /out
|
|
||||||
# see also ".mailmap" for how email addresses and names are deduplicated
|
|
||||||
{
|
|
||||||
echo "# This file lists all individuals having contributed content to the repository."
|
|
||||||
echo "# For how it is generated, see hack/dockerfiles/authors.Dockerfile."
|
|
||||||
echo
|
|
||||||
git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
|
|
||||||
} > /out/AUTHORS
|
|
||||||
cat /out/AUTHORS
|
|
||||||
EOT
|
|
||||||
|
|
||||||
FROM scratch AS update
|
|
||||||
COPY --from=gen /out /
|
|
||||||
|
|
||||||
FROM gen AS validate
|
|
||||||
RUN --mount=type=bind,target=.,rw <<EOT
|
|
||||||
set -e
|
|
||||||
git add -A
|
|
||||||
cp -rf /out/* .
|
|
||||||
if [ -n "$(git status --porcelain -- AUTHORS)" ]; then
|
|
||||||
echo >&2 'ERROR: Authors result differs. Please update with "make authors"'
|
|
||||||
git status --porcelain -- AUTHORS
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
EOT
|
|
||||||
@@ -1,42 +1,29 @@
|
|||||||
# syntax=docker/dockerfile:1.3-labs
|
# syntax = docker/dockerfile:1.2
|
||||||
|
|
||||||
ARG GO_VERSION=1.17
|
FROM golang:1.16-alpine AS docsgen
|
||||||
ARG FORMATS=md,yaml
|
|
||||||
|
|
||||||
FROM golang:${GO_VERSION}-alpine AS docsgen
|
|
||||||
WORKDIR /src
|
WORKDIR /src
|
||||||
RUN --mount=target=. \
|
RUN --mount=target=. \
|
||||||
--mount=target=/root/.cache,type=cache \
|
--mount=target=/root/.cache,type=cache \
|
||||||
go build -mod=vendor -o /out/docsgen ./docs/generate.go
|
go build -mod=vendor -o /out/docsgen ./docs/docsgen
|
||||||
|
|
||||||
FROM alpine AS gen
|
FROM alpine AS gen
|
||||||
RUN apk add --no-cache rsync git
|
RUN apk add --no-cache rsync git
|
||||||
WORKDIR /src
|
WORKDIR /src
|
||||||
COPY --from=docsgen /out/docsgen /usr/bin
|
COPY --from=docsgen /out/docsgen /usr/bin
|
||||||
ARG FORMATS
|
|
||||||
RUN --mount=target=/context \
|
RUN --mount=target=/context \
|
||||||
--mount=target=.,type=tmpfs <<EOT
|
--mount=target=.,type=tmpfs,readwrite \
|
||||||
set -e
|
rsync -a /context/. . && \
|
||||||
rsync -a /context/. .
|
docsgen && \
|
||||||
docsgen --formats "$FORMATS" --source "docs/reference"
|
mkdir /out && cp -r docs/reference /out
|
||||||
mkdir /out
|
|
||||||
cp -r docs/reference /out
|
|
||||||
EOT
|
|
||||||
|
|
||||||
FROM scratch AS update
|
FROM scratch AS update
|
||||||
COPY --from=gen /out /out
|
COPY --from=gen /out /out
|
||||||
|
|
||||||
FROM gen AS validate
|
FROM gen AS validate
|
||||||
RUN --mount=target=/context \
|
RUN --mount=target=/context \
|
||||||
--mount=target=.,type=tmpfs <<EOT
|
--mount=target=.,type=tmpfs,readwrite \
|
||||||
set -e
|
rsync -a /context/. . && \
|
||||||
rsync -a /context/. .
|
git add -A && \
|
||||||
git add -A
|
rm -rf docs/reference/* && \
|
||||||
rm -rf docs/reference/*
|
cp -rf /out/* ./docs/ && \
|
||||||
cp -rf /out/* ./docs/
|
./hack/validate-docs check
|
||||||
if [ -n "$(git status --porcelain -- docs/reference)" ]; then
|
|
||||||
echo >&2 'ERROR: Docs result differs. Please update with "make docs"'
|
|
||||||
git status --porcelain -- docs/reference
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
EOT
|
|
||||||
|
|||||||
@@ -1,12 +1,10 @@
|
|||||||
# syntax=docker/dockerfile:1.3
|
# syntax=docker/dockerfile:1.2
|
||||||
|
|
||||||
ARG GO_VERSION=1.17
|
FROM golang:1.16-alpine
|
||||||
|
|
||||||
FROM golang:${GO_VERSION}-alpine
|
|
||||||
RUN apk add --no-cache gcc musl-dev yamllint
|
RUN apk add --no-cache gcc musl-dev yamllint
|
||||||
RUN wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.36.0
|
RUN wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.36.0
|
||||||
WORKDIR /go/src/github.com/docker/buildx
|
WORKDIR /go/src/github.com/docker/buildx
|
||||||
RUN --mount=target=/go/src/github.com/docker/buildx --mount=target=/root/.cache,type=cache \
|
RUN --mount=target=/go/src/github.com/docker/buildx --mount=target=/root/.cache,type=cache \
|
||||||
golangci-lint run
|
golangci-lint run
|
||||||
RUN --mount=target=/go/src/github.com/docker/buildx --mount=target=/root/.cache,type=cache \
|
RUN --mount=target=/go/src/github.com/docker/buildx --mount=target=/root/.cache,type=cache \
|
||||||
yamllint -c .yamllint.yml --strict .
|
yamllint -c .yamllint.yml --strict .
|
||||||
@@ -1,46 +1,23 @@
|
|||||||
# syntax=docker/dockerfile:1.3-labs
|
# syntax = docker/dockerfile:1.2
|
||||||
|
|
||||||
ARG GO_VERSION=1.17
|
FROM golang:1.16-alpine AS vendored
|
||||||
ARG MODOUTDATED_VERSION=v0.8.0
|
RUN apk add --no-cache git rsync
|
||||||
|
|
||||||
FROM golang:${GO_VERSION}-alpine AS base
|
|
||||||
RUN apk add --no-cache git rsync
|
|
||||||
WORKDIR /src
|
WORKDIR /src
|
||||||
|
|
||||||
FROM base AS vendored
|
|
||||||
RUN --mount=target=/context \
|
RUN --mount=target=/context \
|
||||||
--mount=target=.,type=tmpfs \
|
--mount=target=.,type=tmpfs,readwrite \
|
||||||
--mount=target=/go/pkg/mod,type=cache <<EOT
|
--mount=target=/go/pkg/mod,type=cache \
|
||||||
set -e
|
rsync -a /context/. . && \
|
||||||
rsync -a /context/. .
|
go mod tidy && go mod vendor && \
|
||||||
go mod tidy
|
mkdir /out && cp -r go.mod go.sum vendor /out
|
||||||
go mod vendor
|
|
||||||
mkdir /out
|
|
||||||
cp -r go.mod go.sum vendor /out
|
|
||||||
EOT
|
|
||||||
|
|
||||||
FROM scratch AS update
|
FROM scratch AS update
|
||||||
COPY --from=vendored /out /out
|
COPY --from=vendored /out /out
|
||||||
|
|
||||||
FROM vendored AS validate
|
FROM vendored AS validate
|
||||||
RUN --mount=target=/context \
|
RUN --mount=target=/context \
|
||||||
--mount=target=.,type=tmpfs <<EOT
|
--mount=target=.,type=tmpfs,readwrite \
|
||||||
set -e
|
rsync -a /context/. . && \
|
||||||
rsync -a /context/. .
|
git add -A && \
|
||||||
git add -A
|
rm -rf vendor && \
|
||||||
rm -rf vendor
|
cp -rf /out/* . && \
|
||||||
cp -rf /out/* .
|
./hack/validate-vendor check
|
||||||
if [ -n "$(git status --porcelain -- go.mod go.sum vendor)" ]; then
|
|
||||||
echo >&2 'ERROR: Vendor result differs. Please vendor your package with "make vendor"'
|
|
||||||
git status --porcelain -- go.mod go.sum vendor
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
EOT
|
|
||||||
|
|
||||||
FROM psampaz/go-mod-outdated:${MODOUTDATED_VERSION} AS go-mod-outdated
|
|
||||||
FROM base AS outdated
|
|
||||||
ARG _RANDOM
|
|
||||||
RUN --mount=target=.,ro \
|
|
||||||
--mount=target=/go/pkg/mod,type=cache \
|
|
||||||
--mount=from=go-mod-outdated,source=/home/go-mod-outdated,target=/usr/bin/go-mod-outdated \
|
|
||||||
go list -mod=readonly -u -m -json all | go-mod-outdated -update -direct
|
|
||||||
|
|||||||
21
hack/generate-authors
Executable file
21
hack/generate-authors
Executable file
@@ -0,0 +1,21 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -eu -o pipefail -x
|
||||||
|
|
||||||
|
if [ -x "$(command -v greadlink)" ]; then
|
||||||
|
# on macOS, GNU readlink is ava (greadlink) can be installed through brew install coreutils
|
||||||
|
cd "$(dirname "$(greadlink -f "$BASH_SOURCE")")/.."
|
||||||
|
else
|
||||||
|
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.."
|
||||||
|
fi
|
||||||
|
|
||||||
|
# see also ".mailmap" for how email addresses and names are deduplicated
|
||||||
|
|
||||||
|
{
|
||||||
|
cat <<-'EOH'
|
||||||
|
# This file lists all individuals having contributed content to the repository.
|
||||||
|
# For how it is generated, see `scripts/generate-authors.sh`.
|
||||||
|
EOH
|
||||||
|
echo
|
||||||
|
git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
|
||||||
|
} > AUTHORS
|
||||||
6
hack/lint
Executable file
6
hack/lint
Executable file
@@ -0,0 +1,6 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
. $(dirname $0)/util
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
buildxCmd build --file ./hack/dockerfiles/lint.Dockerfile .
|
||||||
34
hack/release
34
hack/release
@@ -1,20 +1,28 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
OUT=${1:-release-out}
|
||||||
|
|
||||||
|
. $(dirname $0)/util
|
||||||
set -eu -o pipefail
|
set -eu -o pipefail
|
||||||
|
|
||||||
: ${BUILDX_CMD=docker buildx}
|
: ${PLATFORMS=linux/amd64}
|
||||||
: ${RELEASE_OUT=./release-out}
|
|
||||||
|
|
||||||
# release
|
importCacheFlags=""
|
||||||
(set -x ; ${BUILDX_CMD} bake --set "*.output=$RELEASE_OUT" release)
|
if [[ -n "$cacheRefFrom" ]] && [[ "$cacheType" = "local" ]]; then
|
||||||
|
for ref in $cacheRefFrom; do
|
||||||
|
importCacheFlags="$importCacheFlags--cache-from=type=local,src=$ref "
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
|
||||||
|
buildxCmd build $importCacheFlags \
|
||||||
|
--target "release" \
|
||||||
|
--platform "$PLATFORMS" \
|
||||||
|
--output "type=local,dest=$OUT" \
|
||||||
|
$currentcontext
|
||||||
|
|
||||||
# wrap binaries
|
# wrap binaries
|
||||||
mv -f ./${RELEASE_OUT}/**/* ./${RELEASE_OUT}/
|
{ set +x; } 2>/dev/null
|
||||||
find ./${RELEASE_OUT} -type d -empty -delete
|
if [[ $PLATFORMS =~ "," ]]; then
|
||||||
|
mv -f ./$OUT/**/* ./$OUT/
|
||||||
# checksums
|
find ./$OUT -type d -empty -delete
|
||||||
if ! type shasum > /dev/null 2>&1; then
|
fi
|
||||||
echo >&2 "ERROR: shasum is required"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
find ./${RELEASE_OUT}/ -type f \( -iname "buildx-*" ! -iname "*darwin*" \) -print0 | sort -z | xargs -r0 shasum -a 256 -b | sed 's# .*/# #' > ./${RELEASE_OUT}/checksums.txt
|
|
||||||
11
hack/shell
11
hack/shell
@@ -2,18 +2,17 @@
|
|||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
: ${BUILDX_CMD=docker buildx}
|
|
||||||
: ${TMUX=}
|
: ${TMUX=}
|
||||||
|
|
||||||
function clean {
|
function clean {
|
||||||
docker rmi $iid
|
docker rmi $(cat $iidfile)
|
||||||
}
|
}
|
||||||
|
|
||||||
iid=buildx-shell
|
iidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX)
|
||||||
(set -x ; ${BUILDX_CMD} build --output "type=docker,name=$iid" --target shell .)
|
DOCKER_BUILDKIT=1 docker build --iidfile $iidfile --target demo-env .
|
||||||
trap clean EXIT
|
trap clean EXIT
|
||||||
SSH=
|
SSH=
|
||||||
if [ -n "$MOUNT_SSH_AUTH_SOCK" ]; then
|
if [ -n "$MOUNT_SSH_AUTH_SOCK" ]; then
|
||||||
SSH="-v $SSH_AUTH_SOCK:$SSH_AUTH_SOCK -e SSH_AUTH_SOCK"
|
SSH="-v $SSH_AUTH_SOCK:$SSH_AUTH_SOCK -e SSH_AUTH_SOCK"
|
||||||
fi
|
fi
|
||||||
docker run $SSH -it --privileged --rm -e TMUX_ENTRYPOINT=$TMUX $iid
|
docker run $SSH -it --privileged --rm -e TMUX_ENTRYPOINT=$TMUX $(cat $iidfile)
|
||||||
|
|||||||
47
hack/test
Executable file
47
hack/test
Executable file
@@ -0,0 +1,47 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
. $(dirname $0)/util
|
||||||
|
set -eu -o pipefail
|
||||||
|
|
||||||
|
: ${BUILDX_NOCACHE=}
|
||||||
|
: ${TEST_COVERAGE=}
|
||||||
|
|
||||||
|
importCacheFlags=""
|
||||||
|
if [ -n "$cacheRefFrom" ]; then
|
||||||
|
if [ "$cacheType" = "local" ]; then
|
||||||
|
for ref in $cacheRefFrom; do
|
||||||
|
importCacheFlags="$importCacheFlags--cache-from=type=local,src=$ref "
|
||||||
|
done
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
iid="buildx-tests"
|
||||||
|
iidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX)
|
||||||
|
|
||||||
|
coverageVol=""
|
||||||
|
coverageFlags=""
|
||||||
|
if [ "$TEST_COVERAGE" = "1" ]; then
|
||||||
|
covdir="$(pwd)/coverage"
|
||||||
|
mkdir -p "$covdir"
|
||||||
|
coverageVol="-v $covdir:/coverage"
|
||||||
|
coverageFlags="-coverprofile=/coverage/coverage.txt -covermode=atomic"
|
||||||
|
fi
|
||||||
|
|
||||||
|
buildxCmd build $importCacheFlags \
|
||||||
|
--target "integration-tests" \
|
||||||
|
--output "type=docker,name=$iid" \
|
||||||
|
$currentcontext
|
||||||
|
|
||||||
|
cacheVolume="buildx-cache"
|
||||||
|
if ! docker inspect "$cacheVolume" > /dev/null 2>&1; then
|
||||||
|
cacheVolume=$(docker create --name=buildx-cache -v /root/.cache -v /go/pkg/mod alpine)
|
||||||
|
fi
|
||||||
|
|
||||||
|
docker run --rm -v /tmp $coverageVol --volumes-from=$cacheVolume --privileged $iid go test $coverageFlags ${TESTFLAGS:--v} ${TESTPKGS:-./...}
|
||||||
|
|
||||||
|
if [ -n "$BUILDX_NOCACHE" ]; then
|
||||||
|
docker rm -v $cacheVolume
|
||||||
|
fi
|
||||||
|
|
||||||
|
rm "$iidfile"
|
||||||
|
docker rmi $iid
|
||||||
161
hack/test-driver
161
hack/test-driver
@@ -1,161 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
set -eu -o pipefail
|
|
||||||
|
|
||||||
: ${BUILDX_CMD=docker buildx}
|
|
||||||
: ${BUILDKIT_IMAGE=moby/buildkit:buildx-stable-1}
|
|
||||||
: ${BUILDKIT_CFG=}
|
|
||||||
: ${DRIVER=docker-container}
|
|
||||||
: ${DRIVER_OPT=}
|
|
||||||
: ${MULTI_NODE=0}
|
|
||||||
: ${PLATFORMS=linux/amd64,linux/arm64}
|
|
||||||
|
|
||||||
function buildxCmd {
|
|
||||||
(set -x ; $BUILDX_CMD "$@")
|
|
||||||
}
|
|
||||||
|
|
||||||
function clean {
|
|
||||||
rm -rf "$context"
|
|
||||||
if [ "$builderName" != "default" ]; then
|
|
||||||
buildxCmd rm "$builderName"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
context=$(mktemp -d -t buildx-output.XXXXXXXXXX)
|
|
||||||
dockerfile=${context}/Dockerfile
|
|
||||||
bakedef=${context}/docker-bake.hcl
|
|
||||||
trap clean EXIT
|
|
||||||
|
|
||||||
builderName=buildx-test-$(openssl rand -hex 16)
|
|
||||||
buildPlatformFlag=
|
|
||||||
if [ "$DRIVER" = "docker" ]; then
|
|
||||||
builderName=default
|
|
||||||
else
|
|
||||||
buildPlatformFlag=--platform="${PLATFORMS}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
driverOpt=image=${BUILDKIT_IMAGE}
|
|
||||||
if [ -n "$DRIVER_OPT" ]; then
|
|
||||||
driverOpt=$driverOpt,$DRIVER_OPT
|
|
||||||
fi
|
|
||||||
|
|
||||||
# create builder except for docker driver
|
|
||||||
if [ "$DRIVER" != "docker" ]; then
|
|
||||||
if [ "${MULTI_NODE}" = "1" ]; then
|
|
||||||
firstNode=1
|
|
||||||
for platform in ${PLATFORMS//,/ }; do
|
|
||||||
createFlags=""
|
|
||||||
if [ -f "$BUILDKIT_CFG" ]; then
|
|
||||||
createFlags="$createFlags --config=${BUILDKIT_CFG}"
|
|
||||||
fi
|
|
||||||
if [ "$firstNode" = "0" ]; then
|
|
||||||
createFlags="$createFlags --append"
|
|
||||||
fi
|
|
||||||
buildxCmd create ${createFlags} \
|
|
||||||
--name="${builderName}" \
|
|
||||||
--node="${builderName}-${platform/\//-}" \
|
|
||||||
--driver="${DRIVER}" \
|
|
||||||
--driver-opt="${driverOpt}" \
|
|
||||||
--platform="${platform}"
|
|
||||||
firstNode=0
|
|
||||||
done
|
|
||||||
else
|
|
||||||
createFlags=""
|
|
||||||
if [ -f "$BUILDKIT_CFG" ]; then
|
|
||||||
createFlags="$createFlags --config=${BUILDKIT_CFG}"
|
|
||||||
fi
|
|
||||||
buildxCmd create ${createFlags} \
|
|
||||||
--name="${builderName}" \
|
|
||||||
--driver="${DRIVER}" \
|
|
||||||
--driver-opt="${driverOpt}" \
|
|
||||||
--platform="${PLATFORMS}"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
function buildOutput {
|
|
||||||
local name=$1
|
|
||||||
if [ "$DRIVER" != "docker" ]; then
|
|
||||||
if [ "${MULTI_NODE}" = "1" ]; then
|
|
||||||
echo "type=cacheonly"
|
|
||||||
else
|
|
||||||
echo "type=oci,dest=${context}/${name}.tar"
|
|
||||||
fi
|
|
||||||
else
|
|
||||||
echo "type=docker,name=${name}"
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# multi-platform not supported by docker driver
|
|
||||||
buildPlatformFlag=
|
|
||||||
bakePlatformFlag=
|
|
||||||
if [ "$DRIVER" != "docker" ]; then
|
|
||||||
buildPlatformFlag=--platform="${PLATFORMS}"
|
|
||||||
bakePlatformFlag=--set="*.platform=${PLATFORMS}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# inspect and bootstrap
|
|
||||||
buildxCmd inspect --bootstrap --builder="${builderName}"
|
|
||||||
|
|
||||||
# create dockerfile
|
|
||||||
cat > "${dockerfile}" <<EOL
|
|
||||||
FROM busybox as build
|
|
||||||
ARG TARGETPLATFORM
|
|
||||||
ARG BUILDPLATFORM
|
|
||||||
RUN echo "I am running on \$BUILDPLATFORM, building for \$TARGETPLATFORM" > /log
|
|
||||||
|
|
||||||
FROM busybox AS log
|
|
||||||
COPY --from=build /log /log
|
|
||||||
RUN cat /log
|
|
||||||
RUN uname -a
|
|
||||||
|
|
||||||
FROM busybox AS hello
|
|
||||||
RUN echo hello > /hello
|
|
||||||
|
|
||||||
FROM scratch
|
|
||||||
COPY --from=log /log /log
|
|
||||||
COPY --from=hello /hello /hello
|
|
||||||
EOL
|
|
||||||
|
|
||||||
# build
|
|
||||||
buildxCmd build ${buildPlatformFlag} \
|
|
||||||
--output="$(buildOutput buildx-test-build)" \
|
|
||||||
--builder="${builderName}" \
|
|
||||||
--metadata-file="${context}/metadata-build.json" \
|
|
||||||
"${context}"
|
|
||||||
cat "${context}/metadata-build.json"
|
|
||||||
|
|
||||||
# create bake def
|
|
||||||
cat > "${bakedef}" <<EOL
|
|
||||||
group "default" {
|
|
||||||
targets = ["release"]
|
|
||||||
}
|
|
||||||
group "all" {
|
|
||||||
targets = ["log", "hello"]
|
|
||||||
}
|
|
||||||
target "release" {
|
|
||||||
output = ["$(buildOutput buildx-test-bake-release)"]
|
|
||||||
}
|
|
||||||
target "log" {
|
|
||||||
output = ["$(buildOutput buildx-test-bake-log)"]
|
|
||||||
}
|
|
||||||
target "hello" {
|
|
||||||
output = ["$(buildOutput buildx-test-bake-hello)"]
|
|
||||||
}
|
|
||||||
EOL
|
|
||||||
|
|
||||||
# bake default target
|
|
||||||
buildxCmd bake ${bakePlatformFlag} \
|
|
||||||
--file="${bakedef}" \
|
|
||||||
--builder="${builderName}" \
|
|
||||||
--set "*.context=${context}" \
|
|
||||||
--metadata-file="${context}/metadata-bake-def.json"
|
|
||||||
cat "${context}/metadata-bake-def.json"
|
|
||||||
|
|
||||||
# bake all target
|
|
||||||
buildxCmd bake ${bakePlatformFlag} \
|
|
||||||
--file="${bakedef}" \
|
|
||||||
--builder="${builderName}" \
|
|
||||||
--set "*.context=${context}" \
|
|
||||||
--metadata-file="${context}/metadata-bake-all.json" \
|
|
||||||
all
|
|
||||||
cat "${context}/metadata-bake-all.json"
|
|
||||||
@@ -1,12 +1,16 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
set -eu -o pipefail
|
. $(dirname $0)/util
|
||||||
|
set -eu
|
||||||
: ${BUILDX_CMD=docker buildx}
|
|
||||||
: ${FORMATS=md}
|
|
||||||
|
|
||||||
output=$(mktemp -d -t buildx-output.XXXXXXXXXX)
|
output=$(mktemp -d -t buildx-output.XXXXXXXXXX)
|
||||||
(set -x ; DOCS_FORMATS=$FORMATS ${BUILDX_CMD} bake --set "*.output=$output" update-docs)
|
|
||||||
|
buildxCmd build \
|
||||||
|
--target "update" \
|
||||||
|
--output "type=local,dest=$output" \
|
||||||
|
--file "./hack/dockerfiles/docs.Dockerfile" \
|
||||||
|
.
|
||||||
|
|
||||||
rm -rf ./docs/reference/*
|
rm -rf ./docs/reference/*
|
||||||
cp -R "$output"/out/* ./docs/
|
cp -R "$output"/out/* ./docs/
|
||||||
rm -rf $output
|
rm -rf $output
|
||||||
|
|||||||
@@ -1,11 +1,16 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
set -eu -o pipefail
|
. $(dirname $0)/util
|
||||||
|
set -eu
|
||||||
: ${BUILDX_CMD=docker buildx}
|
|
||||||
|
|
||||||
output=$(mktemp -d -t buildx-output.XXXXXXXXXX)
|
output=$(mktemp -d -t buildx-output.XXXXXXXXXX)
|
||||||
(set -x ; ${BUILDX_CMD} bake --set "*.output=$output" update-vendor)
|
|
||||||
|
buildxCmd build \
|
||||||
|
--target "update" \
|
||||||
|
--output "type=local,dest=$output" \
|
||||||
|
--file "./hack/dockerfiles/vendor.Dockerfile" \
|
||||||
|
.
|
||||||
|
|
||||||
rm -rf ./vendor
|
rm -rf ./vendor
|
||||||
cp -R "$output"/out/* .
|
cp -R "$output"/out/* .
|
||||||
rm -rf $output
|
rm -rf $output
|
||||||
|
|||||||
66
hack/util
Executable file
66
hack/util
Executable file
@@ -0,0 +1,66 @@
|
|||||||
|
#!/usr/bin/env sh
|
||||||
|
|
||||||
|
: ${CI=}
|
||||||
|
: ${PREFER_BUILDCTL=}
|
||||||
|
: ${PREFER_LEGACY=}
|
||||||
|
: ${CLI_PLATFORM=}
|
||||||
|
: ${GITHUB_ACTIONS=}
|
||||||
|
: ${CACHEDIR_FROM=}
|
||||||
|
: ${CACHEDIR_TO=}
|
||||||
|
|
||||||
|
if [ "$PREFER_BUILDCTL" = "1" ]; then
|
||||||
|
echo >&2 "WARNING: PREFER_BUILDCTL is no longer supported. Ignoring."
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "$PREFER_LEGACY" = "1" ]; then
|
||||||
|
echo >&2 "WARNING: PREFER_LEGACY is no longer supported. Ignoring."
|
||||||
|
fi
|
||||||
|
|
||||||
|
progressFlag=""
|
||||||
|
if [ "$CI" = "true" ]; then
|
||||||
|
progressFlag="--progress=plain"
|
||||||
|
fi
|
||||||
|
|
||||||
|
buildxCmd() {
|
||||||
|
if docker buildx version >/dev/null 2>&1; then
|
||||||
|
set -x
|
||||||
|
docker buildx "$@" $progressFlag
|
||||||
|
elif buildx version >/dev/null 2>&1; then
|
||||||
|
set -x
|
||||||
|
buildx "$@" $progressFlag
|
||||||
|
elif docker version >/dev/null 2>&1; then
|
||||||
|
set -x
|
||||||
|
DOCKER_BUILDKIT=1 docker "$@" $progressFlag
|
||||||
|
else
|
||||||
|
echo >&2 "ERROR: Please enable DOCKER_BUILDKIT or install standalone buildx"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
if [ -z "$CLI_PLATFORM" ]; then
|
||||||
|
if [ "$(uname -s)" = "Darwin" ]; then
|
||||||
|
arch="$(uname -m)"
|
||||||
|
if [ "$arch" = "x86_64" ]; then
|
||||||
|
arch="amd64"
|
||||||
|
fi
|
||||||
|
CLI_PLATFORM="darwin/$arch"
|
||||||
|
elif uname -s | grep MINGW > /dev/null 2>&1 ; then
|
||||||
|
CLI_PLATFORM="windows/amd64"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
cacheType=""
|
||||||
|
cacheRefFrom=""
|
||||||
|
cacheRefTo=""
|
||||||
|
currentref=""
|
||||||
|
if [ "$GITHUB_ACTIONS" = "true" ]; then
|
||||||
|
currentref="git://github.com/$GITHUB_REPOSITORY#$GITHUB_REF"
|
||||||
|
cacheType="local"
|
||||||
|
cacheRefFrom="$CACHEDIR_FROM"
|
||||||
|
cacheRefTo="$CACHEDIR_TO"
|
||||||
|
fi
|
||||||
|
|
||||||
|
currentcontext="."
|
||||||
|
if [ -n "$currentref" ]; then
|
||||||
|
currentcontext="--build-arg BUILDKIT_CONTEXT_KEEP_GIT_DIR=1 $currentref"
|
||||||
|
fi
|
||||||
29
hack/validate-docs
Executable file
29
hack/validate-docs
Executable file
@@ -0,0 +1,29 @@
|
|||||||
|
#!/usr/bin/env sh
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
case ${1:-} in
|
||||||
|
'')
|
||||||
|
. $(dirname $0)/util
|
||||||
|
buildxCmd build \
|
||||||
|
--target validate \
|
||||||
|
--file ./hack/dockerfiles/docs.Dockerfile \
|
||||||
|
.
|
||||||
|
;;
|
||||||
|
check)
|
||||||
|
status="$(git status --porcelain -- docs/reference 2>/dev/null)"
|
||||||
|
diffs=$(echo "$status" | grep -v '^[RAD] ' || true)
|
||||||
|
if [ "$diffs" ]; then
|
||||||
|
{
|
||||||
|
set +x
|
||||||
|
echo 'The result of ./hack/update-docs differs'
|
||||||
|
echo
|
||||||
|
echo "$diffs"
|
||||||
|
echo
|
||||||
|
echo 'Please vendor your package with ./hack/update-docs'
|
||||||
|
echo
|
||||||
|
} >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo 'Congratulations! All docs changes are done the right way.'
|
||||||
|
;;
|
||||||
|
esac
|
||||||
29
hack/validate-vendor
Executable file
29
hack/validate-vendor
Executable file
@@ -0,0 +1,29 @@
|
|||||||
|
#!/usr/bin/env sh
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
case ${1:-} in
|
||||||
|
'')
|
||||||
|
. $(dirname $0)/util
|
||||||
|
buildxCmd build \
|
||||||
|
--target validate \
|
||||||
|
--file ./hack/dockerfiles/vendor.Dockerfile \
|
||||||
|
.
|
||||||
|
;;
|
||||||
|
check)
|
||||||
|
status="$(git status --porcelain -- go.mod go.sum vendor 2>/dev/null)"
|
||||||
|
diffs=$(echo "$status" | grep -v '^[RAD] ' || true)
|
||||||
|
if [ "$diffs" ]; then
|
||||||
|
{
|
||||||
|
set +x
|
||||||
|
echo 'The result of "make vendor" differs'
|
||||||
|
echo
|
||||||
|
echo "$diffs"
|
||||||
|
echo
|
||||||
|
echo 'Please vendor your package with "make vendor"'
|
||||||
|
echo
|
||||||
|
} >&2
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
echo 'Congratulations! All vendoring changes are done the right way.'
|
||||||
|
;;
|
||||||
|
esac
|
||||||
@@ -4,7 +4,6 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/containerd/containerd/platforms"
|
"github.com/containerd/containerd/platforms"
|
||||||
"github.com/docker/buildx/util/confutil"
|
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
@@ -22,9 +21,8 @@ type Node struct {
|
|||||||
Endpoint string
|
Endpoint string
|
||||||
Platforms []specs.Platform
|
Platforms []specs.Platform
|
||||||
Flags []string
|
Flags []string
|
||||||
|
ConfigFile string
|
||||||
DriverOpts map[string]string
|
DriverOpts map[string]string
|
||||||
|
|
||||||
Files map[string][]byte
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ng *NodeGroup) Leave(name string) error {
|
func (ng *NodeGroup) Leave(name string) error {
|
||||||
@@ -90,18 +88,10 @@ func (ng *NodeGroup) Update(name, endpoint string, platforms []string, endpoints
|
|||||||
Name: name,
|
Name: name,
|
||||||
Endpoint: endpoint,
|
Endpoint: endpoint,
|
||||||
Platforms: pp,
|
Platforms: pp,
|
||||||
|
ConfigFile: configFile,
|
||||||
Flags: flags,
|
Flags: flags,
|
||||||
DriverOpts: do,
|
DriverOpts: do,
|
||||||
}
|
}
|
||||||
|
|
||||||
if configFile != "" {
|
|
||||||
files, err := confutil.LoadConfigFiles(configFile)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
n.Files = files
|
|
||||||
}
|
|
||||||
|
|
||||||
ng.Nodes = append(ng.Nodes, n)
|
ng.Nodes = append(ng.Nodes, n)
|
||||||
|
|
||||||
if err := ng.validateDuplicates(endpoint, len(ng.Nodes)-1); err != nil {
|
if err := ng.validateDuplicates(endpoint, len(ng.Nodes)-1); err != nil {
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user