Compare commits
239 Commits
v0.5.0
...
v0.7.0-rc1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5c4e3fc860 | ||
|
|
eab0e6a8fe | ||
|
|
4c938c77ba | ||
|
|
1cca41b81a | ||
|
|
c62472121b | ||
|
|
88d0775692 | ||
|
|
8afc82b427 | ||
|
|
d311561a8b | ||
|
|
44e180b26e | ||
|
|
02d29e0af5 | ||
|
|
40121c671c | ||
|
|
4c1621cccd | ||
|
|
7f0e37531c | ||
|
|
82b212bddf | ||
|
|
aa52a5a699 | ||
|
|
49342dd54d | ||
|
|
3f716f00fa | ||
|
|
5e25191cb6 | ||
|
|
dd15969c93 | ||
|
|
81cf2064c4 | ||
|
|
b497587f21 | ||
|
|
2890209a11 | ||
|
|
4690e14c40 | ||
|
|
25d2f73858 | ||
|
|
36a37a624e | ||
|
|
e150d7bdd8 | ||
|
|
be2c8f71fe | ||
|
|
89f5c1ce51 | ||
|
|
b6474d43a9 | ||
|
|
2644d56a6d | ||
|
|
084b6c0a95 | ||
|
|
8e5595b7c7 | ||
|
|
22500c9929 | ||
|
|
050f4f9219 | ||
|
|
1a56de8e68 | ||
|
|
868610e0e9 | ||
|
|
b89e2f35df | ||
|
|
1b3068df7c | ||
|
|
461369748c | ||
|
|
d5908cdddf | ||
|
|
b5bc754bad | ||
|
|
dff7673afb | ||
|
|
3e2fde5639 | ||
|
|
7a7b73c043 | ||
|
|
e50c9ae7be | ||
|
|
9e62c9f074 | ||
|
|
c82dbafaee | ||
|
|
0e4d7aa7a9 | ||
|
|
c05a6eb2c1 | ||
|
|
eec1693f30 | ||
|
|
c643c2ca95 | ||
|
|
761e22e395 | ||
|
|
ef8c936b27 | ||
|
|
0cea838344 | ||
|
|
2b18a9b4a5 | ||
|
|
45e4550c36 | ||
|
|
6fc906532b | ||
|
|
06541ebd0f | ||
|
|
773fac9a73 | ||
|
|
7f0e05dfac | ||
|
|
e59aecf034 | ||
|
|
ac9a1612d2 | ||
|
|
c83812144c | ||
|
|
df521e4e96 | ||
|
|
00cb53d0ef | ||
|
|
6cfef7fa36 | ||
|
|
b05c313204 | ||
|
|
3e8bbbc286 | ||
|
|
8a12884814 | ||
|
|
6cf9fa8261 | ||
|
|
fd94fc5fdf | ||
|
|
45c678ad26 | ||
|
|
55a3ce606f | ||
|
|
c1c414e4c9 | ||
|
|
610601cec0 | ||
|
|
9833420a03 | ||
|
|
7f322caa79 | ||
|
|
93867d02f0 | ||
|
|
b8a602821c | ||
|
|
a8a3b1738e | ||
|
|
ef3e46fd62 | ||
|
|
3ab0b6953a | ||
|
|
c19c018a4c | ||
|
|
422ba60b04 | ||
|
|
2d3763990c | ||
|
|
dc6ada9b50 | ||
|
|
cb185f095f | ||
|
|
89e126fa60 | ||
|
|
04bac63745 | ||
|
|
3594851128 | ||
|
|
58e5a73389 | ||
|
|
c685e46609 | ||
|
|
e3283e6169 | ||
|
|
5d50bd7b43 | ||
|
|
3dfbe2c184 | ||
|
|
06367a120b | ||
|
|
6149507c7e | ||
|
|
c76b5eac03 | ||
|
|
cd133cee25 | ||
|
|
eeab638476 | ||
|
|
19b9b86af8 | ||
|
|
0101c96532 | ||
|
|
85dedf1aea | ||
|
|
5f05bd9a2b | ||
|
|
260d07a9a1 | ||
|
|
9aa8f09f14 | ||
|
|
0363b676bc | ||
|
|
a10045e8cb | ||
|
|
0afcca221d | ||
|
|
5daf176722 | ||
|
|
3d1ab82dc6 | ||
|
|
872430d2d3 | ||
|
|
7d312eaa0a | ||
|
|
a6bc4ed21e | ||
|
|
3768ab268b | ||
|
|
4c2daeb852 | ||
|
|
d9ee3b134c | ||
|
|
0b6ba1cd32 | ||
|
|
65a6955db8 | ||
|
|
258d12b2e7 | ||
|
|
6e3a319a9d | ||
|
|
1bb425a882 | ||
|
|
5f6ad50df4 | ||
|
|
9d88450118 | ||
|
|
334c93fbbe | ||
|
|
6ba080d337 | ||
|
|
ba443811e4 | ||
|
|
67bd6f4dc8 | ||
|
|
9f50eccbd7 | ||
|
|
12db50748b | ||
|
|
9b4937f062 | ||
|
|
3d48359e95 | ||
|
|
70002ebbc7 | ||
|
|
ef95f8135b | ||
|
|
9215fc56a3 | ||
|
|
1253020b3d | ||
|
|
621c55066c | ||
|
|
77632ac15f | ||
|
|
db6aa34252 | ||
|
|
7ecfd3d298 | ||
|
|
9a8c287629 | ||
|
|
591099a4b8 | ||
|
|
31309b9205 | ||
|
|
8c0cefcd89 | ||
|
|
a07f5cdf42 | ||
|
|
a1d899d400 | ||
|
|
886e1a378c | ||
|
|
47b7ba4e79 | ||
|
|
79433cef7a | ||
|
|
c5eb8f58b4 | ||
|
|
03b7128b60 | ||
|
|
15b358bec6 | ||
|
|
a53e392afb | ||
|
|
4fec647b9d | ||
|
|
d7b28fb4d3 | ||
|
|
9bc9291fc9 | ||
|
|
df7a318ec0 | ||
|
|
908a856079 | ||
|
|
8d64b6484f | ||
|
|
399df854ea | ||
|
|
328441cdc6 | ||
|
|
5ca0cbff8e | ||
|
|
ab09846df7 | ||
|
|
cd3a9ad38d | ||
|
|
adc5f35237 | ||
|
|
0b984e429b | ||
|
|
eec843a325 | ||
|
|
83868a48b7 | ||
|
|
98d337af21 | ||
|
|
b2c7dc00cc | ||
|
|
44ddc5a02b | ||
|
|
f036bba48c | ||
|
|
0fe2ce7fac | ||
|
|
0147b92230 | ||
|
|
4047bccf6c | ||
|
|
363c0fdf4b | ||
|
|
c46407b2d3 | ||
|
|
ca0f5dabea | ||
|
|
17d4106e1b | ||
|
|
442d38080e | ||
|
|
87ec3af5bb | ||
|
|
1a8af33ff6 | ||
|
|
ff749d8863 | ||
|
|
2d86ddd37f | ||
|
|
e1bbb9d8de | ||
|
|
d7964be29c | ||
|
|
3fef64f584 | ||
|
|
319b6503a5 | ||
|
|
d40a6082fa | ||
|
|
28809b82a2 | ||
|
|
c9f02c32d4 | ||
|
|
55d5b80dfe | ||
|
|
33f25acb08 | ||
|
|
0e9066f6ed | ||
|
|
7d2e30096b | ||
|
|
0e9d6460db | ||
|
|
927163bf13 | ||
|
|
8ac1cf6e45 | ||
|
|
dba79ba223 | ||
|
|
905be6431b | ||
|
|
ad95d6ba04 | ||
|
|
b77690a373 | ||
|
|
84a734dc87 | ||
|
|
5079b64ab5 | ||
|
|
6a343488d2 | ||
|
|
98c3ef60e6 | ||
|
|
73fa351b1c | ||
|
|
c88f7fc307 | ||
|
|
55b8712268 | ||
|
|
7878f0c514 | ||
|
|
0f09e2ecfe | ||
|
|
bea3acd4b6 | ||
|
|
fb9004d6b2 | ||
|
|
42b7e7bc56 | ||
|
|
4b2ddd5b6e | ||
|
|
b3006221f1 | ||
|
|
e57108e7c9 | ||
|
|
6b3dc6687b | ||
|
|
92f6f9f973 | ||
|
|
a56a4c00dd | ||
|
|
ee4a115d4c | ||
|
|
976a58c918 | ||
|
|
db82aa1b77 | ||
|
|
d05504c50f | ||
|
|
f1f464e364 | ||
|
|
57b875a955 | ||
|
|
ea5d32ddff | ||
|
|
da8c8ccaf5 | ||
|
|
dcbe4b3e1a | ||
|
|
68cebffe13 | ||
|
|
96e7f3224a | ||
|
|
f6d83c97bb | ||
|
|
74f76cf4e9 | ||
|
|
8b8725d1fd | ||
|
|
20494f799d | ||
|
|
dd13e16bc7 | ||
|
|
11057da373 | ||
|
|
381dc8fb43 | ||
|
|
f349ba8750 |
@@ -1,2 +1,3 @@
|
|||||||
bin/
|
bin/
|
||||||
cross-out/
|
cross-out/
|
||||||
|
release-out/
|
||||||
|
|||||||
169
.github/workflows/build.yml
vendored
169
.github/workflows/build.yml
vendored
@@ -5,69 +5,26 @@ on:
|
|||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- 'master'
|
- 'master'
|
||||||
|
- 'v[0-9]*'
|
||||||
tags:
|
tags:
|
||||||
- 'v*'
|
- 'v*'
|
||||||
pull_request:
|
pull_request:
|
||||||
branches:
|
branches:
|
||||||
- 'master'
|
- 'master'
|
||||||
|
- 'v[0-9]*'
|
||||||
|
|
||||||
env:
|
env:
|
||||||
|
REPO_SLUG: "docker/buildx-bin"
|
||||||
REPO_SLUG_ORIGIN: "moby/buildkit:master"
|
REPO_SLUG_ORIGIN: "moby/buildkit:master"
|
||||||
CACHEKEY_BINARIES: "binaries"
|
RELEASE_OUT: "./release-out"
|
||||||
PLATFORMS: "linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64,linux/s390x,linux/ppc64le"
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
base:
|
build:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
-
|
|
||||||
name: Cache ${{ env.CACHEKEY_BINARIES }}
|
|
||||||
uses: actions/cache@v2
|
|
||||||
with:
|
|
||||||
path: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
|
||||||
key: ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-${{ github.sha }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-
|
|
||||||
-
|
|
||||||
name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v1
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v1
|
|
||||||
with:
|
|
||||||
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
|
|
||||||
-
|
|
||||||
name: Build ${{ env.CACHEKEY_BINARIES }}
|
|
||||||
run: |
|
|
||||||
./hack/build_ci_first_pass binaries
|
|
||||||
env:
|
|
||||||
CACHEDIR_FROM: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
|
||||||
CACHEDIR_TO: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}-new
|
|
||||||
-
|
|
||||||
# FIXME: Temp fix for https://github.com/moby/buildkit/issues/1850
|
|
||||||
name: Move cache
|
|
||||||
run: |
|
|
||||||
rm -rf /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
|
||||||
mv /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}-new /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
|
||||||
|
|
||||||
test:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
needs: [base]
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v2
|
|
||||||
-
|
|
||||||
name: Cache ${{ env.CACHEKEY_BINARIES }}
|
|
||||||
uses: actions/cache@v2
|
|
||||||
with:
|
|
||||||
path: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
|
||||||
key: ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-${{ github.sha }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-
|
|
||||||
-
|
-
|
||||||
name: Set up QEMU
|
name: Set up QEMU
|
||||||
uses: docker/setup-qemu-action@v1
|
uses: docker/setup-qemu-action@v1
|
||||||
@@ -80,99 +37,50 @@ jobs:
|
|||||||
name: Test
|
name: Test
|
||||||
run: |
|
run: |
|
||||||
make test
|
make test
|
||||||
env:
|
|
||||||
TEST_COVERAGE: 1
|
|
||||||
TESTFLAGS: -v --parallel=6 --timeout=20m
|
|
||||||
CACHEDIR_FROM: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
|
||||||
-
|
-
|
||||||
name: Send to Codecov
|
name: Send to Codecov
|
||||||
uses: codecov/codecov-action@v1
|
uses: codecov/codecov-action@v2
|
||||||
with:
|
with:
|
||||||
file: ./coverage/coverage.txt
|
file: ./coverage/coverage.txt
|
||||||
|
|
||||||
cross:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
needs: [base]
|
|
||||||
steps:
|
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Build binaries
|
||||||
uses: actions/checkout@v2
|
|
||||||
-
|
|
||||||
name: Cache ${{ env.CACHEKEY_BINARIES }}
|
|
||||||
uses: actions/cache@v2
|
|
||||||
with:
|
|
||||||
path: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
|
||||||
key: ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-${{ github.sha }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-
|
|
||||||
-
|
|
||||||
name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v1
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v1
|
|
||||||
with:
|
|
||||||
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
|
|
||||||
-
|
|
||||||
name: Cross
|
|
||||||
run: |
|
run: |
|
||||||
make cross
|
make release
|
||||||
env:
|
|
||||||
TARGETPLATFORM: ${{ env.PLATFORMS }},darwin/amd64,windows/amd64
|
|
||||||
CACHEDIR_FROM: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
|
||||||
|
|
||||||
binaries:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
needs: [test, cross]
|
|
||||||
steps:
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v2
|
|
||||||
-
|
|
||||||
name: Prepare
|
|
||||||
id: prep
|
|
||||||
run: |
|
|
||||||
TAG=pr
|
|
||||||
if [[ $GITHUB_REF == refs/tags/v* ]]; then
|
|
||||||
TAG=${GITHUB_REF#refs/tags/}
|
|
||||||
elif [[ $GITHUB_REF == refs/heads/* ]]; then
|
|
||||||
TAG=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
|
|
||||||
fi
|
|
||||||
echo ::set-output name=tag::${TAG}
|
|
||||||
-
|
|
||||||
name: Cache ${{ env.CACHEKEY_BINARIES }}
|
|
||||||
uses: actions/cache@v2
|
|
||||||
with:
|
|
||||||
path: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
|
||||||
key: ${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-${{ github.sha }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-buildx-${{ env.CACHEKEY_BINARIES }}-
|
|
||||||
-
|
|
||||||
name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v1
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v1
|
|
||||||
with:
|
|
||||||
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
|
|
||||||
-
|
|
||||||
name: Build ${{ steps.prep.outputs.tag }}
|
|
||||||
run: |
|
|
||||||
./hack/release "${{ steps.prep.outputs.tag }}" release-out
|
|
||||||
env:
|
|
||||||
PLATFORMS: ${{ env.PLATFORMS }},darwin/amd64,windows/amd64
|
|
||||||
CACHEDIR_FROM: /tmp/.buildx-cache/${{ env.CACHEKEY_BINARIES }}
|
|
||||||
-
|
|
||||||
name: Move artifacts
|
|
||||||
run: |
|
|
||||||
mv ./release-out/**/* ./release-out/
|
|
||||||
-
|
-
|
||||||
name: Upload artifacts
|
name: Upload artifacts
|
||||||
uses: actions/upload-artifact@v2
|
uses: actions/upload-artifact@v2
|
||||||
with:
|
with:
|
||||||
name: buildx
|
name: buildx
|
||||||
path: ./release-out/*
|
path: ${{ env.RELEASE_OUT }}/*
|
||||||
if-no-files-found: error
|
if-no-files-found: error
|
||||||
|
-
|
||||||
|
name: Docker meta
|
||||||
|
id: meta
|
||||||
|
uses: docker/metadata-action@v3
|
||||||
|
with:
|
||||||
|
images: |
|
||||||
|
${{ env.REPO_SLUG }}
|
||||||
|
tags: |
|
||||||
|
type=ref,event=branch
|
||||||
|
type=ref,event=pr
|
||||||
|
type=semver,pattern={{version}}
|
||||||
|
bake-target: meta-helper
|
||||||
|
-
|
||||||
|
name: Login to DockerHub
|
||||||
|
if: github.event_name != 'pull_request'
|
||||||
|
uses: docker/login-action@v1
|
||||||
|
with:
|
||||||
|
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||||
|
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||||
|
-
|
||||||
|
name: Build and push image
|
||||||
|
uses: docker/bake-action@v1
|
||||||
|
with:
|
||||||
|
files: |
|
||||||
|
./docker-bake.hcl
|
||||||
|
${{ steps.meta.outputs.bake-file }}
|
||||||
|
targets: image-cross
|
||||||
|
push: ${{ github.event_name != 'pull_request' }}
|
||||||
-
|
-
|
||||||
name: GitHub Release
|
name: GitHub Release
|
||||||
if: startsWith(github.ref, 'refs/tags/v')
|
if: startsWith(github.ref, 'refs/tags/v')
|
||||||
@@ -181,5 +89,4 @@ jobs:
|
|||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
with:
|
with:
|
||||||
draft: true
|
draft: true
|
||||||
files: ./release-out/*
|
files: ${{ env.RELEASE_OUT }}/*
|
||||||
name: ${{ steps.prep.outputs.tag }}
|
|
||||||
|
|||||||
100
.github/workflows/e2e.yml
vendored
Normal file
100
.github/workflows/e2e.yml
vendored
Normal file
@@ -0,0 +1,100 @@
|
|||||||
|
name: e2e
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- 'master'
|
||||||
|
- 'v[0-9]*'
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- 'master'
|
||||||
|
- 'v[0-9]*'
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
driver:
|
||||||
|
runs-on: ubuntu-20.04
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
driver:
|
||||||
|
- docker
|
||||||
|
- docker-container
|
||||||
|
- kubernetes
|
||||||
|
buildkit:
|
||||||
|
- moby/buildkit:buildx-stable-1
|
||||||
|
- moby/buildkit:master
|
||||||
|
buildkit-cfg:
|
||||||
|
- bkcfg-false
|
||||||
|
- bkcfg-true
|
||||||
|
multi-node:
|
||||||
|
- mnode-false
|
||||||
|
- mnode-true
|
||||||
|
platforms:
|
||||||
|
- linux/amd64,linux/arm64
|
||||||
|
include:
|
||||||
|
- driver: kubernetes
|
||||||
|
driver-opt: qemu.install=true
|
||||||
|
exclude:
|
||||||
|
- driver: docker
|
||||||
|
multi-node: mnode-true
|
||||||
|
- driver: docker
|
||||||
|
buildkit-cfg: bkcfg-true
|
||||||
|
- driver: docker-container
|
||||||
|
multi-node: mnode-true
|
||||||
|
steps:
|
||||||
|
-
|
||||||
|
name: Checkout
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
-
|
||||||
|
name: Set up QEMU
|
||||||
|
uses: docker/setup-qemu-action@v1
|
||||||
|
if: matrix.driver == 'docker' || matrix.driver == 'docker-container'
|
||||||
|
-
|
||||||
|
name: Install buildx
|
||||||
|
run: |
|
||||||
|
make install
|
||||||
|
docker buildx version
|
||||||
|
-
|
||||||
|
name: Init env vars
|
||||||
|
run: |
|
||||||
|
# BuildKit cfg
|
||||||
|
if [ "${{ matrix.buildkit-cfg }}" = "bkcfg-true" ]; then
|
||||||
|
cat > "/tmp/buildkitd.toml" <<EOL
|
||||||
|
[worker.oci]
|
||||||
|
max-parallelism = 2
|
||||||
|
EOL
|
||||||
|
echo "BUILDKIT_CFG=/tmp/buildkitd.toml" >> $GITHUB_ENV
|
||||||
|
fi
|
||||||
|
# Multi node
|
||||||
|
if [ "${{ matrix.multi-node }}" = "mnode-true" ]; then
|
||||||
|
echo "MULTI_NODE=1" >> $GITHUB_ENV
|
||||||
|
else
|
||||||
|
echo "MULTI_NODE=0" >> $GITHUB_ENV
|
||||||
|
fi
|
||||||
|
-
|
||||||
|
name: Install k3s
|
||||||
|
if: matrix.driver == 'kubernetes'
|
||||||
|
uses: debianmaster/actions-k3s@v1.0.3
|
||||||
|
id: k3s
|
||||||
|
with:
|
||||||
|
version: v1.21.2-k3s1
|
||||||
|
-
|
||||||
|
name: Config k3s
|
||||||
|
if: matrix.driver == 'kubernetes'
|
||||||
|
run: |
|
||||||
|
(set -x ; cat ${{ steps.k3s.outputs.kubeconfig }})
|
||||||
|
-
|
||||||
|
name: Check k3s nodes
|
||||||
|
if: matrix.driver == 'kubernetes'
|
||||||
|
run: |
|
||||||
|
kubectl get nodes
|
||||||
|
-
|
||||||
|
name: Test
|
||||||
|
run: |
|
||||||
|
make test-driver
|
||||||
|
env:
|
||||||
|
BUILDKIT_IMAGE: ${{ matrix.buildkit }}
|
||||||
|
DRIVER: ${{ matrix.driver }}
|
||||||
|
DRIVER_OPT: ${{ matrix.driver-opt }}
|
||||||
|
PLATFORMS: ${{ matrix.platforms }}
|
||||||
26
.github/workflows/validate.yml
vendored
26
.github/workflows/validate.yml
vendored
@@ -5,14 +5,13 @@ on:
|
|||||||
push:
|
push:
|
||||||
branches:
|
branches:
|
||||||
- 'master'
|
- 'master'
|
||||||
|
- 'v[0-9]*'
|
||||||
tags:
|
tags:
|
||||||
- 'v*'
|
- 'v*'
|
||||||
pull_request:
|
pull_request:
|
||||||
branches:
|
branches:
|
||||||
- 'master'
|
- 'master'
|
||||||
|
- 'v[0-9]*'
|
||||||
env:
|
|
||||||
REPO_SLUG_ORIGIN: "moby/buildkit:master"
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
validate:
|
validate:
|
||||||
@@ -23,16 +22,27 @@ jobs:
|
|||||||
target:
|
target:
|
||||||
- lint
|
- lint
|
||||||
- validate-vendor
|
- validate-vendor
|
||||||
|
- validate-docs
|
||||||
steps:
|
steps:
|
||||||
-
|
-
|
||||||
name: Checkout
|
name: Checkout
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v1
|
|
||||||
with:
|
|
||||||
driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
|
|
||||||
-
|
-
|
||||||
name: Run
|
name: Run
|
||||||
run: |
|
run: |
|
||||||
make ${{ matrix.target }}
|
make ${{ matrix.target }}
|
||||||
|
|
||||||
|
validate-docs-yaml:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs:
|
||||||
|
- validate
|
||||||
|
steps:
|
||||||
|
-
|
||||||
|
name: Checkout
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
-
|
||||||
|
name: Run
|
||||||
|
run: |
|
||||||
|
make docs
|
||||||
|
env:
|
||||||
|
FORMATS: yaml
|
||||||
|
|||||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -1,3 +1,4 @@
|
|||||||
bin
|
bin
|
||||||
coverage
|
coverage
|
||||||
cross-out
|
cross-out
|
||||||
|
release-out
|
||||||
|
|||||||
30
.golangci.yml
Normal file
30
.golangci.yml
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
run:
|
||||||
|
timeout: 10m
|
||||||
|
skip-files:
|
||||||
|
- ".*\\.pb\\.go$"
|
||||||
|
|
||||||
|
modules-download-mode: vendor
|
||||||
|
|
||||||
|
build-tags:
|
||||||
|
|
||||||
|
linters:
|
||||||
|
enable:
|
||||||
|
- gofmt
|
||||||
|
- govet
|
||||||
|
- deadcode
|
||||||
|
- goimports
|
||||||
|
- ineffassign
|
||||||
|
- misspell
|
||||||
|
- unused
|
||||||
|
- varcheck
|
||||||
|
- golint
|
||||||
|
- staticcheck
|
||||||
|
- typecheck
|
||||||
|
- structcheck
|
||||||
|
disable-all: true
|
||||||
|
|
||||||
|
issues:
|
||||||
|
exclude-rules:
|
||||||
|
- linters:
|
||||||
|
- golint
|
||||||
|
text: "stutters"
|
||||||
9
.mailmap
9
.mailmap
@@ -1,6 +1,13 @@
|
|||||||
# This file lists all individuals having contributed content to the repository.
|
# This file lists all individuals having contributed content to the repository.
|
||||||
# For how it is generated, see `hack/generate-authors`.
|
# For how it is generated, see hack/dockerfiles/authors.Dockerfile.
|
||||||
|
|
||||||
|
CrazyMax <github@crazymax.dev>
|
||||||
|
CrazyMax <github@crazymax.dev> <1951866+crazy-max@users.noreply.github.com>
|
||||||
|
CrazyMax <github@crazymax.dev> <crazy-max@users.noreply.github.com>
|
||||||
|
Sebastiaan van Stijn <github@gone.nl>
|
||||||
|
Sebastiaan van Stijn <github@gone.nl> <thaJeztah@users.noreply.github.com>
|
||||||
Tibor Vass <tibor@docker.com>
|
Tibor Vass <tibor@docker.com>
|
||||||
Tibor Vass <tibor@docker.com> <tiborvass@users.noreply.github.com>
|
Tibor Vass <tibor@docker.com> <tiborvass@users.noreply.github.com>
|
||||||
Tõnis Tiigi <tonistiigi@gmail.com>
|
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||||
|
Ulysses Souza <ulyssessouza@gmail.com>
|
||||||
|
Wang Jinglei <morlay.null@gmail.com>
|
||||||
|
|||||||
40
AUTHORS
40
AUTHORS
@@ -1,7 +1,45 @@
|
|||||||
# This file lists all individuals having contributed content to the repository.
|
# This file lists all individuals having contributed content to the repository.
|
||||||
# For how it is generated, see `scripts/generate-authors.sh`.
|
# For how it is generated, see hack/dockerfiles/authors.Dockerfile.
|
||||||
|
|
||||||
|
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
|
||||||
|
Alex Couture-Beil <alex@earthly.dev>
|
||||||
|
Andrew Haines <andrew.haines@zencargo.com>
|
||||||
|
Andy MacKinlay <admackin@users.noreply.github.com>
|
||||||
|
Anthony Poschen <zanven42@gmail.com>
|
||||||
|
Artur Klauser <Artur.Klauser@computer.org>
|
||||||
|
Batuhan Apaydın <developerguy2@gmail.com>
|
||||||
Bin Du <bindu@microsoft.com>
|
Bin Du <bindu@microsoft.com>
|
||||||
|
Brandon Philips <brandon@ifup.org>
|
||||||
Brian Goff <cpuguy83@gmail.com>
|
Brian Goff <cpuguy83@gmail.com>
|
||||||
|
CrazyMax <github@crazymax.dev>
|
||||||
|
dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
|
||||||
|
Devin Bayer <dev@doubly.so>
|
||||||
|
Djordje Lukic <djordje.lukic@docker.com>
|
||||||
|
Dmytro Makovey <dmytro.makovey@docker.com>
|
||||||
|
Donghui Wang <977675308@qq.com>
|
||||||
|
faust <faustin@fala.red>
|
||||||
|
Felipe Santos <felipecassiors@gmail.com>
|
||||||
|
Fernando Miguel <github@FernandoMiguel.net>
|
||||||
|
gfrancesco <gfrancesco@users.noreply.github.com>
|
||||||
|
gracenoah <gracenoahgh@gmail.com>
|
||||||
|
Hollow Man <hollowman@hollowman.ml>
|
||||||
|
Ilya Dmitrichenko <errordeveloper@gmail.com>
|
||||||
|
Jack Laxson <jackjrabbit@gmail.com>
|
||||||
|
Jean-Yves Gastaud <jygastaud@gmail.com>
|
||||||
|
khs1994 <khs1994@khs1994.com>
|
||||||
|
Kotaro Adachi <k33asby@gmail.com>
|
||||||
|
l00397676 <lujingxiao@huawei.com>
|
||||||
|
Michal Augustyn <michal.augustyn@mail.com>
|
||||||
|
Patrick Van Stee <patrick@vanstee.me>
|
||||||
|
Saul Shanabrook <s.shanabrook@gmail.com>
|
||||||
|
Sebastiaan van Stijn <github@gone.nl>
|
||||||
|
SHIMA Tatsuya <ts1s1andn@gmail.com>
|
||||||
|
Silvin Lubecki <silvin.lubecki@docker.com>
|
||||||
|
Solomon Hykes <sh.github.6811@hykes.org>
|
||||||
|
Sune Keller <absukl@almbrand.dk>
|
||||||
Tibor Vass <tibor@docker.com>
|
Tibor Vass <tibor@docker.com>
|
||||||
Tõnis Tiigi <tonistiigi@gmail.com>
|
Tõnis Tiigi <tonistiigi@gmail.com>
|
||||||
|
Ulysses Souza <ulyssessouza@gmail.com>
|
||||||
|
Wang Jinglei <morlay.null@gmail.com>
|
||||||
|
Xiang Dai <764524258@qq.com>
|
||||||
|
zelahi <elahi.zuhayr@gmail.com>
|
||||||
|
|||||||
57
Dockerfile
57
Dockerfile
@@ -1,15 +1,17 @@
|
|||||||
# syntax=docker/dockerfile:1.1-experimental
|
# syntax=docker/dockerfile:1.3
|
||||||
|
|
||||||
ARG DOCKERD_VERSION=19.03-rc
|
ARG GO_VERSION=1.17
|
||||||
ARG CLI_VERSION=19.03
|
ARG DOCKERD_VERSION=20.10.8
|
||||||
|
|
||||||
FROM docker:$DOCKERD_VERSION AS dockerd-release
|
FROM docker:$DOCKERD_VERSION AS dockerd-release
|
||||||
|
|
||||||
# xgo is a helper for golang cross-compilation
|
# xx is a helper for cross-compilation
|
||||||
FROM --platform=$BUILDPLATFORM tonistiigi/xx:golang@sha256:6f7d999551dd471b58f70716754290495690efa8421e0a1fcf18eb11d0c0a537 AS xgo
|
FROM --platform=$BUILDPLATFORM tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04 AS xx
|
||||||
|
|
||||||
FROM --platform=$BUILDPLATFORM golang:1.13-alpine AS gobase
|
FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest
|
||||||
COPY --from=xgo / /
|
|
||||||
|
FROM golatest AS gobase
|
||||||
|
COPY --from=xx / /
|
||||||
RUN apk add --no-cache file git
|
RUN apk add --no-cache file git
|
||||||
ENV GOFLAGS=-mod=vendor
|
ENV GOFLAGS=-mod=vendor
|
||||||
WORKDIR /src
|
WORKDIR /src
|
||||||
@@ -23,24 +25,22 @@ RUN --mount=target=. \
|
|||||||
FROM gobase AS buildx-build
|
FROM gobase AS buildx-build
|
||||||
ENV CGO_ENABLED=0
|
ENV CGO_ENABLED=0
|
||||||
ARG TARGETPLATFORM
|
ARG TARGETPLATFORM
|
||||||
RUN --mount=target=. --mount=target=/root/.cache,type=cache \
|
RUN --mount=type=bind,target=. \
|
||||||
--mount=target=/go/pkg/mod,type=cache \
|
--mount=type=cache,target=/root/.cache \
|
||||||
--mount=source=/tmp/.ldflags,target=/tmp/.ldflags,from=buildx-version \
|
--mount=type=cache,target=/go/pkg/mod \
|
||||||
set -x; go build -ldflags "$(cat /tmp/.ldflags)" -o /usr/bin/buildx ./cmd/buildx && \
|
--mount=type=bind,source=/tmp/.ldflags,target=/tmp/.ldflags,from=buildx-version \
|
||||||
file /usr/bin/buildx && file /usr/bin/buildx | egrep "statically linked|Mach-O|Windows"
|
set -x; xx-go build -ldflags "$(cat /tmp/.ldflags)" -o /usr/bin/buildx ./cmd/buildx && \
|
||||||
|
xx-verify --static /usr/bin/buildx
|
||||||
|
|
||||||
FROM buildx-build AS integration-tests
|
FROM buildx-build AS test
|
||||||
COPY . .
|
RUN --mount=type=bind,target=. \
|
||||||
|
--mount=type=cache,target=/root/.cache \
|
||||||
|
--mount=type=cache,target=/go/pkg/mod \
|
||||||
|
go test -v -coverprofile=/tmp/coverage.txt -covermode=atomic ./... && \
|
||||||
|
go tool cover -func=/tmp/coverage.txt
|
||||||
|
|
||||||
# FROM golang:1.12-alpine AS docker-cli-build
|
FROM scratch AS test-coverage
|
||||||
# RUN apk add -U git bash coreutils gcc musl-dev
|
COPY --from=test /tmp/coverage.txt /coverage.txt
|
||||||
# ENV CGO_ENABLED=0
|
|
||||||
# ARG REPO=github.com/tiborvass/cli
|
|
||||||
# ARG BRANCH=cli-plugin-aliases
|
|
||||||
# ARG CLI_VERSION
|
|
||||||
# WORKDIR /go/src/github.com/docker/cli
|
|
||||||
# RUN git clone git://$REPO . && git checkout $BRANCH
|
|
||||||
# RUN ./scripts/build/binary
|
|
||||||
|
|
||||||
FROM scratch AS binaries-unix
|
FROM scratch AS binaries-unix
|
||||||
COPY --from=buildx-build /usr/bin/buildx /
|
COPY --from=buildx-build /usr/bin/buildx /
|
||||||
@@ -53,28 +53,29 @@ COPY --from=buildx-build /usr/bin/buildx /buildx.exe
|
|||||||
|
|
||||||
FROM binaries-$TARGETOS AS binaries
|
FROM binaries-$TARGETOS AS binaries
|
||||||
|
|
||||||
|
# Release
|
||||||
FROM --platform=$BUILDPLATFORM alpine AS releaser
|
FROM --platform=$BUILDPLATFORM alpine AS releaser
|
||||||
WORKDIR /work
|
WORKDIR /work
|
||||||
ARG TARGETPLATFORM
|
ARG TARGETPLATFORM
|
||||||
RUN --mount=from=binaries \
|
RUN --mount=from=binaries \
|
||||||
--mount=source=/tmp/.version,target=/tmp/.version,from=buildx-version \
|
--mount=type=bind,source=/tmp/.version,target=/tmp/.version,from=buildx-version \
|
||||||
mkdir -p /out && cp buildx* "/out/buildx-$(cat /tmp/.version).$(echo $TARGETPLATFORM | sed 's/\//-/g')$(ls buildx* | sed -e 's/^buildx//')"
|
mkdir -p /out && cp buildx* "/out/buildx-$(cat /tmp/.version).$(echo $TARGETPLATFORM | sed 's/\//-/g')$(ls buildx* | sed -e 's/^buildx//')"
|
||||||
|
|
||||||
FROM scratch AS release
|
FROM scratch AS release
|
||||||
COPY --from=releaser /out/ /
|
COPY --from=releaser /out/ /
|
||||||
|
|
||||||
FROM alpine AS demo-env
|
# Shell
|
||||||
|
FROM docker:$DOCKERD_VERSION AS dockerd-release
|
||||||
|
FROM alpine AS shell
|
||||||
RUN apk add --no-cache iptables tmux git vim less openssh
|
RUN apk add --no-cache iptables tmux git vim less openssh
|
||||||
RUN mkdir -p /usr/local/lib/docker/cli-plugins && ln -s /usr/local/bin/buildx /usr/local/lib/docker/cli-plugins/docker-buildx
|
RUN mkdir -p /usr/local/lib/docker/cli-plugins && ln -s /usr/local/bin/buildx /usr/local/lib/docker/cli-plugins/docker-buildx
|
||||||
COPY ./hack/demo-env/entrypoint.sh /usr/local/bin
|
COPY ./hack/demo-env/entrypoint.sh /usr/local/bin
|
||||||
COPY ./hack/demo-env/tmux.conf /root/.tmux.conf
|
COPY ./hack/demo-env/tmux.conf /root/.tmux.conf
|
||||||
COPY --from=dockerd-release /usr/local/bin /usr/local/bin
|
COPY --from=dockerd-release /usr/local/bin /usr/local/bin
|
||||||
#COPY --from=docker-cli-build /go/src/github.com/docker/cli/build/docker /usr/local/bin
|
|
||||||
|
|
||||||
WORKDIR /work
|
WORKDIR /work
|
||||||
COPY ./hack/demo-env/examples .
|
COPY ./hack/demo-env/examples .
|
||||||
COPY --from=binaries / /usr/local/bin/
|
COPY --from=binaries / /usr/local/bin/
|
||||||
VOLUME /var/lib/docker
|
VOLUME /var/lib/docker
|
||||||
ENTRYPOINT ["entrypoint.sh"]
|
ENTRYPOINT ["entrypoint.sh"]
|
||||||
|
|
||||||
FROM binaries
|
FROM binaries
|
||||||
|
|||||||
12
MAINTAINERS
12
MAINTAINERS
@@ -150,6 +150,8 @@ made through a pull request.
|
|||||||
[Org.Maintainers]
|
[Org.Maintainers]
|
||||||
|
|
||||||
people = [
|
people = [
|
||||||
|
"akihirosuda",
|
||||||
|
"crazy-max",
|
||||||
"tiborvass",
|
"tiborvass",
|
||||||
"tonistiigi",
|
"tonistiigi",
|
||||||
]
|
]
|
||||||
@@ -176,6 +178,16 @@ made through a pull request.
|
|||||||
# All other sections should refer to people by their canonical key
|
# All other sections should refer to people by their canonical key
|
||||||
# in the people section.
|
# in the people section.
|
||||||
|
|
||||||
|
[people.akihirosuda]
|
||||||
|
Name = "Akihiro Suda"
|
||||||
|
Email = "akihiro.suda.cz@hco.ntt.co.jp"
|
||||||
|
GitHub = "AkihiroSuda"
|
||||||
|
|
||||||
|
[people.crazy-max]
|
||||||
|
Name = "Kevin Alvarez"
|
||||||
|
Email = "contact@crazymax.dev"
|
||||||
|
GitHub = "crazy-max"
|
||||||
|
|
||||||
[people.thajeztah]
|
[people.thajeztah]
|
||||||
Name = "Sebastiaan van Stijn"
|
Name = "Sebastiaan van Stijn"
|
||||||
Email = "github@gone.nl"
|
Email = "github@gone.nl"
|
||||||
|
|||||||
56
Makefile
56
Makefile
@@ -1,34 +1,62 @@
|
|||||||
|
ifneq (, $(BUILDX_BIN))
|
||||||
|
export BUILDX_CMD = $(BUILDX_BIN)
|
||||||
|
else ifneq (, $(shell docker buildx version))
|
||||||
|
export BUILDX_CMD = docker buildx
|
||||||
|
else ifneq (, $(shell which buildx))
|
||||||
|
export BUILDX_CMD = $(which buildx)
|
||||||
|
else
|
||||||
|
$(error "Buildx is required: https://github.com/docker/buildx#installing")
|
||||||
|
endif
|
||||||
|
|
||||||
|
export BIN_OUT = ./bin
|
||||||
|
export RELEASE_OUT = ./release-out
|
||||||
|
|
||||||
shell:
|
shell:
|
||||||
./hack/shell
|
./hack/shell
|
||||||
|
|
||||||
binaries:
|
binaries:
|
||||||
./hack/binaries
|
$(BUILDX_CMD) bake binaries
|
||||||
|
|
||||||
binaries-cross:
|
binaries-cross:
|
||||||
EXPORT_LOCAL=cross-out ./hack/cross
|
$(BUILDX_CMD) bake binaries-cross
|
||||||
|
|
||||||
cross:
|
|
||||||
./hack/cross
|
|
||||||
|
|
||||||
install: binaries
|
install: binaries
|
||||||
mkdir -p ~/.docker/cli-plugins
|
mkdir -p ~/.docker/cli-plugins
|
||||||
cp bin/buildx ~/.docker/cli-plugins/docker-buildx
|
install bin/buildx ~/.docker/cli-plugins/docker-buildx
|
||||||
|
|
||||||
|
release:
|
||||||
|
./hack/release
|
||||||
|
|
||||||
|
validate-all: lint test validate-vendor validate-docs
|
||||||
|
|
||||||
lint:
|
lint:
|
||||||
./hack/lint
|
$(BUILDX_CMD) bake lint
|
||||||
|
|
||||||
test:
|
test:
|
||||||
./hack/test
|
$(BUILDX_CMD) bake test
|
||||||
|
|
||||||
validate-vendor:
|
validate-vendor:
|
||||||
./hack/validate-vendor
|
$(BUILDX_CMD) bake validate-vendor
|
||||||
|
|
||||||
validate-all: lint test validate-vendor
|
validate-docs:
|
||||||
|
$(BUILDX_CMD) bake validate-docs
|
||||||
|
|
||||||
|
validate-authors:
|
||||||
|
$(BUILDX_CMD) bake validate-authors
|
||||||
|
|
||||||
|
test-driver:
|
||||||
|
./hack/test-driver
|
||||||
|
|
||||||
vendor:
|
vendor:
|
||||||
./hack/update-vendor
|
./hack/update-vendor
|
||||||
|
|
||||||
generate-authors:
|
docs:
|
||||||
./hack/generate-authors
|
./hack/update-docs
|
||||||
|
|
||||||
.PHONY: vendor lint shell binaries install binaries-cross validate-all generate-authors
|
authors:
|
||||||
|
$(BUILDX_CMD) bake update-authors
|
||||||
|
|
||||||
|
mod-outdated:
|
||||||
|
$(BUILDX_CMD) bake mod-outdated
|
||||||
|
|
||||||
|
.PHONY: shell binaries binaries-cross install release validate-all lint validate-vendor validate-docs validate-authors vendor docs authors
|
||||||
|
|||||||
441
bake/bake.go
441
bake/bake.go
@@ -2,6 +2,8 @@ package bake
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/csv"
|
||||||
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
@@ -9,7 +11,9 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/bake/hclparser"
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/build"
|
||||||
|
"github.com/docker/buildx/util/buildflags"
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
"github.com/docker/docker/pkg/urlutil"
|
"github.com/docker/docker/pkg/urlutil"
|
||||||
hcl "github.com/hashicorp/hcl/v2"
|
hcl "github.com/hashicorp/hcl/v2"
|
||||||
@@ -26,6 +30,11 @@ type File struct {
|
|||||||
Data []byte
|
Data []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type Override struct {
|
||||||
|
Value string
|
||||||
|
ArrValue []string
|
||||||
|
}
|
||||||
|
|
||||||
func defaultFilenames() []string {
|
func defaultFilenames() []string {
|
||||||
return []string{
|
return []string{
|
||||||
"docker-compose.yml", // support app
|
"docker-compose.yml", // support app
|
||||||
@@ -58,60 +67,108 @@ func ReadLocalFiles(names []string) ([]File, error) {
|
|||||||
return out, nil
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func ReadTargets(ctx context.Context, files []File, targets, overrides []string) (map[string]*Target, error) {
|
func ReadTargets(ctx context.Context, files []File, targets, overrides []string, defaults map[string]string) (map[string]*Target, []*Group, error) {
|
||||||
var c Config
|
c, err := ParseFiles(files, defaults)
|
||||||
for _, f := range files {
|
if err != nil {
|
||||||
cfg, err := ParseFile(f.Data, f.Name)
|
return nil, nil, err
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
c = mergeConfig(c, *cfg)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
o, err := c.newOverrides(overrides)
|
o, err := c.newOverrides(overrides)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
m := map[string]*Target{}
|
m := map[string]*Target{}
|
||||||
for _, n := range targets {
|
for _, n := range targets {
|
||||||
for _, n := range c.ResolveGroup(n) {
|
for _, n := range c.ResolveGroup(n) {
|
||||||
t, err := c.ResolveTarget(n, o)
|
t, err := c.ResolveTarget(n, o)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
if t != nil {
|
if t != nil {
|
||||||
m[n] = t
|
m[n] = t
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return m, nil
|
return m, c.Groups, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error) {
|
||||||
|
defer func() {
|
||||||
|
err = formatHCLError(err, files)
|
||||||
|
}()
|
||||||
|
|
||||||
|
var c Config
|
||||||
|
var fs []*hcl.File
|
||||||
|
for _, f := range files {
|
||||||
|
cfg, isCompose, composeErr := ParseComposeFile(f.Data, f.Name)
|
||||||
|
if isCompose {
|
||||||
|
if composeErr != nil {
|
||||||
|
return nil, composeErr
|
||||||
|
}
|
||||||
|
c = mergeConfig(c, *cfg)
|
||||||
|
c = dedupeConfig(c)
|
||||||
|
}
|
||||||
|
if !isCompose {
|
||||||
|
hf, isHCL, err := ParseHCLFile(f.Data, f.Name)
|
||||||
|
if isHCL {
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
fs = append(fs, hf)
|
||||||
|
} else if composeErr != nil {
|
||||||
|
return nil, fmt.Errorf("failed to parse %s: parsing yaml: %v, parsing hcl: %w", f.Name, composeErr, err)
|
||||||
|
} else {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(fs) > 0 {
|
||||||
|
if err := hclparser.Parse(hcl.MergeFiles(fs), hclparser.Opt{
|
||||||
|
LookupVar: os.LookupEnv,
|
||||||
|
Vars: defaults,
|
||||||
|
}, &c); err.HasErrors() {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func dedupeConfig(c Config) Config {
|
||||||
|
c2 := c
|
||||||
|
c2.Targets = make([]*Target, 0, len(c2.Targets))
|
||||||
|
m := map[string]*Target{}
|
||||||
|
for _, t := range c.Targets {
|
||||||
|
if t2, ok := m[t.Name]; ok {
|
||||||
|
t2.Merge(t)
|
||||||
|
} else {
|
||||||
|
m[t.Name] = t
|
||||||
|
c2.Targets = append(c2.Targets, t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return c2
|
||||||
}
|
}
|
||||||
|
|
||||||
func ParseFile(dt []byte, fn string) (*Config, error) {
|
func ParseFile(dt []byte, fn string) (*Config, error) {
|
||||||
|
return ParseFiles([]File{{Data: dt, Name: fn}}, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
func ParseComposeFile(dt []byte, fn string) (*Config, bool, error) {
|
||||||
fnl := strings.ToLower(fn)
|
fnl := strings.ToLower(fn)
|
||||||
if strings.HasSuffix(fnl, ".yml") || strings.HasSuffix(fnl, ".yaml") {
|
if strings.HasSuffix(fnl, ".yml") || strings.HasSuffix(fnl, ".yaml") {
|
||||||
return ParseCompose(dt)
|
cfg, err := ParseCompose(dt)
|
||||||
|
return cfg, true, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if strings.HasSuffix(fnl, ".json") || strings.HasSuffix(fnl, ".hcl") {
|
if strings.HasSuffix(fnl, ".json") || strings.HasSuffix(fnl, ".hcl") {
|
||||||
return ParseHCL(dt, fn)
|
return nil, false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
cfg, err := ParseCompose(dt)
|
cfg, err := ParseCompose(dt)
|
||||||
if err != nil {
|
return cfg, err == nil, err
|
||||||
cfg, err2 := ParseHCL(dt, fn)
|
|
||||||
if err2 != nil {
|
|
||||||
return nil, errors.Errorf("failed to parse %s: parsing yaml: %s, parsing hcl: %s", fn, err.Error(), err2.Error())
|
|
||||||
}
|
|
||||||
return cfg, nil
|
|
||||||
}
|
|
||||||
return cfg, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type Config struct {
|
type Config struct {
|
||||||
Variables []*Variable `json:"-" hcl:"variable,block"`
|
Groups []*Group `json:"group" hcl:"group,block"`
|
||||||
Groups []*Group `json:"group" hcl:"group,block"`
|
Targets []*Target `json:"target" hcl:"target,block"`
|
||||||
Targets []*Target `json:"target" hcl:"target,block"`
|
|
||||||
Remain hcl.Body `json:"-" hcl:",remain"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func mergeConfig(c1, c2 Config) Config {
|
func mergeConfig(c1, c2 Config) Config {
|
||||||
@@ -157,7 +214,8 @@ func mergeConfig(c1, c2 Config) Config {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if t1 != nil {
|
if t1 != nil {
|
||||||
t2 = merge(t1, t2)
|
t1.Merge(t2)
|
||||||
|
t2 = t1
|
||||||
}
|
}
|
||||||
c1.Targets = append(c1.Targets, t2)
|
c1.Targets = append(c1.Targets, t2)
|
||||||
}
|
}
|
||||||
@@ -188,8 +246,8 @@ func (c Config) expandTargets(pattern string) ([]string, error) {
|
|||||||
return names, nil
|
return names, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Config) newOverrides(v []string) (map[string]*Target, error) {
|
func (c Config) newOverrides(v []string) (map[string]map[string]Override, error) {
|
||||||
m := map[string]*Target{}
|
m := map[string]map[string]Override{}
|
||||||
for _, v := range v {
|
for _, v := range v {
|
||||||
|
|
||||||
parts := strings.SplitN(v, "=", 2)
|
parts := strings.SplitN(v, "=", 2)
|
||||||
@@ -208,73 +266,41 @@ func (c Config) newOverrides(v []string) (map[string]*Target, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
kk := strings.SplitN(parts[0], ".", 2)
|
||||||
|
|
||||||
for _, name := range names {
|
for _, name := range names {
|
||||||
t, ok := m[name]
|
t, ok := m[name]
|
||||||
if !ok {
|
if !ok {
|
||||||
t = &Target{}
|
t = map[string]Override{}
|
||||||
|
m[name] = t
|
||||||
}
|
}
|
||||||
|
|
||||||
|
o := t[kk[1]]
|
||||||
|
|
||||||
switch keys[1] {
|
switch keys[1] {
|
||||||
case "context":
|
case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh":
|
||||||
t.Context = &parts[1]
|
if len(parts) == 2 {
|
||||||
case "dockerfile":
|
o.ArrValue = append(o.ArrValue, parts[1])
|
||||||
t.Dockerfile = &parts[1]
|
}
|
||||||
case "args":
|
case "args":
|
||||||
if len(keys) != 3 {
|
if len(keys) != 3 {
|
||||||
return nil, errors.Errorf("invalid key %s, args requires name", parts[0])
|
return nil, errors.Errorf("invalid key %s, args requires name", parts[0])
|
||||||
}
|
}
|
||||||
if t.Args == nil {
|
|
||||||
t.Args = map[string]string{}
|
|
||||||
}
|
|
||||||
if len(parts) < 2 {
|
if len(parts) < 2 {
|
||||||
v, ok := os.LookupEnv(keys[2])
|
v, ok := os.LookupEnv(keys[2])
|
||||||
if ok {
|
if !ok {
|
||||||
t.Args[keys[2]] = v
|
continue
|
||||||
}
|
}
|
||||||
} else {
|
o.Value = v
|
||||||
t.Args[keys[2]] = parts[1]
|
|
||||||
}
|
}
|
||||||
case "labels":
|
fallthrough
|
||||||
if len(keys) != 3 {
|
|
||||||
return nil, errors.Errorf("invalid key %s, lanels requires name", parts[0])
|
|
||||||
}
|
|
||||||
if t.Labels == nil {
|
|
||||||
t.Labels = map[string]string{}
|
|
||||||
}
|
|
||||||
t.Labels[keys[2]] = parts[1]
|
|
||||||
case "tags":
|
|
||||||
t.Tags = append(t.Tags, parts[1])
|
|
||||||
case "cache-from":
|
|
||||||
t.CacheFrom = append(t.CacheFrom, parts[1])
|
|
||||||
case "cache-to":
|
|
||||||
t.CacheTo = append(t.CacheTo, parts[1])
|
|
||||||
case "target":
|
|
||||||
s := parts[1]
|
|
||||||
t.Target = &s
|
|
||||||
case "secrets":
|
|
||||||
t.Secrets = append(t.Secrets, parts[1])
|
|
||||||
case "ssh":
|
|
||||||
t.SSH = append(t.SSH, parts[1])
|
|
||||||
case "platform":
|
|
||||||
t.Platforms = append(t.Platforms, parts[1])
|
|
||||||
case "output":
|
|
||||||
t.Outputs = append(t.Outputs, parts[1])
|
|
||||||
case "no-cache":
|
|
||||||
noCache, err := strconv.ParseBool(parts[1])
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Errorf("invalid value %s for boolean key no-cache", parts[1])
|
|
||||||
}
|
|
||||||
t.NoCache = &noCache
|
|
||||||
case "pull":
|
|
||||||
pull, err := strconv.ParseBool(parts[1])
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Errorf("invalid value %s for boolean key pull", parts[1])
|
|
||||||
}
|
|
||||||
t.Pull = &pull
|
|
||||||
default:
|
default:
|
||||||
return nil, errors.Errorf("unknown key: %s", keys[1])
|
if len(parts) == 2 {
|
||||||
|
o.Value = parts[1]
|
||||||
|
}
|
||||||
}
|
}
|
||||||
m[name] = t
|
|
||||||
|
t[kk[1]] = o
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return m, nil
|
return m, nil
|
||||||
@@ -306,7 +332,7 @@ func (c Config) group(name string, visited map[string]struct{}) []string {
|
|||||||
return targets
|
return targets
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Config) ResolveTarget(name string, overrides map[string]*Target) (*Target, error) {
|
func (c Config) ResolveTarget(name string, overrides map[string]map[string]Override) (*Target, error) {
|
||||||
t, err := c.target(name, map[string]struct{}{}, overrides)
|
t, err := c.target(name, map[string]struct{}{}, overrides)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -322,7 +348,7 @@ func (c Config) ResolveTarget(name string, overrides map[string]*Target) (*Targe
|
|||||||
return t, nil
|
return t, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c Config) target(name string, visited map[string]struct{}, overrides map[string]*Target) (*Target, error) {
|
func (c Config) target(name string, visited map[string]struct{}, overrides map[string]map[string]Override) (*Target, error) {
|
||||||
if _, ok := visited[name]; ok {
|
if _, ok := visited[name]; ok {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
@@ -344,23 +370,22 @@ func (c Config) target(name string, visited map[string]struct{}, overrides map[s
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if t != nil {
|
if t != nil {
|
||||||
tt = merge(tt, t)
|
tt.Merge(t)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
t.Inherits = nil
|
t.Inherits = nil
|
||||||
tt = merge(merge(defaultTarget(), tt), t)
|
m := defaultTarget()
|
||||||
if override, ok := overrides[name]; ok {
|
m.Merge(tt)
|
||||||
tt = merge(tt, override)
|
m.Merge(t)
|
||||||
|
tt = m
|
||||||
|
if err := tt.AddOverrides(overrides[name]); err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
tt.normalize()
|
tt.normalize()
|
||||||
return tt, nil
|
return tt, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type Variable struct {
|
|
||||||
Name string `json:"-" hcl:"name,label"`
|
|
||||||
Default string `json:"default,omitempty" hcl:"default,optional"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Group struct {
|
type Group struct {
|
||||||
Name string `json:"-" hcl:"name,label"`
|
Name string `json:"-" hcl:"name,label"`
|
||||||
Targets []string `json:"targets" hcl:"targets"`
|
Targets []string `json:"targets" hcl:"targets"`
|
||||||
@@ -402,6 +427,136 @@ func (t *Target) normalize() {
|
|||||||
t.Outputs = removeDupes(t.Outputs)
|
t.Outputs = removeDupes(t.Outputs)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (t *Target) Merge(t2 *Target) {
|
||||||
|
if t2.Context != nil {
|
||||||
|
t.Context = t2.Context
|
||||||
|
}
|
||||||
|
if t2.Dockerfile != nil {
|
||||||
|
t.Dockerfile = t2.Dockerfile
|
||||||
|
}
|
||||||
|
if t2.DockerfileInline != nil {
|
||||||
|
t.DockerfileInline = t2.DockerfileInline
|
||||||
|
}
|
||||||
|
for k, v := range t2.Args {
|
||||||
|
if t.Args == nil {
|
||||||
|
t.Args = map[string]string{}
|
||||||
|
}
|
||||||
|
t.Args[k] = v
|
||||||
|
}
|
||||||
|
for k, v := range t2.Labels {
|
||||||
|
if t.Labels == nil {
|
||||||
|
t.Labels = map[string]string{}
|
||||||
|
}
|
||||||
|
t.Labels[k] = v
|
||||||
|
}
|
||||||
|
if t2.Tags != nil { // no merge
|
||||||
|
t.Tags = t2.Tags
|
||||||
|
}
|
||||||
|
if t2.Target != nil {
|
||||||
|
t.Target = t2.Target
|
||||||
|
}
|
||||||
|
if t2.Secrets != nil { // merge
|
||||||
|
t.Secrets = append(t.Secrets, t2.Secrets...)
|
||||||
|
}
|
||||||
|
if t2.SSH != nil { // merge
|
||||||
|
t.SSH = append(t.SSH, t2.SSH...)
|
||||||
|
}
|
||||||
|
if t2.Platforms != nil { // no merge
|
||||||
|
t.Platforms = t2.Platforms
|
||||||
|
}
|
||||||
|
if t2.CacheFrom != nil { // merge
|
||||||
|
t.CacheFrom = append(t.CacheFrom, t2.CacheFrom...)
|
||||||
|
}
|
||||||
|
if t2.CacheTo != nil { // no merge
|
||||||
|
t.CacheTo = t2.CacheTo
|
||||||
|
}
|
||||||
|
if t2.Outputs != nil { // no merge
|
||||||
|
t.Outputs = t2.Outputs
|
||||||
|
}
|
||||||
|
if t2.Pull != nil {
|
||||||
|
t.Pull = t2.Pull
|
||||||
|
}
|
||||||
|
if t2.NoCache != nil {
|
||||||
|
t.NoCache = t2.NoCache
|
||||||
|
}
|
||||||
|
t.Inherits = append(t.Inherits, t2.Inherits...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *Target) AddOverrides(overrides map[string]Override) error {
|
||||||
|
for key, o := range overrides {
|
||||||
|
value := o.Value
|
||||||
|
keys := strings.SplitN(key, ".", 2)
|
||||||
|
switch keys[0] {
|
||||||
|
case "context":
|
||||||
|
t.Context = &value
|
||||||
|
case "dockerfile":
|
||||||
|
t.Dockerfile = &value
|
||||||
|
case "args":
|
||||||
|
if len(keys) != 2 {
|
||||||
|
return errors.Errorf("args require name")
|
||||||
|
}
|
||||||
|
if t.Args == nil {
|
||||||
|
t.Args = map[string]string{}
|
||||||
|
}
|
||||||
|
t.Args[keys[1]] = value
|
||||||
|
|
||||||
|
case "labels":
|
||||||
|
if len(keys) != 2 {
|
||||||
|
return errors.Errorf("labels require name")
|
||||||
|
}
|
||||||
|
if t.Labels == nil {
|
||||||
|
t.Labels = map[string]string{}
|
||||||
|
}
|
||||||
|
t.Labels[keys[1]] = value
|
||||||
|
case "tags":
|
||||||
|
t.Tags = o.ArrValue
|
||||||
|
case "cache-from":
|
||||||
|
t.CacheFrom = o.ArrValue
|
||||||
|
case "cache-to":
|
||||||
|
t.CacheTo = o.ArrValue
|
||||||
|
case "target":
|
||||||
|
t.Target = &value
|
||||||
|
case "secrets":
|
||||||
|
t.Secrets = o.ArrValue
|
||||||
|
case "ssh":
|
||||||
|
t.SSH = o.ArrValue
|
||||||
|
case "platform":
|
||||||
|
t.Platforms = o.ArrValue
|
||||||
|
case "output":
|
||||||
|
t.Outputs = o.ArrValue
|
||||||
|
case "no-cache":
|
||||||
|
noCache, err := strconv.ParseBool(value)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Errorf("invalid value %s for boolean key no-cache", value)
|
||||||
|
}
|
||||||
|
t.NoCache = &noCache
|
||||||
|
case "pull":
|
||||||
|
pull, err := strconv.ParseBool(value)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Errorf("invalid value %s for boolean key pull", value)
|
||||||
|
}
|
||||||
|
t.Pull = &pull
|
||||||
|
case "push":
|
||||||
|
_, err := strconv.ParseBool(value)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Errorf("invalid value %s for boolean key push", value)
|
||||||
|
}
|
||||||
|
if len(t.Outputs) == 0 {
|
||||||
|
t.Outputs = append(t.Outputs, "type=image,push=true")
|
||||||
|
} else {
|
||||||
|
for i, output := range t.Outputs {
|
||||||
|
if typ := parseOutputType(output); typ == "image" || typ == "registry" {
|
||||||
|
t.Outputs[i] = t.Outputs[i] + ",push=" + value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return errors.Errorf("unknown key: %s", keys[0])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func TargetsToBuildOpt(m map[string]*Target, inp *Input) (map[string]build.Options, error) {
|
func TargetsToBuildOpt(m map[string]*Target, inp *Input) (map[string]build.Options, error) {
|
||||||
m2 := make(map[string]build.Options, len(m))
|
m2 := make(map[string]build.Options, len(m))
|
||||||
for k, v := range m {
|
for k, v := range m {
|
||||||
@@ -422,6 +577,12 @@ func updateContext(t *build.Inputs, inp *Input) {
|
|||||||
t.ContextPath = inp.URL
|
t.ContextPath = inp.URL
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
if strings.HasPrefix(t.ContextPath, "cwd://") {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if IsRemoteURL(t.ContextPath) {
|
||||||
|
return
|
||||||
|
}
|
||||||
st := llb.Scratch().File(llb.Copy(*inp.State, t.ContextPath, "/"), llb.WithCustomNamef("set context to %s", t.ContextPath))
|
st := llb.Scratch().File(llb.Copy(*inp.State, t.ContextPath, "/"), llb.WithCustomNamef("set context to %s", t.ContextPath))
|
||||||
t.ContextState = &st
|
t.ContextState = &st
|
||||||
}
|
}
|
||||||
@@ -438,7 +599,9 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
if t.Context != nil {
|
if t.Context != nil {
|
||||||
contextPath = *t.Context
|
contextPath = *t.Context
|
||||||
}
|
}
|
||||||
contextPath = path.Clean(contextPath)
|
if !strings.HasPrefix(contextPath, "cwd://") && !IsRemoteURL(contextPath) {
|
||||||
|
contextPath = path.Clean(contextPath)
|
||||||
|
}
|
||||||
dockerfilePath := "Dockerfile"
|
dockerfilePath := "Dockerfile"
|
||||||
if t.Dockerfile != nil {
|
if t.Dockerfile != nil {
|
||||||
dockerfilePath = *t.Dockerfile
|
dockerfilePath = *t.Dockerfile
|
||||||
@@ -465,6 +628,11 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
bi.DockerfileInline = *t.DockerfileInline
|
bi.DockerfileInline = *t.DockerfileInline
|
||||||
}
|
}
|
||||||
updateContext(&bi, inp)
|
updateContext(&bi, inp)
|
||||||
|
if strings.HasPrefix(bi.ContextPath, "cwd://") {
|
||||||
|
bi.ContextPath = path.Clean(strings.TrimPrefix(bi.ContextPath, "cwd://"))
|
||||||
|
}
|
||||||
|
|
||||||
|
t.Context = &bi.ContextPath
|
||||||
|
|
||||||
bo := &build.Options{
|
bo := &build.Options{
|
||||||
Inputs: bi,
|
Inputs: bi,
|
||||||
@@ -483,13 +651,17 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
|
|
||||||
bo.Session = append(bo.Session, authprovider.NewDockerAuthProvider(os.Stderr))
|
bo.Session = append(bo.Session, authprovider.NewDockerAuthProvider(os.Stderr))
|
||||||
|
|
||||||
secrets, err := build.ParseSecretSpecs(t.Secrets)
|
secrets, err := buildflags.ParseSecretSpecs(t.Secrets)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
bo.Session = append(bo.Session, secrets)
|
bo.Session = append(bo.Session, secrets)
|
||||||
|
|
||||||
ssh, err := build.ParseSSHSpecs(t.SSH)
|
sshSpecs := t.SSH
|
||||||
|
if len(sshSpecs) == 0 && buildflags.IsGitSSH(contextPath) {
|
||||||
|
sshSpecs = []string{"default"}
|
||||||
|
}
|
||||||
|
ssh, err := buildflags.ParseSSHSpecs(sshSpecs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -499,19 +671,19 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
|
|||||||
bo.Target = *t.Target
|
bo.Target = *t.Target
|
||||||
}
|
}
|
||||||
|
|
||||||
cacheImports, err := build.ParseCacheEntry(t.CacheFrom)
|
cacheImports, err := buildflags.ParseCacheEntry(t.CacheFrom)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
bo.CacheFrom = cacheImports
|
bo.CacheFrom = cacheImports
|
||||||
|
|
||||||
cacheExports, err := build.ParseCacheEntry(t.CacheTo)
|
cacheExports, err := buildflags.ParseCacheEntry(t.CacheTo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
bo.CacheTo = cacheExports
|
bo.CacheTo = cacheExports
|
||||||
|
|
||||||
outputs, err := build.ParseOutputs(t.Outputs)
|
outputs, err := buildflags.ParseOutputs(t.Outputs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -524,62 +696,6 @@ func defaultTarget() *Target {
|
|||||||
return &Target{}
|
return &Target{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func merge(t1, t2 *Target) *Target {
|
|
||||||
if t2.Context != nil {
|
|
||||||
t1.Context = t2.Context
|
|
||||||
}
|
|
||||||
if t2.Dockerfile != nil {
|
|
||||||
t1.Dockerfile = t2.Dockerfile
|
|
||||||
}
|
|
||||||
if t2.DockerfileInline != nil {
|
|
||||||
t1.DockerfileInline = t2.DockerfileInline
|
|
||||||
}
|
|
||||||
for k, v := range t2.Args {
|
|
||||||
if t1.Args == nil {
|
|
||||||
t1.Args = map[string]string{}
|
|
||||||
}
|
|
||||||
t1.Args[k] = v
|
|
||||||
}
|
|
||||||
for k, v := range t2.Labels {
|
|
||||||
if t1.Labels == nil {
|
|
||||||
t1.Labels = map[string]string{}
|
|
||||||
}
|
|
||||||
t1.Labels[k] = v
|
|
||||||
}
|
|
||||||
if t2.Tags != nil { // no merge
|
|
||||||
t1.Tags = t2.Tags
|
|
||||||
}
|
|
||||||
if t2.Target != nil {
|
|
||||||
t1.Target = t2.Target
|
|
||||||
}
|
|
||||||
if t2.Secrets != nil { // merge
|
|
||||||
t1.Secrets = append(t1.Secrets, t2.Secrets...)
|
|
||||||
}
|
|
||||||
if t2.SSH != nil { // merge
|
|
||||||
t1.SSH = append(t1.SSH, t2.SSH...)
|
|
||||||
}
|
|
||||||
if t2.Platforms != nil { // no merge
|
|
||||||
t1.Platforms = t2.Platforms
|
|
||||||
}
|
|
||||||
if t2.CacheFrom != nil { // no merge
|
|
||||||
t1.CacheFrom = append(t1.CacheFrom, t2.CacheFrom...)
|
|
||||||
}
|
|
||||||
if t2.CacheTo != nil { // no merge
|
|
||||||
t1.CacheTo = t2.CacheTo
|
|
||||||
}
|
|
||||||
if t2.Outputs != nil { // no merge
|
|
||||||
t1.Outputs = t2.Outputs
|
|
||||||
}
|
|
||||||
if t2.Pull != nil {
|
|
||||||
t1.Pull = t2.Pull
|
|
||||||
}
|
|
||||||
if t2.NoCache != nil {
|
|
||||||
t1.NoCache = t2.NoCache
|
|
||||||
}
|
|
||||||
t1.Inherits = append(t1.Inherits, t2.Inherits...)
|
|
||||||
return t1
|
|
||||||
}
|
|
||||||
|
|
||||||
func removeDupes(s []string) []string {
|
func removeDupes(s []string) []string {
|
||||||
i := 0
|
i := 0
|
||||||
seen := make(map[string]struct{}, len(s))
|
seen := make(map[string]struct{}, len(s))
|
||||||
@@ -600,3 +716,20 @@ func removeDupes(s []string) []string {
|
|||||||
func isRemoteResource(str string) bool {
|
func isRemoteResource(str string) bool {
|
||||||
return urlutil.IsGitURL(str) || urlutil.IsURL(str)
|
return urlutil.IsGitURL(str) || urlutil.IsURL(str)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func parseOutputType(str string) string {
|
||||||
|
csvReader := csv.NewReader(strings.NewReader(str))
|
||||||
|
fields, err := csvReader.Read()
|
||||||
|
if err != nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
for _, field := range fields {
|
||||||
|
parts := strings.SplitN(field, "=", 2)
|
||||||
|
if len(parts) == 2 {
|
||||||
|
if parts[0] == "type" {
|
||||||
|
return parts[1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|||||||
@@ -34,7 +34,7 @@ target "webapp" {
|
|||||||
ctx := context.TODO()
|
ctx := context.TODO()
|
||||||
|
|
||||||
t.Run("NoOverrides", func(t *testing.T) {
|
t.Run("NoOverrides", func(t *testing.T) {
|
||||||
m, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, nil)
|
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, 1, len(m))
|
require.Equal(t, 1, len(m))
|
||||||
|
|
||||||
@@ -46,7 +46,7 @@ target "webapp" {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("InvalidTargetOverrides", func(t *testing.T) {
|
t.Run("InvalidTargetOverrides", func(t *testing.T) {
|
||||||
_, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"nosuchtarget.context=foo"})
|
_, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"nosuchtarget.context=foo"}, nil)
|
||||||
require.NotNil(t, err)
|
require.NotNil(t, err)
|
||||||
require.Equal(t, err.Error(), "could not find any target matching 'nosuchtarget'")
|
require.Equal(t, err.Error(), "could not find any target matching 'nosuchtarget'")
|
||||||
})
|
})
|
||||||
@@ -56,14 +56,14 @@ target "webapp" {
|
|||||||
os.Setenv("VAR_FROMENV"+t.Name(), "fromEnv")
|
os.Setenv("VAR_FROMENV"+t.Name(), "fromEnv")
|
||||||
defer os.Unsetenv("VAR_FROM_ENV" + t.Name())
|
defer os.Unsetenv("VAR_FROM_ENV" + t.Name())
|
||||||
|
|
||||||
m, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{
|
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{
|
||||||
"webapp.args.VAR_UNSET",
|
"webapp.args.VAR_UNSET",
|
||||||
"webapp.args.VAR_EMPTY=",
|
"webapp.args.VAR_EMPTY=",
|
||||||
"webapp.args.VAR_SET=bananas",
|
"webapp.args.VAR_SET=bananas",
|
||||||
"webapp.args.VAR_FROMENV" + t.Name(),
|
"webapp.args.VAR_FROMENV" + t.Name(),
|
||||||
"webapp.args.VAR_INHERITED=override",
|
"webapp.args.VAR_INHERITED=override",
|
||||||
// not overriding VAR_BOTH on purpose
|
// not overriding VAR_BOTH on purpose
|
||||||
})
|
}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, "Dockerfile.webapp", *m["webapp"].Dockerfile)
|
require.Equal(t, "Dockerfile.webapp", *m["webapp"].Dockerfile)
|
||||||
@@ -85,10 +85,10 @@ target "webapp" {
|
|||||||
|
|
||||||
// building leaf but overriding parent fields
|
// building leaf but overriding parent fields
|
||||||
t.Run("parent", func(t *testing.T) {
|
t.Run("parent", func(t *testing.T) {
|
||||||
m, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{
|
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{
|
||||||
"webDEP.args.VAR_INHERITED=override",
|
"webDEP.args.VAR_INHERITED=override",
|
||||||
"webDEP.args.VAR_BOTH=override",
|
"webDEP.args.VAR_BOTH=override",
|
||||||
})
|
}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, m["webapp"].Args["VAR_INHERITED"], "override")
|
require.Equal(t, m["webapp"].Args["VAR_INHERITED"], "override")
|
||||||
require.Equal(t, m["webapp"].Args["VAR_BOTH"], "webapp")
|
require.Equal(t, m["webapp"].Args["VAR_BOTH"], "webapp")
|
||||||
@@ -96,23 +96,23 @@ target "webapp" {
|
|||||||
})
|
})
|
||||||
|
|
||||||
t.Run("ContextOverride", func(t *testing.T) {
|
t.Run("ContextOverride", func(t *testing.T) {
|
||||||
_, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.context"})
|
_, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.context"}, nil)
|
||||||
require.NotNil(t, err)
|
require.NotNil(t, err)
|
||||||
|
|
||||||
m, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.context=foo"})
|
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.context=foo"}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, "foo", *m["webapp"].Context)
|
require.Equal(t, "foo", *m["webapp"].Context)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("NoCacheOverride", func(t *testing.T) {
|
t.Run("NoCacheOverride", func(t *testing.T) {
|
||||||
m, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.no-cache=false"})
|
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.no-cache=false"}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, false, *m["webapp"].NoCache)
|
require.Equal(t, false, *m["webapp"].NoCache)
|
||||||
})
|
})
|
||||||
|
|
||||||
t.Run("PullOverride", func(t *testing.T) {
|
t.Run("PullOverride", func(t *testing.T) {
|
||||||
m, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.pull=false"})
|
m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.pull=false"}, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, false, *m["webapp"].Pull)
|
require.Equal(t, false, *m["webapp"].Pull)
|
||||||
})
|
})
|
||||||
@@ -172,13 +172,58 @@ target "webapp" {
|
|||||||
}
|
}
|
||||||
for _, test := range cases {
|
for _, test := range cases {
|
||||||
t.Run(test.name, func(t *testing.T) {
|
t.Run(test.name, func(t *testing.T) {
|
||||||
m, err := ReadTargets(ctx, []File{fp}, test.targets, test.overrides)
|
m, _, err := ReadTargets(ctx, []File{fp}, test.targets, test.overrides, nil)
|
||||||
test.check(t, m, err)
|
test.check(t, m, err)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestPushOverride(t *testing.T) {
|
||||||
|
t.Parallel()
|
||||||
|
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hc",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
output = ["type=image,compression=zstd"]
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
ctx := context.TODO()
|
||||||
|
m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{"*.push=true"}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(m["app"].Outputs))
|
||||||
|
require.Equal(t, "type=image,compression=zstd,push=true", m["app"].Outputs[0])
|
||||||
|
|
||||||
|
fp = File{
|
||||||
|
Name: "docker-bake.hc",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
output = ["type=image,compression=zstd"]
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
ctx = context.TODO()
|
||||||
|
m, _, err = ReadTargets(ctx, []File{fp}, []string{"app"}, []string{"*.push=false"}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(m["app"].Outputs))
|
||||||
|
require.Equal(t, "type=image,compression=zstd,push=false", m["app"].Outputs[0])
|
||||||
|
|
||||||
|
fp = File{
|
||||||
|
Name: "docker-bake.hc",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
ctx = context.TODO()
|
||||||
|
m, _, err = ReadTargets(ctx, []File{fp}, []string{"app"}, []string{"*.push=true"}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(m["app"].Outputs))
|
||||||
|
require.Equal(t, "type=image,push=true", m["app"].Outputs[0])
|
||||||
|
}
|
||||||
|
|
||||||
func TestReadTargetsCompose(t *testing.T) {
|
func TestReadTargetsCompose(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
|
||||||
@@ -215,7 +260,7 @@ services:
|
|||||||
|
|
||||||
ctx := context.TODO()
|
ctx := context.TODO()
|
||||||
|
|
||||||
m, err := ReadTargets(ctx, []File{fp, fp2}, []string{"default"}, nil)
|
m, _, err := ReadTargets(ctx, []File{fp, fp2}, []string{"default"}, nil, nil)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 3, len(m))
|
require.Equal(t, 3, len(m))
|
||||||
@@ -226,3 +271,56 @@ services:
|
|||||||
require.Equal(t, "1", m["webapp"].Args["buildno"])
|
require.Equal(t, "1", m["webapp"].Args["buildno"])
|
||||||
require.Equal(t, "12", m["webapp"].Args["buildno2"])
|
require.Equal(t, "12", m["webapp"].Args["buildno2"])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestHCLCwdPrefix(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
context = "cwd://foo"
|
||||||
|
dockerfile = "test"
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
ctx := context.TODO()
|
||||||
|
m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(m))
|
||||||
|
_, ok := m["app"]
|
||||||
|
require.True(t, ok)
|
||||||
|
|
||||||
|
_, err = TargetsToBuildOpt(m, &Input{})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, "test", *m["app"].Dockerfile)
|
||||||
|
require.Equal(t, "foo", *m["app"].Context)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestOverrideMerge(t *testing.T) {
|
||||||
|
fp := File{
|
||||||
|
Name: "docker-bake.hcl",
|
||||||
|
Data: []byte(
|
||||||
|
`target "app" {
|
||||||
|
platforms = ["linux/amd64"]
|
||||||
|
output = ["foo"]
|
||||||
|
}`),
|
||||||
|
}
|
||||||
|
ctx := context.TODO()
|
||||||
|
m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{
|
||||||
|
"app.platform=linux/arm",
|
||||||
|
"app.platform=linux/ppc64le",
|
||||||
|
"app.output=type=registry",
|
||||||
|
}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(m))
|
||||||
|
_, ok := m["app"]
|
||||||
|
require.True(t, ok)
|
||||||
|
|
||||||
|
_, err = TargetsToBuildOpt(m, &Input{})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, []string{"linux/arm", "linux/ppc64le"}, m["app"].Platforms)
|
||||||
|
require.Equal(t, 1, len(m["app"].Outputs))
|
||||||
|
require.Equal(t, "type=registry", m["app"].Outputs[0])
|
||||||
|
}
|
||||||
|
|||||||
132
bake/compose.go
132
bake/compose.go
@@ -6,22 +6,20 @@ import (
|
|||||||
"reflect"
|
"reflect"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/cli/cli/compose/loader"
|
"github.com/compose-spec/compose-go/loader"
|
||||||
composetypes "github.com/docker/cli/cli/compose/types"
|
compose "github.com/compose-spec/compose-go/types"
|
||||||
)
|
)
|
||||||
|
|
||||||
func parseCompose(dt []byte) (*composetypes.Config, error) {
|
func parseCompose(dt []byte) (*compose.Project, error) {
|
||||||
parsed, err := loader.ParseYAML([]byte(dt))
|
return loader.Load(compose.ConfigDetails{
|
||||||
if err != nil {
|
ConfigFiles: []compose.ConfigFile{
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return loader.Load(composetypes.ConfigDetails{
|
|
||||||
ConfigFiles: []composetypes.ConfigFile{
|
|
||||||
{
|
{
|
||||||
Config: parsed,
|
Content: dt,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
Environment: envMap(os.Environ()),
|
Environment: envMap(os.Environ()),
|
||||||
|
}, func(options *loader.Options) {
|
||||||
|
options.SkipNormalization = true
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -44,7 +42,7 @@ func ParseCompose(dt []byte) (*Config, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var c Config
|
var c Config
|
||||||
var zeroBuildConfig composetypes.BuildConfig
|
var zeroBuildConfig compose.BuildConfig
|
||||||
if len(cfg.Services) > 0 {
|
if len(cfg.Services) > 0 {
|
||||||
c.Groups = []*Group{}
|
c.Groups = []*Group{}
|
||||||
c.Targets = []*Target{}
|
c.Targets = []*Target{}
|
||||||
@@ -53,10 +51,10 @@ func ParseCompose(dt []byte) (*Config, error) {
|
|||||||
|
|
||||||
for _, s := range cfg.Services {
|
for _, s := range cfg.Services {
|
||||||
|
|
||||||
if reflect.DeepEqual(s.Build, zeroBuildConfig) {
|
if s.Build == nil || reflect.DeepEqual(s.Build, zeroBuildConfig) {
|
||||||
// if not make sure they're setting an image or it's invalid d-c.yml
|
// if not make sure they're setting an image or it's invalid d-c.yml
|
||||||
if s.Image == "" {
|
if s.Image == "" {
|
||||||
return nil, fmt.Errorf("compose file invalid: service %s has neither an image nor a build context specified. At least one must be provided.", s.Name)
|
return nil, fmt.Errorf("compose file invalid: service %s has neither an image nor a build context specified. At least one must be provided", s.Name)
|
||||||
}
|
}
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
@@ -77,15 +75,20 @@ func ParseCompose(dt []byte) (*Config, error) {
|
|||||||
Context: contextPathP,
|
Context: contextPathP,
|
||||||
Dockerfile: dockerfilePathP,
|
Dockerfile: dockerfilePathP,
|
||||||
Labels: s.Build.Labels,
|
Labels: s.Build.Labels,
|
||||||
Args: toMap(s.Build.Args),
|
Args: flatten(s.Build.Args.Resolve(func(val string) (string, bool) {
|
||||||
CacheFrom: s.Build.CacheFrom,
|
val, ok := cfg.Environment[val]
|
||||||
// TODO: add platforms
|
return val, ok
|
||||||
|
})),
|
||||||
|
CacheFrom: s.Build.CacheFrom,
|
||||||
|
}
|
||||||
|
if err = t.composeExtTarget(s.Build.Extensions); err != nil {
|
||||||
|
return nil, err
|
||||||
}
|
}
|
||||||
if s.Build.Target != "" {
|
if s.Build.Target != "" {
|
||||||
target := s.Build.Target
|
target := s.Build.Target
|
||||||
t.Target = &target
|
t.Target = &target
|
||||||
}
|
}
|
||||||
if s.Image != "" {
|
if len(t.Tags) == 0 && s.Image != "" {
|
||||||
t.Tags = []string{s.Image}
|
t.Tags = []string{s.Image}
|
||||||
}
|
}
|
||||||
c.Targets = append(c.Targets, t)
|
c.Targets = append(c.Targets, t)
|
||||||
@@ -97,14 +100,95 @@ func ParseCompose(dt []byte) (*Config, error) {
|
|||||||
return &c, nil
|
return &c, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func toMap(in composetypes.MappingWithEquals) map[string]string {
|
func flatten(in compose.MappingWithEquals) compose.Mapping {
|
||||||
m := map[string]string{}
|
if len(in) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
out := compose.Mapping{}
|
||||||
for k, v := range in {
|
for k, v := range in {
|
||||||
if v != nil {
|
if v == nil {
|
||||||
m[k] = *v
|
continue
|
||||||
} else {
|
}
|
||||||
m[k] = os.Getenv(k)
|
out[k] = *v
|
||||||
|
}
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
|
||||||
|
// composeExtTarget converts Compose build extension x-bake to bake Target
|
||||||
|
// https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension
|
||||||
|
func (t *Target) composeExtTarget(exts map[string]interface{}) error {
|
||||||
|
if ext, ok := exts["x-bake"]; ok {
|
||||||
|
for key, val := range ext.(map[string]interface{}) {
|
||||||
|
switch key {
|
||||||
|
case "tags":
|
||||||
|
if res, k := val.(string); k {
|
||||||
|
t.Tags = append(t.Tags, res)
|
||||||
|
} else {
|
||||||
|
for _, res := range val.([]interface{}) {
|
||||||
|
t.Tags = append(t.Tags, res.(string))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case "cache-from":
|
||||||
|
t.CacheFrom = []string{} // Needed to override the main field
|
||||||
|
if res, k := val.(string); k {
|
||||||
|
t.CacheFrom = append(t.CacheFrom, res)
|
||||||
|
} else {
|
||||||
|
for _, res := range val.([]interface{}) {
|
||||||
|
t.CacheFrom = append(t.CacheFrom, res.(string))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case "cache-to":
|
||||||
|
if res, k := val.(string); k {
|
||||||
|
t.CacheTo = append(t.CacheTo, res)
|
||||||
|
} else {
|
||||||
|
for _, res := range val.([]interface{}) {
|
||||||
|
t.CacheTo = append(t.CacheTo, res.(string))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case "secret":
|
||||||
|
if res, k := val.(string); k {
|
||||||
|
t.Secrets = append(t.Secrets, res)
|
||||||
|
} else {
|
||||||
|
for _, res := range val.([]interface{}) {
|
||||||
|
t.Secrets = append(t.Secrets, res.(string))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case "ssh":
|
||||||
|
if res, k := val.(string); k {
|
||||||
|
t.SSH = append(t.SSH, res)
|
||||||
|
} else {
|
||||||
|
for _, res := range val.([]interface{}) {
|
||||||
|
t.SSH = append(t.SSH, res.(string))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case "platforms":
|
||||||
|
if res, k := val.(string); k {
|
||||||
|
t.Platforms = append(t.Platforms, res)
|
||||||
|
} else {
|
||||||
|
for _, res := range val.([]interface{}) {
|
||||||
|
t.Platforms = append(t.Platforms, res.(string))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case "output":
|
||||||
|
if res, k := val.(string); k {
|
||||||
|
t.Outputs = append(t.Outputs, res)
|
||||||
|
} else {
|
||||||
|
for _, res := range val.([]interface{}) {
|
||||||
|
t.Outputs = append(t.Outputs, res.(string))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case "pull":
|
||||||
|
if res, ok := val.(bool); ok {
|
||||||
|
t.Pull = &res
|
||||||
|
}
|
||||||
|
case "no-cache":
|
||||||
|
if res, ok := val.(bool); ok {
|
||||||
|
t.NoCache = &res
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("compose file invalid: unkwown %s field for x-bake", key)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return m
|
return nil
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
package bake
|
package bake
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"os"
|
||||||
"sort"
|
"sort"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
@@ -9,8 +10,6 @@ import (
|
|||||||
|
|
||||||
func TestParseCompose(t *testing.T) {
|
func TestParseCompose(t *testing.T) {
|
||||||
var dt = []byte(`
|
var dt = []byte(`
|
||||||
version: "3"
|
|
||||||
|
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
build: ./db
|
build: ./db
|
||||||
@@ -48,8 +47,6 @@ services:
|
|||||||
|
|
||||||
func TestNoBuildOutOfTreeService(t *testing.T) {
|
func TestNoBuildOutOfTreeService(t *testing.T) {
|
||||||
var dt = []byte(`
|
var dt = []byte(`
|
||||||
version: "3.7"
|
|
||||||
|
|
||||||
services:
|
services:
|
||||||
external:
|
external:
|
||||||
image: "verycooldb:1337"
|
image: "verycooldb:1337"
|
||||||
@@ -63,8 +60,6 @@ services:
|
|||||||
|
|
||||||
func TestParseComposeTarget(t *testing.T) {
|
func TestParseComposeTarget(t *testing.T) {
|
||||||
var dt = []byte(`
|
var dt = []byte(`
|
||||||
version: "3.7"
|
|
||||||
|
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
build:
|
build:
|
||||||
@@ -91,8 +86,6 @@ services:
|
|||||||
|
|
||||||
func TestComposeBuildWithoutContext(t *testing.T) {
|
func TestComposeBuildWithoutContext(t *testing.T) {
|
||||||
var dt = []byte(`
|
var dt = []byte(`
|
||||||
version: "3.7"
|
|
||||||
|
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
build:
|
build:
|
||||||
@@ -115,10 +108,37 @@ services:
|
|||||||
require.Equal(t, "webapp", *c.Targets[1].Target)
|
require.Equal(t, "webapp", *c.Targets[1].Target)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestBuildArgEnvCompose(t *testing.T) {
|
||||||
|
var dt = []byte(`
|
||||||
|
version: "3.8"
|
||||||
|
services:
|
||||||
|
example:
|
||||||
|
image: example
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
dockerfile: Dockerfile
|
||||||
|
args:
|
||||||
|
FOO:
|
||||||
|
BAR: $ZZZ_BAR
|
||||||
|
BRB: FOO
|
||||||
|
`)
|
||||||
|
|
||||||
|
os.Setenv("FOO", "bar")
|
||||||
|
defer os.Unsetenv("FOO")
|
||||||
|
os.Setenv("BAR", "foo")
|
||||||
|
defer os.Unsetenv("BAR")
|
||||||
|
os.Setenv("ZZZ_BAR", "zzz_foo")
|
||||||
|
defer os.Unsetenv("ZZZ_BAR")
|
||||||
|
|
||||||
|
c, err := ParseCompose(dt)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, c.Targets[0].Args["FOO"], "bar")
|
||||||
|
require.Equal(t, c.Targets[0].Args["BAR"], "zzz_foo")
|
||||||
|
require.Equal(t, c.Targets[0].Args["BRB"], "FOO")
|
||||||
|
}
|
||||||
|
|
||||||
func TestBogusCompose(t *testing.T) {
|
func TestBogusCompose(t *testing.T) {
|
||||||
var dt = []byte(`
|
var dt = []byte(`
|
||||||
version: "3.7"
|
|
||||||
|
|
||||||
services:
|
services:
|
||||||
db:
|
db:
|
||||||
labels:
|
labels:
|
||||||
@@ -131,5 +151,133 @@ services:
|
|||||||
|
|
||||||
_, err := ParseCompose(dt)
|
_, err := ParseCompose(dt)
|
||||||
require.Error(t, err)
|
require.Error(t, err)
|
||||||
require.Contains(t, err.Error(), "has neither an image nor a build context specified. At least one must be provided")
|
require.Contains(t, err.Error(), "has neither an image nor a build context specified: invalid compose project")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestAdvancedNetwork(t *testing.T) {
|
||||||
|
var dt = []byte(`
|
||||||
|
services:
|
||||||
|
db:
|
||||||
|
networks:
|
||||||
|
- example.com
|
||||||
|
build:
|
||||||
|
context: ./db
|
||||||
|
target: db
|
||||||
|
|
||||||
|
networks:
|
||||||
|
example.com:
|
||||||
|
name: example.com
|
||||||
|
driver: bridge
|
||||||
|
ipam:
|
||||||
|
config:
|
||||||
|
- subnet: 10.5.0.0/24
|
||||||
|
ip_range: 10.5.0.0/24
|
||||||
|
gateway: 10.5.0.254
|
||||||
|
`)
|
||||||
|
|
||||||
|
_, err := ParseCompose(dt)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestDependsOnList(t *testing.T) {
|
||||||
|
var dt = []byte(`
|
||||||
|
version: "3.8"
|
||||||
|
|
||||||
|
services:
|
||||||
|
example-container:
|
||||||
|
image: example/fails:latest
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
dockerfile: Dockerfile
|
||||||
|
depends_on:
|
||||||
|
other-container:
|
||||||
|
condition: service_healthy
|
||||||
|
networks:
|
||||||
|
default:
|
||||||
|
aliases:
|
||||||
|
- integration-tests
|
||||||
|
|
||||||
|
other-container:
|
||||||
|
image: example/other:latest
|
||||||
|
healthcheck:
|
||||||
|
test: ["CMD", "echo", "success"]
|
||||||
|
retries: 5
|
||||||
|
interval: 5s
|
||||||
|
timeout: 10s
|
||||||
|
start_period: 5s
|
||||||
|
|
||||||
|
networks:
|
||||||
|
default:
|
||||||
|
name: test-net
|
||||||
|
`)
|
||||||
|
|
||||||
|
_, err := ParseCompose(dt)
|
||||||
|
require.NoError(t, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestComposeExt(t *testing.T) {
|
||||||
|
var dt = []byte(`
|
||||||
|
services:
|
||||||
|
addon:
|
||||||
|
image: ct-addon:bar
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
dockerfile: ./Dockerfile
|
||||||
|
cache_from:
|
||||||
|
- user/app:cache
|
||||||
|
args:
|
||||||
|
CT_ECR: foo
|
||||||
|
CT_TAG: bar
|
||||||
|
x-bake:
|
||||||
|
tags:
|
||||||
|
- ct-addon:foo
|
||||||
|
- ct-addon:alp
|
||||||
|
platforms:
|
||||||
|
- linux/amd64
|
||||||
|
- linux/arm64
|
||||||
|
cache-from:
|
||||||
|
- type=local,src=path/to/cache
|
||||||
|
cache-to: local,dest=path/to/cache
|
||||||
|
pull: true
|
||||||
|
|
||||||
|
aws:
|
||||||
|
image: ct-fake-aws:bar
|
||||||
|
build:
|
||||||
|
dockerfile: ./aws.Dockerfile
|
||||||
|
args:
|
||||||
|
CT_ECR: foo
|
||||||
|
CT_TAG: bar
|
||||||
|
x-bake:
|
||||||
|
secret:
|
||||||
|
- id=mysecret,src=/local/secret
|
||||||
|
- id=mysecret2,src=/local/secret2
|
||||||
|
ssh: default
|
||||||
|
platforms: linux/arm64
|
||||||
|
output: type=docker
|
||||||
|
no-cache: true
|
||||||
|
`)
|
||||||
|
|
||||||
|
c, err := ParseCompose(dt)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 2, len(c.Targets))
|
||||||
|
sort.Slice(c.Targets, func(i, j int) bool {
|
||||||
|
return c.Targets[i].Name < c.Targets[j].Name
|
||||||
|
})
|
||||||
|
require.Equal(t, c.Targets[0].Args, map[string]string{"CT_ECR": "foo", "CT_TAG": "bar"})
|
||||||
|
require.Equal(t, c.Targets[0].Tags, []string{"ct-addon:foo", "ct-addon:alp"})
|
||||||
|
require.Equal(t, c.Targets[0].Platforms, []string{"linux/amd64", "linux/arm64"})
|
||||||
|
require.Equal(t, c.Targets[0].CacheFrom, []string{"type=local,src=path/to/cache"})
|
||||||
|
require.Equal(t, c.Targets[0].CacheTo, []string{"local,dest=path/to/cache"})
|
||||||
|
require.Equal(t, c.Targets[0].Pull, newBool(true))
|
||||||
|
require.Equal(t, c.Targets[1].Tags, []string{"ct-fake-aws:bar"})
|
||||||
|
require.Equal(t, c.Targets[1].Secrets, []string{"id=mysecret,src=/local/secret", "id=mysecret2,src=/local/secret2"})
|
||||||
|
require.Equal(t, c.Targets[1].SSH, []string{"default"})
|
||||||
|
require.Equal(t, c.Targets[1].Platforms, []string{"linux/arm64"})
|
||||||
|
require.Equal(t, c.Targets[1].Outputs, []string{"type=docker"})
|
||||||
|
require.Equal(t, c.Targets[1].NoCache, newBool(true))
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBool(val bool) *bool {
|
||||||
|
b := val
|
||||||
|
return &b
|
||||||
}
|
}
|
||||||
|
|||||||
205
bake/hcl.go
205
bake/hcl.go
@@ -1,200 +1,42 @@
|
|||||||
package bake
|
package bake
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
hcl "github.com/hashicorp/hcl/v2"
|
hcl "github.com/hashicorp/hcl/v2"
|
||||||
"github.com/hashicorp/hcl/v2/ext/userfunc"
|
"github.com/hashicorp/hcl/v2/hclparse"
|
||||||
"github.com/hashicorp/hcl/v2/hclsimple"
|
|
||||||
"github.com/hashicorp/hcl/v2/hclsyntax"
|
|
||||||
"github.com/hashicorp/hcl/v2/json"
|
|
||||||
"github.com/moby/buildkit/solver/errdefs"
|
"github.com/moby/buildkit/solver/errdefs"
|
||||||
"github.com/moby/buildkit/solver/pb"
|
"github.com/moby/buildkit/solver/pb"
|
||||||
"github.com/zclconf/go-cty/cty"
|
|
||||||
"github.com/zclconf/go-cty/cty/function"
|
|
||||||
"github.com/zclconf/go-cty/cty/function/stdlib"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Collection of generally useful functions in cty-using applications, which
|
func ParseHCLFile(dt []byte, fn string) (*hcl.File, bool, error) {
|
||||||
// HCL supports. These functions are available for use in HCL files.
|
var err error
|
||||||
var (
|
if strings.HasSuffix(fn, ".json") {
|
||||||
stdlibFunctions = map[string]function.Function{
|
f, diags := hclparse.NewParser().ParseJSON(dt, fn)
|
||||||
"absolute": stdlib.AbsoluteFunc,
|
if diags.HasErrors() {
|
||||||
"add": stdlib.AddFunc,
|
err = diags
|
||||||
"and": stdlib.AndFunc,
|
|
||||||
"byteslen": stdlib.BytesLenFunc,
|
|
||||||
"bytesslice": stdlib.BytesSliceFunc,
|
|
||||||
"chomp": stdlib.ChompFunc,
|
|
||||||
"chunklist": stdlib.ChunklistFunc,
|
|
||||||
"ceil": stdlib.CeilFunc,
|
|
||||||
"csvdecode": stdlib.CSVDecodeFunc,
|
|
||||||
"coalesce": stdlib.CoalesceFunc,
|
|
||||||
"coalescelist": stdlib.CoalesceListFunc,
|
|
||||||
"compact": stdlib.CompactFunc,
|
|
||||||
"concat": stdlib.ConcatFunc,
|
|
||||||
"contains": stdlib.ContainsFunc,
|
|
||||||
"distinct": stdlib.DistinctFunc,
|
|
||||||
"divide": stdlib.DivideFunc,
|
|
||||||
"element": stdlib.ElementFunc,
|
|
||||||
"equal": stdlib.EqualFunc,
|
|
||||||
"flatten": stdlib.FlattenFunc,
|
|
||||||
"floor": stdlib.FloorFunc,
|
|
||||||
"formatdate": stdlib.FormatDateFunc,
|
|
||||||
"format": stdlib.FormatFunc,
|
|
||||||
"formatlist": stdlib.FormatListFunc,
|
|
||||||
"greaterthan": stdlib.GreaterThanFunc,
|
|
||||||
"greaterthanorequalto": stdlib.GreaterThanOrEqualToFunc,
|
|
||||||
"hasindex": stdlib.HasIndexFunc,
|
|
||||||
"indent": stdlib.IndentFunc,
|
|
||||||
"index": stdlib.IndexFunc,
|
|
||||||
"int": stdlib.IntFunc,
|
|
||||||
"jsondecode": stdlib.JSONDecodeFunc,
|
|
||||||
"jsonencode": stdlib.JSONEncodeFunc,
|
|
||||||
"keys": stdlib.KeysFunc,
|
|
||||||
"join": stdlib.JoinFunc,
|
|
||||||
"length": stdlib.LengthFunc,
|
|
||||||
"lessthan": stdlib.LessThanFunc,
|
|
||||||
"lessthanorequalto": stdlib.LessThanOrEqualToFunc,
|
|
||||||
"log": stdlib.LogFunc,
|
|
||||||
"lookup": stdlib.LookupFunc,
|
|
||||||
"lower": stdlib.LowerFunc,
|
|
||||||
"max": stdlib.MaxFunc,
|
|
||||||
"merge": stdlib.MergeFunc,
|
|
||||||
"min": stdlib.MinFunc,
|
|
||||||
"modulo": stdlib.ModuloFunc,
|
|
||||||
"multiply": stdlib.MultiplyFunc,
|
|
||||||
"negate": stdlib.NegateFunc,
|
|
||||||
"notequal": stdlib.NotEqualFunc,
|
|
||||||
"not": stdlib.NotFunc,
|
|
||||||
"or": stdlib.OrFunc,
|
|
||||||
"parseint": stdlib.ParseIntFunc,
|
|
||||||
"pow": stdlib.PowFunc,
|
|
||||||
"range": stdlib.RangeFunc,
|
|
||||||
"regexall": stdlib.RegexAllFunc,
|
|
||||||
"regex": stdlib.RegexFunc,
|
|
||||||
"reverse": stdlib.ReverseFunc,
|
|
||||||
"reverselist": stdlib.ReverseListFunc,
|
|
||||||
"sethaselement": stdlib.SetHasElementFunc,
|
|
||||||
"setintersection": stdlib.SetIntersectionFunc,
|
|
||||||
"setsubtract": stdlib.SetSubtractFunc,
|
|
||||||
"setsymmetricdifference": stdlib.SetSymmetricDifferenceFunc,
|
|
||||||
"setunion": stdlib.SetUnionFunc,
|
|
||||||
"signum": stdlib.SignumFunc,
|
|
||||||
"slice": stdlib.SliceFunc,
|
|
||||||
"sort": stdlib.SortFunc,
|
|
||||||
"split": stdlib.SplitFunc,
|
|
||||||
"strlen": stdlib.StrlenFunc,
|
|
||||||
"substr": stdlib.SubstrFunc,
|
|
||||||
"subtract": stdlib.SubtractFunc,
|
|
||||||
"timeadd": stdlib.TimeAddFunc,
|
|
||||||
"title": stdlib.TitleFunc,
|
|
||||||
"trim": stdlib.TrimFunc,
|
|
||||||
"trimprefix": stdlib.TrimPrefixFunc,
|
|
||||||
"trimspace": stdlib.TrimSpaceFunc,
|
|
||||||
"trimsuffix": stdlib.TrimSuffixFunc,
|
|
||||||
"upper": stdlib.UpperFunc,
|
|
||||||
"values": stdlib.ValuesFunc,
|
|
||||||
"zipmap": stdlib.ZipmapFunc,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// Used in the first pass of decoding instead of the Config struct to disallow
|
|
||||||
// interpolation while parsing variable blocks.
|
|
||||||
type staticConfig struct {
|
|
||||||
Variables []*Variable `hcl:"variable,block"`
|
|
||||||
Remain hcl.Body `hcl:",remain"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func ParseHCL(dt []byte, fn string) (_ *Config, err error) {
|
|
||||||
if strings.HasSuffix(fn, ".json") || strings.HasSuffix(fn, ".hcl") {
|
|
||||||
return parseHCL(dt, fn)
|
|
||||||
}
|
|
||||||
cfg, err := parseHCL(dt, fn+".hcl")
|
|
||||||
if err != nil {
|
|
||||||
cfg2, err2 := parseHCL(dt, fn+".json")
|
|
||||||
if err2 == nil {
|
|
||||||
return cfg2, nil
|
|
||||||
}
|
}
|
||||||
|
return f, true, err
|
||||||
}
|
}
|
||||||
return cfg, err
|
if strings.HasSuffix(fn, ".hcl") {
|
||||||
}
|
f, diags := hclparse.NewParser().ParseHCL(dt, fn)
|
||||||
|
if diags.HasErrors() {
|
||||||
func parseHCL(dt []byte, fn string) (_ *Config, err error) {
|
err = diags
|
||||||
defer func() {
|
|
||||||
err = formatHCLError(dt, err)
|
|
||||||
}()
|
|
||||||
|
|
||||||
// Decode user defined functions, first parsing as hcl and falling back to
|
|
||||||
// json, returning errors based on the file suffix.
|
|
||||||
file, hcldiags := hclsyntax.ParseConfig(dt, fn, hcl.Pos{Line: 1, Column: 1})
|
|
||||||
if hcldiags.HasErrors() {
|
|
||||||
var jsondiags hcl.Diagnostics
|
|
||||||
file, jsondiags = json.Parse(dt, fn)
|
|
||||||
if jsondiags.HasErrors() {
|
|
||||||
fnl := strings.ToLower(fn)
|
|
||||||
if strings.HasSuffix(fnl, ".json") {
|
|
||||||
return nil, jsondiags
|
|
||||||
} else {
|
|
||||||
return nil, hcldiags
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
return f, true, err
|
||||||
}
|
}
|
||||||
|
f, diags := hclparse.NewParser().ParseHCL(dt, fn+".hcl")
|
||||||
userFunctions, _, diags := userfunc.DecodeUserFunctions(file.Body, "function", func() *hcl.EvalContext {
|
|
||||||
return &hcl.EvalContext{
|
|
||||||
Functions: stdlibFunctions,
|
|
||||||
}
|
|
||||||
})
|
|
||||||
if diags.HasErrors() {
|
if diags.HasErrors() {
|
||||||
return nil, diags
|
f, diags2 := hclparse.NewParser().ParseJSON(dt, fn+".json")
|
||||||
}
|
if !diags2.HasErrors() {
|
||||||
|
return f, true, nil
|
||||||
var sc staticConfig
|
|
||||||
|
|
||||||
// Decode only variable blocks without interpolation.
|
|
||||||
if err := hclsimple.Decode(fn, dt, nil, &sc); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set all variables to their default value if defined.
|
|
||||||
variables := make(map[string]cty.Value)
|
|
||||||
for _, variable := range sc.Variables {
|
|
||||||
variables[variable.Name] = cty.StringVal(variable.Default)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Override default with values from environment.
|
|
||||||
for _, env := range os.Environ() {
|
|
||||||
parts := strings.SplitN(env, "=", 2)
|
|
||||||
name, value := parts[0], parts[1]
|
|
||||||
if _, ok := variables[name]; ok {
|
|
||||||
variables[name] = cty.StringVal(value)
|
|
||||||
}
|
}
|
||||||
|
return nil, false, diags
|
||||||
}
|
}
|
||||||
|
return f, true, nil
|
||||||
functions := make(map[string]function.Function)
|
|
||||||
for k, v := range stdlibFunctions {
|
|
||||||
functions[k] = v
|
|
||||||
}
|
|
||||||
for k, v := range userFunctions {
|
|
||||||
functions[k] = v
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := &hcl.EvalContext{
|
|
||||||
Variables: variables,
|
|
||||||
Functions: functions,
|
|
||||||
}
|
|
||||||
|
|
||||||
var c Config
|
|
||||||
|
|
||||||
// Decode with variables and functions.
|
|
||||||
if err := hclsimple.Decode(fn, dt, ctx, &c); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &c, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func formatHCLError(dt []byte, err error) error {
|
func formatHCLError(err error, files []File) error {
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -207,6 +49,13 @@ func formatHCLError(dt []byte, err error) error {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if d.Subject != nil {
|
if d.Subject != nil {
|
||||||
|
var dt []byte
|
||||||
|
for _, f := range files {
|
||||||
|
if d.Subject.Filename == f.Name {
|
||||||
|
dt = f.Data
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
src := errdefs.Source{
|
src := errdefs.Source{
|
||||||
Info: &pb.SourceInfo{
|
Info: &pb.SourceInfo{
|
||||||
Filename: d.Subject.Filename,
|
Filename: d.Subject.Filename,
|
||||||
|
|||||||
571
bake/hcl_test.go
571
bake/hcl_test.go
@@ -7,11 +7,9 @@ import (
|
|||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestParseHCL(t *testing.T) {
|
func TestHCLBasic(t *testing.T) {
|
||||||
t.Parallel()
|
t.Parallel()
|
||||||
|
dt := []byte(`
|
||||||
t.Run("Basic", func(t *testing.T) {
|
|
||||||
dt := []byte(`
|
|
||||||
group "default" {
|
group "default" {
|
||||||
targets = ["db", "webapp"]
|
targets = ["db", "webapp"]
|
||||||
}
|
}
|
||||||
@@ -44,32 +42,31 @@ func TestParseHCL(t *testing.T) {
|
|||||||
}
|
}
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseHCL(dt, "docker-bake.hcl")
|
c, err := ParseFile(dt, "docker-bake.hcl")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 1, len(c.Groups))
|
||||||
|
require.Equal(t, "default", c.Groups[0].Name)
|
||||||
|
require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Groups))
|
require.Equal(t, 4, len(c.Targets))
|
||||||
require.Equal(t, "default", c.Groups[0].Name)
|
require.Equal(t, c.Targets[0].Name, "db")
|
||||||
require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets)
|
require.Equal(t, "./db", *c.Targets[0].Context)
|
||||||
|
|
||||||
require.Equal(t, 4, len(c.Targets))
|
require.Equal(t, c.Targets[1].Name, "webapp")
|
||||||
require.Equal(t, c.Targets[0].Name, "db")
|
require.Equal(t, 1, len(c.Targets[1].Args))
|
||||||
require.Equal(t, "./db", *c.Targets[0].Context)
|
require.Equal(t, "123", c.Targets[1].Args["buildno"])
|
||||||
|
|
||||||
require.Equal(t, c.Targets[1].Name, "webapp")
|
require.Equal(t, c.Targets[2].Name, "cross")
|
||||||
require.Equal(t, 1, len(c.Targets[1].Args))
|
require.Equal(t, 2, len(c.Targets[2].Platforms))
|
||||||
require.Equal(t, "123", c.Targets[1].Args["buildno"])
|
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[2].Platforms)
|
||||||
|
|
||||||
require.Equal(t, c.Targets[2].Name, "cross")
|
require.Equal(t, c.Targets[3].Name, "webapp-plus")
|
||||||
require.Equal(t, 2, len(c.Targets[2].Platforms))
|
require.Equal(t, 1, len(c.Targets[3].Args))
|
||||||
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[2].Platforms)
|
require.Equal(t, map[string]string{"IAMCROSS": "true"}, c.Targets[3].Args)
|
||||||
|
}
|
||||||
|
|
||||||
require.Equal(t, c.Targets[3].Name, "webapp-plus")
|
func TestHCLBasicInJSON(t *testing.T) {
|
||||||
require.Equal(t, 1, len(c.Targets[3].Args))
|
dt := []byte(`
|
||||||
require.Equal(t, map[string]string{"IAMCROSS": "true"}, c.Targets[3].Args)
|
|
||||||
})
|
|
||||||
|
|
||||||
t.Run("BasicInJSON", func(t *testing.T) {
|
|
||||||
dt := []byte(`
|
|
||||||
{
|
{
|
||||||
"group": {
|
"group": {
|
||||||
"default": {
|
"default": {
|
||||||
@@ -104,32 +101,32 @@ func TestParseHCL(t *testing.T) {
|
|||||||
}
|
}
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseHCL(dt, "docker-bake.json")
|
c, err := ParseFile(dt, "docker-bake.json")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Groups))
|
require.Equal(t, 1, len(c.Groups))
|
||||||
require.Equal(t, "default", c.Groups[0].Name)
|
require.Equal(t, "default", c.Groups[0].Name)
|
||||||
require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets)
|
require.Equal(t, []string{"db", "webapp"}, c.Groups[0].Targets)
|
||||||
|
|
||||||
require.Equal(t, 4, len(c.Targets))
|
require.Equal(t, 4, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "db")
|
require.Equal(t, c.Targets[0].Name, "db")
|
||||||
require.Equal(t, "./db", *c.Targets[0].Context)
|
require.Equal(t, "./db", *c.Targets[0].Context)
|
||||||
|
|
||||||
require.Equal(t, c.Targets[1].Name, "webapp")
|
require.Equal(t, c.Targets[1].Name, "webapp")
|
||||||
require.Equal(t, 1, len(c.Targets[1].Args))
|
require.Equal(t, 1, len(c.Targets[1].Args))
|
||||||
require.Equal(t, "123", c.Targets[1].Args["buildno"])
|
require.Equal(t, "123", c.Targets[1].Args["buildno"])
|
||||||
|
|
||||||
require.Equal(t, c.Targets[2].Name, "cross")
|
require.Equal(t, c.Targets[2].Name, "cross")
|
||||||
require.Equal(t, 2, len(c.Targets[2].Platforms))
|
require.Equal(t, 2, len(c.Targets[2].Platforms))
|
||||||
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[2].Platforms)
|
require.Equal(t, []string{"linux/amd64", "linux/arm64"}, c.Targets[2].Platforms)
|
||||||
|
|
||||||
require.Equal(t, c.Targets[3].Name, "webapp-plus")
|
require.Equal(t, c.Targets[3].Name, "webapp-plus")
|
||||||
require.Equal(t, 1, len(c.Targets[3].Args))
|
require.Equal(t, 1, len(c.Targets[3].Args))
|
||||||
require.Equal(t, map[string]string{"IAMCROSS": "true"}, c.Targets[3].Args)
|
require.Equal(t, map[string]string{"IAMCROSS": "true"}, c.Targets[3].Args)
|
||||||
})
|
}
|
||||||
|
|
||||||
t.Run("WithFunctions", func(t *testing.T) {
|
func TestHCLWithFunctions(t *testing.T) {
|
||||||
dt := []byte(`
|
dt := []byte(`
|
||||||
group "default" {
|
group "default" {
|
||||||
targets = ["webapp"]
|
targets = ["webapp"]
|
||||||
}
|
}
|
||||||
@@ -141,20 +138,20 @@ func TestParseHCL(t *testing.T) {
|
|||||||
}
|
}
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseHCL(dt, "docker-bake.hcl")
|
c, err := ParseFile(dt, "docker-bake.hcl")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Groups))
|
require.Equal(t, 1, len(c.Groups))
|
||||||
require.Equal(t, "default", c.Groups[0].Name)
|
require.Equal(t, "default", c.Groups[0].Name)
|
||||||
require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
|
require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "webapp")
|
require.Equal(t, c.Targets[0].Name, "webapp")
|
||||||
require.Equal(t, "124", c.Targets[0].Args["buildno"])
|
require.Equal(t, "124", c.Targets[0].Args["buildno"])
|
||||||
})
|
}
|
||||||
|
|
||||||
t.Run("WithUserDefinedFunctions", func(t *testing.T) {
|
func TestHCLWithUserDefinedFunctions(t *testing.T) {
|
||||||
dt := []byte(`
|
dt := []byte(`
|
||||||
function "increment" {
|
function "increment" {
|
||||||
params = [number]
|
params = [number]
|
||||||
result = number + 1
|
result = number + 1
|
||||||
@@ -171,20 +168,20 @@ func TestParseHCL(t *testing.T) {
|
|||||||
}
|
}
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseHCL(dt, "docker-bake.hcl")
|
c, err := ParseFile(dt, "docker-bake.hcl")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Groups))
|
require.Equal(t, 1, len(c.Groups))
|
||||||
require.Equal(t, "default", c.Groups[0].Name)
|
require.Equal(t, "default", c.Groups[0].Name)
|
||||||
require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
|
require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "webapp")
|
require.Equal(t, c.Targets[0].Name, "webapp")
|
||||||
require.Equal(t, "124", c.Targets[0].Args["buildno"])
|
require.Equal(t, "124", c.Targets[0].Args["buildno"])
|
||||||
})
|
}
|
||||||
|
|
||||||
t.Run("WithVariables", func(t *testing.T) {
|
func TestHCLWithVariables(t *testing.T) {
|
||||||
dt := []byte(`
|
dt := []byte(`
|
||||||
variable "BUILD_NUMBER" {
|
variable "BUILD_NUMBER" {
|
||||||
default = "123"
|
default = "123"
|
||||||
}
|
}
|
||||||
@@ -200,54 +197,426 @@ func TestParseHCL(t *testing.T) {
|
|||||||
}
|
}
|
||||||
`)
|
`)
|
||||||
|
|
||||||
c, err := ParseHCL(dt, "docker-bake.hcl")
|
c, err := ParseFile(dt, "docker-bake.hcl")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Groups))
|
require.Equal(t, 1, len(c.Groups))
|
||||||
require.Equal(t, "default", c.Groups[0].Name)
|
require.Equal(t, "default", c.Groups[0].Name)
|
||||||
require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
|
require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "webapp")
|
require.Equal(t, c.Targets[0].Name, "webapp")
|
||||||
require.Equal(t, "123", c.Targets[0].Args["buildno"])
|
require.Equal(t, "123", c.Targets[0].Args["buildno"])
|
||||||
|
|
||||||
os.Setenv("BUILD_NUMBER", "456")
|
os.Setenv("BUILD_NUMBER", "456")
|
||||||
|
|
||||||
c, err = ParseHCL(dt, "docker-bake.hcl")
|
c, err = ParseFile(dt, "docker-bake.hcl")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Groups))
|
require.Equal(t, 1, len(c.Groups))
|
||||||
require.Equal(t, "default", c.Groups[0].Name)
|
require.Equal(t, "default", c.Groups[0].Name)
|
||||||
require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
|
require.Equal(t, []string{"webapp"}, c.Groups[0].Targets)
|
||||||
|
|
||||||
require.Equal(t, 1, len(c.Targets))
|
require.Equal(t, 1, len(c.Targets))
|
||||||
require.Equal(t, c.Targets[0].Name, "webapp")
|
require.Equal(t, c.Targets[0].Name, "webapp")
|
||||||
require.Equal(t, "456", c.Targets[0].Args["buildno"])
|
require.Equal(t, "456", c.Targets[0].Args["buildno"])
|
||||||
})
|
}
|
||||||
|
|
||||||
t.Run("WithIncorrectVariables", func(t *testing.T) {
|
func TestHCLWithVariablesInFunctions(t *testing.T) {
|
||||||
dt := []byte(`
|
dt := []byte(`
|
||||||
variable "DEFAULT_BUILD_NUMBER" {
|
variable "REPO" {
|
||||||
default = "1"
|
default = "user/repo"
|
||||||
}
|
}
|
||||||
|
function "tag" {
|
||||||
variable "BUILD_NUMBER" {
|
params = [tag]
|
||||||
default = "${DEFAULT_BUILD_NUMBER}"
|
result = ["${REPO}:${tag}"]
|
||||||
}
|
|
||||||
|
|
||||||
group "default" {
|
|
||||||
targets = ["webapp"]
|
|
||||||
}
|
}
|
||||||
|
|
||||||
target "webapp" {
|
target "webapp" {
|
||||||
|
tags = tag("v1")
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
|
||||||
|
c, err := ParseFile(dt, "docker-bake.hcl")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, c.Targets[0].Name, "webapp")
|
||||||
|
require.Equal(t, []string{"user/repo:v1"}, c.Targets[0].Tags)
|
||||||
|
|
||||||
|
os.Setenv("REPO", "docker/buildx")
|
||||||
|
|
||||||
|
c, err = ParseFile(dt, "docker-bake.hcl")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, c.Targets[0].Name, "webapp")
|
||||||
|
require.Equal(t, []string{"docker/buildx:v1"}, c.Targets[0].Tags)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHCLMultiFileSharedVariables(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
variable "FOO" {
|
||||||
|
default = "abc"
|
||||||
|
}
|
||||||
|
target "app" {
|
||||||
args = {
|
args = {
|
||||||
buildno = "${BUILD_NUMBER}"
|
v1 = "pre-${FOO}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
dt2 := []byte(`
|
||||||
|
target "app" {
|
||||||
|
args = {
|
||||||
|
v2 = "${FOO}-post"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
`)
|
`)
|
||||||
|
|
||||||
_, err := ParseHCL(dt, "docker-bake.hcl")
|
c, err := ParseFiles([]File{
|
||||||
require.Error(t, err)
|
{Data: dt, Name: "c1.hcl"},
|
||||||
require.Contains(t, err.Error(), "docker-bake.hcl:7,17-37: Variables not allowed; Variables may not be used here.")
|
{Data: dt2, Name: "c2.hcl"},
|
||||||
})
|
}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, c.Targets[0].Name, "app")
|
||||||
|
require.Equal(t, "pre-abc", c.Targets[0].Args["v1"])
|
||||||
|
require.Equal(t, "abc-post", c.Targets[0].Args["v2"])
|
||||||
|
|
||||||
|
os.Setenv("FOO", "def")
|
||||||
|
|
||||||
|
c, err = ParseFiles([]File{
|
||||||
|
{Data: dt, Name: "c1.hcl"},
|
||||||
|
{Data: dt2, Name: "c2.hcl"},
|
||||||
|
}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, c.Targets[0].Name, "app")
|
||||||
|
require.Equal(t, "pre-def", c.Targets[0].Args["v1"])
|
||||||
|
require.Equal(t, "def-post", c.Targets[0].Args["v2"])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHCLVarsWithVars(t *testing.T) {
|
||||||
|
os.Unsetenv("FOO")
|
||||||
|
dt := []byte(`
|
||||||
|
variable "FOO" {
|
||||||
|
default = upper("${BASE}def")
|
||||||
|
}
|
||||||
|
variable "BAR" {
|
||||||
|
default = "-${FOO}-"
|
||||||
|
}
|
||||||
|
target "app" {
|
||||||
|
args = {
|
||||||
|
v1 = "pre-${BAR}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
dt2 := []byte(`
|
||||||
|
variable "BASE" {
|
||||||
|
default = "abc"
|
||||||
|
}
|
||||||
|
target "app" {
|
||||||
|
args = {
|
||||||
|
v2 = "${FOO}-post"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
|
||||||
|
c, err := ParseFiles([]File{
|
||||||
|
{Data: dt, Name: "c1.hcl"},
|
||||||
|
{Data: dt2, Name: "c2.hcl"},
|
||||||
|
}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, c.Targets[0].Name, "app")
|
||||||
|
require.Equal(t, "pre--ABCDEF-", c.Targets[0].Args["v1"])
|
||||||
|
require.Equal(t, "ABCDEF-post", c.Targets[0].Args["v2"])
|
||||||
|
|
||||||
|
os.Setenv("BASE", "new")
|
||||||
|
|
||||||
|
c, err = ParseFiles([]File{
|
||||||
|
{Data: dt, Name: "c1.hcl"},
|
||||||
|
{Data: dt2, Name: "c2.hcl"},
|
||||||
|
}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, c.Targets[0].Name, "app")
|
||||||
|
require.Equal(t, "pre--NEWDEF-", c.Targets[0].Args["v1"])
|
||||||
|
require.Equal(t, "NEWDEF-post", c.Targets[0].Args["v2"])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHCLTypedVariables(t *testing.T) {
|
||||||
|
os.Unsetenv("FOO")
|
||||||
|
dt := []byte(`
|
||||||
|
variable "FOO" {
|
||||||
|
default = 3
|
||||||
|
}
|
||||||
|
variable "IS_FOO" {
|
||||||
|
default = true
|
||||||
|
}
|
||||||
|
target "app" {
|
||||||
|
args = {
|
||||||
|
v1 = FOO > 5 ? "higher" : "lower"
|
||||||
|
v2 = IS_FOO ? "yes" : "no"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
|
||||||
|
c, err := ParseFile(dt, "docker-bake.hcl")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, c.Targets[0].Name, "app")
|
||||||
|
require.Equal(t, "lower", c.Targets[0].Args["v1"])
|
||||||
|
require.Equal(t, "yes", c.Targets[0].Args["v2"])
|
||||||
|
|
||||||
|
os.Setenv("FOO", "5.1")
|
||||||
|
os.Setenv("IS_FOO", "0")
|
||||||
|
|
||||||
|
c, err = ParseFile(dt, "docker-bake.hcl")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, c.Targets[0].Name, "app")
|
||||||
|
require.Equal(t, "higher", c.Targets[0].Args["v1"])
|
||||||
|
require.Equal(t, "no", c.Targets[0].Args["v2"])
|
||||||
|
|
||||||
|
os.Setenv("FOO", "NaN")
|
||||||
|
_, err = ParseFile(dt, "docker-bake.hcl")
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "failed to parse FOO as number")
|
||||||
|
|
||||||
|
os.Setenv("FOO", "0")
|
||||||
|
os.Setenv("IS_FOO", "maybe")
|
||||||
|
|
||||||
|
_, err = ParseFile(dt, "docker-bake.hcl")
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "failed to parse IS_FOO as bool")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHCLVariableCycle(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
variable "FOO" {
|
||||||
|
default = BAR
|
||||||
|
}
|
||||||
|
variable "FOO2" {
|
||||||
|
default = FOO
|
||||||
|
}
|
||||||
|
variable "BAR" {
|
||||||
|
default = FOO
|
||||||
|
}
|
||||||
|
target "app" {}
|
||||||
|
`)
|
||||||
|
|
||||||
|
_, err := ParseFile(dt, "docker-bake.hcl")
|
||||||
|
require.Error(t, err)
|
||||||
|
require.Contains(t, err.Error(), "variable cycle not allowed")
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHCLAttrs(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
FOO="abc"
|
||||||
|
BAR="attr-${FOO}def"
|
||||||
|
target "app" {
|
||||||
|
args = {
|
||||||
|
"v1": BAR
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
|
||||||
|
c, err := ParseFile(dt, "docker-bake.hcl")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, c.Targets[0].Name, "app")
|
||||||
|
require.Equal(t, "attr-abcdef", c.Targets[0].Args["v1"])
|
||||||
|
|
||||||
|
// env does not apply if no variable
|
||||||
|
os.Setenv("FOO", "bar")
|
||||||
|
c, err = ParseFile(dt, "docker-bake.hcl")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, c.Targets[0].Name, "app")
|
||||||
|
require.Equal(t, "attr-abcdef", c.Targets[0].Args["v1"])
|
||||||
|
// attr-multifile
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHCLAttrsCustomType(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
platforms=["linux/arm64", "linux/amd64"]
|
||||||
|
target "app" {
|
||||||
|
platforms = platforms
|
||||||
|
args = {
|
||||||
|
"v1": platforms[0]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
|
||||||
|
c, err := ParseFile(dt, "docker-bake.hcl")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, c.Targets[0].Name, "app")
|
||||||
|
require.Equal(t, []string{"linux/arm64", "linux/amd64"}, c.Targets[0].Platforms)
|
||||||
|
require.Equal(t, "linux/arm64", c.Targets[0].Args["v1"])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHCLMultiFileAttrs(t *testing.T) {
|
||||||
|
os.Unsetenv("FOO")
|
||||||
|
dt := []byte(`
|
||||||
|
variable "FOO" {
|
||||||
|
default = "abc"
|
||||||
|
}
|
||||||
|
target "app" {
|
||||||
|
args = {
|
||||||
|
v1 = "pre-${FOO}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
dt2 := []byte(`
|
||||||
|
FOO="def"
|
||||||
|
`)
|
||||||
|
|
||||||
|
c, err := ParseFiles([]File{
|
||||||
|
{Data: dt, Name: "c1.hcl"},
|
||||||
|
{Data: dt2, Name: "c2.hcl"},
|
||||||
|
}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, c.Targets[0].Name, "app")
|
||||||
|
require.Equal(t, "pre-def", c.Targets[0].Args["v1"])
|
||||||
|
|
||||||
|
os.Setenv("FOO", "ghi")
|
||||||
|
|
||||||
|
c, err = ParseFiles([]File{
|
||||||
|
{Data: dt, Name: "c1.hcl"},
|
||||||
|
{Data: dt2, Name: "c2.hcl"},
|
||||||
|
}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, c.Targets[0].Name, "app")
|
||||||
|
require.Equal(t, "pre-ghi", c.Targets[0].Args["v1"])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestJSONAttributes(t *testing.T) {
|
||||||
|
dt := []byte(`{"FOO": "abc", "variable": {"BAR": {"default": "def"}}, "target": { "app": { "args": {"v1": "pre-${FOO}-${BAR}"}} } }`)
|
||||||
|
|
||||||
|
c, err := ParseFile(dt, "docker-bake.json")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, c.Targets[0].Name, "app")
|
||||||
|
require.Equal(t, "pre-abc-def", c.Targets[0].Args["v1"])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestJSONFunctions(t *testing.T) {
|
||||||
|
dt := []byte(`{
|
||||||
|
"FOO": "abc",
|
||||||
|
"function": {
|
||||||
|
"myfunc": {
|
||||||
|
"params": ["inp"],
|
||||||
|
"result": "<${upper(inp)}-${FOO}>"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"app": {
|
||||||
|
"args": {
|
||||||
|
"v1": "pre-${myfunc(\"foo\")}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}}`)
|
||||||
|
|
||||||
|
c, err := ParseFile(dt, "docker-bake.json")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, c.Targets[0].Name, "app")
|
||||||
|
require.Equal(t, "pre-<FOO-abc>", c.Targets[0].Args["v1"])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHCLFunctionInAttr(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
function "brace" {
|
||||||
|
params = [inp]
|
||||||
|
result = "[${inp}]"
|
||||||
|
}
|
||||||
|
function "myupper" {
|
||||||
|
params = [val]
|
||||||
|
result = "${upper(val)} <> ${brace(v2)}"
|
||||||
|
}
|
||||||
|
|
||||||
|
v1=myupper("foo")
|
||||||
|
v2=lower("BAZ")
|
||||||
|
target "app" {
|
||||||
|
args = {
|
||||||
|
"v1": v1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
|
||||||
|
c, err := ParseFile(dt, "docker-bake.hcl")
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, c.Targets[0].Name, "app")
|
||||||
|
require.Equal(t, "FOO <> [baz]", c.Targets[0].Args["v1"])
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHCLCombineCompose(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
target "app" {
|
||||||
|
context = "dir"
|
||||||
|
args = {
|
||||||
|
v1 = "foo"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
dt2 := []byte(`
|
||||||
|
version: "3"
|
||||||
|
|
||||||
|
services:
|
||||||
|
app:
|
||||||
|
build:
|
||||||
|
dockerfile: Dockerfile-alternate
|
||||||
|
args:
|
||||||
|
v2: "bar"
|
||||||
|
`)
|
||||||
|
|
||||||
|
c, err := ParseFiles([]File{
|
||||||
|
{Data: dt, Name: "c1.hcl"},
|
||||||
|
{Data: dt2, Name: "c2.yml"},
|
||||||
|
}, nil)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, c.Targets[0].Name, "app")
|
||||||
|
require.Equal(t, "foo", c.Targets[0].Args["v1"])
|
||||||
|
require.Equal(t, "bar", c.Targets[0].Args["v2"])
|
||||||
|
require.Equal(t, "dir", *c.Targets[0].Context)
|
||||||
|
require.Equal(t, "Dockerfile-alternate", *c.Targets[0].Dockerfile)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHCLBuiltinVars(t *testing.T) {
|
||||||
|
dt := []byte(`
|
||||||
|
target "app" {
|
||||||
|
context = BAKE_CMD_CONTEXT
|
||||||
|
dockerfile = "test"
|
||||||
|
}
|
||||||
|
`)
|
||||||
|
|
||||||
|
c, err := ParseFiles([]File{
|
||||||
|
{Data: dt, Name: "c1.hcl"},
|
||||||
|
}, map[string]string{
|
||||||
|
"BAKE_CMD_CONTEXT": "foo",
|
||||||
|
})
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
require.Equal(t, 1, len(c.Targets))
|
||||||
|
require.Equal(t, c.Targets[0].Name, "app")
|
||||||
|
require.Equal(t, "foo", *c.Targets[0].Context)
|
||||||
|
require.Equal(t, "test", *c.Targets[0].Dockerfile)
|
||||||
}
|
}
|
||||||
|
|||||||
153
bake/hclparser/expr.go
Normal file
153
bake/hclparser/expr.go
Normal file
@@ -0,0 +1,153 @@
|
|||||||
|
package hclparser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
"github.com/hashicorp/hcl/v2"
|
||||||
|
"github.com/hashicorp/hcl/v2/hclsyntax"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
func funcCalls(exp hcl.Expression) ([]string, hcl.Diagnostics) {
|
||||||
|
node, ok := exp.(hclsyntax.Node)
|
||||||
|
if !ok {
|
||||||
|
fns, err := jsonFuncCallsRecursive(exp)
|
||||||
|
if err != nil {
|
||||||
|
return nil, hcl.Diagnostics{
|
||||||
|
&hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Invalid expression",
|
||||||
|
Detail: err.Error(),
|
||||||
|
Subject: exp.Range().Ptr(),
|
||||||
|
Context: exp.Range().Ptr(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fns, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var funcnames []string
|
||||||
|
hcldiags := hclsyntax.VisitAll(node, func(n hclsyntax.Node) hcl.Diagnostics {
|
||||||
|
if fe, ok := n.(*hclsyntax.FunctionCallExpr); ok {
|
||||||
|
funcnames = append(funcnames, fe.Name)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if hcldiags.HasErrors() {
|
||||||
|
return nil, hcldiags
|
||||||
|
}
|
||||||
|
return funcnames, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func jsonFuncCallsRecursive(exp hcl.Expression) ([]string, error) {
|
||||||
|
je, ok := exp.(jsonExp)
|
||||||
|
if !ok {
|
||||||
|
return nil, errors.Errorf("invalid expression type %T", exp)
|
||||||
|
}
|
||||||
|
m := map[string]struct{}{}
|
||||||
|
for _, e := range elementExpressions(je, exp) {
|
||||||
|
if err := appendJSONFuncCalls(e, m); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
arr := make([]string, 0, len(m))
|
||||||
|
for n := range m {
|
||||||
|
arr = append(arr, n)
|
||||||
|
}
|
||||||
|
return arr, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func appendJSONFuncCalls(exp hcl.Expression, m map[string]struct{}) error {
|
||||||
|
v := reflect.ValueOf(exp)
|
||||||
|
if v.Kind() != reflect.Ptr || v.IsNil() {
|
||||||
|
return errors.Errorf("invalid json expression kind %T %v", exp, v.Kind())
|
||||||
|
}
|
||||||
|
src := v.Elem().FieldByName("src")
|
||||||
|
if src.IsZero() {
|
||||||
|
return errors.Errorf("%v has no property src", v.Elem().Type())
|
||||||
|
}
|
||||||
|
if src.Kind() != reflect.Interface {
|
||||||
|
return errors.Errorf("%v src is not interface: %v", src.Type(), src.Kind())
|
||||||
|
}
|
||||||
|
src = src.Elem()
|
||||||
|
if src.IsNil() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if src.Kind() == reflect.Ptr {
|
||||||
|
src = src.Elem()
|
||||||
|
}
|
||||||
|
if src.Kind() != reflect.Struct {
|
||||||
|
return errors.Errorf("%v is not struct: %v", src.Type(), src.Kind())
|
||||||
|
}
|
||||||
|
|
||||||
|
// hcl/v2/json/ast#stringVal
|
||||||
|
val := src.FieldByName("Value")
|
||||||
|
if val.IsZero() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
rng := src.FieldByName("SrcRange")
|
||||||
|
if val.IsZero() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var stringVal struct {
|
||||||
|
Value string
|
||||||
|
SrcRange hcl.Range
|
||||||
|
}
|
||||||
|
|
||||||
|
if !val.Type().AssignableTo(reflect.ValueOf(stringVal.Value).Type()) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if !rng.Type().AssignableTo(reflect.ValueOf(stringVal.SrcRange).Type()) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// reflect.Set does not work for unexported fields
|
||||||
|
stringVal.Value = *(*string)(unsafe.Pointer(val.UnsafeAddr()))
|
||||||
|
stringVal.SrcRange = *(*hcl.Range)(unsafe.Pointer(rng.UnsafeAddr()))
|
||||||
|
|
||||||
|
expr, diags := hclsyntax.ParseExpression([]byte(stringVal.Value), stringVal.SrcRange.Filename, stringVal.SrcRange.Start)
|
||||||
|
if diags.HasErrors() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
fns, err := funcCalls(expr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, fn := range fns {
|
||||||
|
m[fn] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type jsonExp interface {
|
||||||
|
ExprList() []hcl.Expression
|
||||||
|
ExprMap() []hcl.KeyValuePair
|
||||||
|
}
|
||||||
|
|
||||||
|
func elementExpressions(je jsonExp, exp hcl.Expression) []hcl.Expression {
|
||||||
|
list := je.ExprList()
|
||||||
|
if len(list) != 0 {
|
||||||
|
exp := make([]hcl.Expression, 0, len(list))
|
||||||
|
for _, e := range list {
|
||||||
|
if je, ok := e.(jsonExp); ok {
|
||||||
|
exp = append(exp, elementExpressions(je, e)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return exp
|
||||||
|
}
|
||||||
|
kvlist := je.ExprMap()
|
||||||
|
if len(kvlist) != 0 {
|
||||||
|
exp := make([]hcl.Expression, 0, len(kvlist)*2)
|
||||||
|
for _, p := range kvlist {
|
||||||
|
exp = append(exp, p.Key)
|
||||||
|
if je, ok := p.Value.(jsonExp); ok {
|
||||||
|
exp = append(exp, elementExpressions(je, p.Value)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return exp
|
||||||
|
}
|
||||||
|
return []hcl.Expression{exp}
|
||||||
|
}
|
||||||
498
bake/hclparser/hclparser.go
Normal file
498
bake/hclparser/hclparser.go
Normal file
@@ -0,0 +1,498 @@
|
|||||||
|
package hclparser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"math/big"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/util/userfunc"
|
||||||
|
"github.com/hashicorp/hcl/v2"
|
||||||
|
"github.com/hashicorp/hcl/v2/gohcl"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/zclconf/go-cty/cty"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Opt struct {
|
||||||
|
LookupVar func(string) (string, bool)
|
||||||
|
Vars map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
type variable struct {
|
||||||
|
Name string `json:"-" hcl:"name,label"`
|
||||||
|
Default *hcl.Attribute `json:"default,omitempty" hcl:"default,optional"`
|
||||||
|
Body hcl.Body `json:"-" hcl:",body"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type functionDef struct {
|
||||||
|
Name string `json:"-" hcl:"name,label"`
|
||||||
|
Params *hcl.Attribute `json:"params,omitempty" hcl:"params"`
|
||||||
|
Variadic *hcl.Attribute `json:"variadic_param,omitempty" hcl:"variadic_params"`
|
||||||
|
Result *hcl.Attribute `json:"result,omitempty" hcl:"result"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type inputs struct {
|
||||||
|
Variables []*variable `hcl:"variable,block"`
|
||||||
|
Functions []*functionDef `hcl:"function,block"`
|
||||||
|
|
||||||
|
Remain hcl.Body `json:"-" hcl:",remain"`
|
||||||
|
}
|
||||||
|
|
||||||
|
type parser struct {
|
||||||
|
opt Opt
|
||||||
|
|
||||||
|
vars map[string]*variable
|
||||||
|
attrs map[string]*hcl.Attribute
|
||||||
|
funcs map[string]*functionDef
|
||||||
|
|
||||||
|
ectx *hcl.EvalContext
|
||||||
|
|
||||||
|
progress map[string]struct{}
|
||||||
|
progressF map[string]struct{}
|
||||||
|
doneF map[string]struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) loadDeps(exp hcl.Expression, exclude map[string]struct{}) hcl.Diagnostics {
|
||||||
|
fns, hcldiags := funcCalls(exp)
|
||||||
|
if hcldiags.HasErrors() {
|
||||||
|
return hcldiags
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, fn := range fns {
|
||||||
|
if err := p.resolveFunction(fn); err != nil {
|
||||||
|
return hcl.Diagnostics{
|
||||||
|
&hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Invalid expression",
|
||||||
|
Detail: err.Error(),
|
||||||
|
Subject: exp.Range().Ptr(),
|
||||||
|
Context: exp.Range().Ptr(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range exp.Variables() {
|
||||||
|
if _, ok := exclude[v.RootName()]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := p.resolveValue(v.RootName()); err != nil {
|
||||||
|
return hcl.Diagnostics{
|
||||||
|
&hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Invalid expression",
|
||||||
|
Detail: err.Error(),
|
||||||
|
Subject: v.SourceRange().Ptr(),
|
||||||
|
Context: v.SourceRange().Ptr(),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) resolveFunction(name string) error {
|
||||||
|
if _, ok := p.doneF[name]; ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
f, ok := p.funcs[name]
|
||||||
|
if !ok {
|
||||||
|
if _, ok := p.ectx.Functions[name]; ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return errors.Errorf("undefined function %s", name)
|
||||||
|
}
|
||||||
|
if _, ok := p.progressF[name]; ok {
|
||||||
|
return errors.Errorf("function cycle not allowed for %s", name)
|
||||||
|
}
|
||||||
|
p.progressF[name] = struct{}{}
|
||||||
|
|
||||||
|
paramExprs, paramsDiags := hcl.ExprList(f.Params.Expr)
|
||||||
|
if paramsDiags.HasErrors() {
|
||||||
|
return paramsDiags
|
||||||
|
}
|
||||||
|
var diags hcl.Diagnostics
|
||||||
|
params := map[string]struct{}{}
|
||||||
|
for _, paramExpr := range paramExprs {
|
||||||
|
param := hcl.ExprAsKeyword(paramExpr)
|
||||||
|
if param == "" {
|
||||||
|
diags = append(diags, &hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Invalid param element",
|
||||||
|
Detail: "Each parameter name must be an identifier.",
|
||||||
|
Subject: paramExpr.Range().Ptr(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
params[param] = struct{}{}
|
||||||
|
}
|
||||||
|
var variadic hcl.Expression
|
||||||
|
if f.Variadic != nil {
|
||||||
|
variadic = f.Variadic.Expr
|
||||||
|
param := hcl.ExprAsKeyword(variadic)
|
||||||
|
if param == "" {
|
||||||
|
diags = append(diags, &hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Invalid param element",
|
||||||
|
Detail: "Each parameter name must be an identifier.",
|
||||||
|
Subject: f.Variadic.Range.Ptr(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
params[param] = struct{}{}
|
||||||
|
}
|
||||||
|
if diags.HasErrors() {
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
if diags := p.loadDeps(f.Result.Expr, params); diags.HasErrors() {
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
v, diags := userfunc.NewFunction(f.Params.Expr, variadic, f.Result.Expr, func() *hcl.EvalContext {
|
||||||
|
return p.ectx
|
||||||
|
})
|
||||||
|
if diags.HasErrors() {
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
p.doneF[name] = struct{}{}
|
||||||
|
p.ectx.Functions[name] = v
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *parser) resolveValue(name string) (err error) {
|
||||||
|
if _, ok := p.ectx.Variables[name]; ok {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if _, ok := p.progress[name]; ok {
|
||||||
|
return errors.Errorf("variable cycle not allowed for %s", name)
|
||||||
|
}
|
||||||
|
p.progress[name] = struct{}{}
|
||||||
|
|
||||||
|
var v *cty.Value
|
||||||
|
defer func() {
|
||||||
|
if v != nil {
|
||||||
|
p.ectx.Variables[name] = *v
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
def, ok := p.attrs[name]
|
||||||
|
if _, builtin := p.opt.Vars[name]; !ok && !builtin {
|
||||||
|
vr, ok := p.vars[name]
|
||||||
|
if !ok {
|
||||||
|
return errors.Errorf("undefined variable %q", name)
|
||||||
|
}
|
||||||
|
def = vr.Default
|
||||||
|
}
|
||||||
|
|
||||||
|
if def == nil {
|
||||||
|
val, ok := p.opt.Vars[name]
|
||||||
|
if !ok {
|
||||||
|
val, _ = p.opt.LookupVar(name)
|
||||||
|
}
|
||||||
|
vv := cty.StringVal(val)
|
||||||
|
v = &vv
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if diags := p.loadDeps(def.Expr, nil); diags.HasErrors() {
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
vv, diags := def.Expr.Value(p.ectx)
|
||||||
|
if diags.HasErrors() {
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
_, isVar := p.vars[name]
|
||||||
|
|
||||||
|
if envv, ok := p.opt.LookupVar(name); ok && isVar {
|
||||||
|
if vv.Type().Equals(cty.Bool) {
|
||||||
|
b, err := strconv.ParseBool(envv)
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to parse %s as bool", name)
|
||||||
|
}
|
||||||
|
vv := cty.BoolVal(b)
|
||||||
|
v = &vv
|
||||||
|
return nil
|
||||||
|
} else if vv.Type().Equals(cty.String) {
|
||||||
|
vv := cty.StringVal(envv)
|
||||||
|
v = &vv
|
||||||
|
return nil
|
||||||
|
} else if vv.Type().Equals(cty.Number) {
|
||||||
|
n, err := strconv.ParseFloat(envv, 64)
|
||||||
|
if err == nil && (math.IsNaN(n) || math.IsInf(n, 0)) {
|
||||||
|
err = errors.Errorf("invalid number value")
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "failed to parse %s as number", name)
|
||||||
|
}
|
||||||
|
vv := cty.NumberVal(big.NewFloat(n))
|
||||||
|
v = &vv
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
// TODO: support lists with csv values
|
||||||
|
return errors.Errorf("unsupported type %s for variable %s", v.Type(), name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
v = &vv
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
|
||||||
|
reserved := map[string]struct{}{}
|
||||||
|
schema, _ := gohcl.ImpliedBodySchema(val)
|
||||||
|
|
||||||
|
for _, bs := range schema.Blocks {
|
||||||
|
reserved[bs.Type] = struct{}{}
|
||||||
|
}
|
||||||
|
for k := range opt.Vars {
|
||||||
|
reserved[k] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
var defs inputs
|
||||||
|
if err := gohcl.DecodeBody(b, nil, &defs); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if opt.LookupVar == nil {
|
||||||
|
opt.LookupVar = func(string) (string, bool) {
|
||||||
|
return "", false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
p := &parser{
|
||||||
|
opt: opt,
|
||||||
|
|
||||||
|
vars: map[string]*variable{},
|
||||||
|
attrs: map[string]*hcl.Attribute{},
|
||||||
|
funcs: map[string]*functionDef{},
|
||||||
|
|
||||||
|
progress: map[string]struct{}{},
|
||||||
|
progressF: map[string]struct{}{},
|
||||||
|
doneF: map[string]struct{}{},
|
||||||
|
ectx: &hcl.EvalContext{
|
||||||
|
Variables: map[string]cty.Value{},
|
||||||
|
Functions: stdlibFunctions,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range defs.Variables {
|
||||||
|
// TODO: validate name
|
||||||
|
if _, ok := reserved[v.Name]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p.vars[v.Name] = v
|
||||||
|
}
|
||||||
|
for _, v := range defs.Functions {
|
||||||
|
// TODO: validate name
|
||||||
|
if _, ok := reserved[v.Name]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p.funcs[v.Name] = v
|
||||||
|
}
|
||||||
|
|
||||||
|
attrs, diags := b.JustAttributes()
|
||||||
|
if diags.HasErrors() {
|
||||||
|
for _, d := range diags {
|
||||||
|
if d.Detail != "Blocks are not allowed here." {
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, v := range attrs {
|
||||||
|
if _, ok := reserved[v.Name]; ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
p.attrs[v.Name] = v
|
||||||
|
}
|
||||||
|
delete(p.attrs, "function")
|
||||||
|
|
||||||
|
for k := range p.opt.Vars {
|
||||||
|
_ = p.resolveValue(k)
|
||||||
|
}
|
||||||
|
|
||||||
|
for k := range p.attrs {
|
||||||
|
if err := p.resolveValue(k); err != nil {
|
||||||
|
if diags, ok := err.(hcl.Diagnostics); ok {
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
return hcl.Diagnostics{
|
||||||
|
&hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Invalid attribute",
|
||||||
|
Detail: err.Error(),
|
||||||
|
Subject: &p.attrs[k].Range,
|
||||||
|
Context: &p.attrs[k].Range,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for k := range p.vars {
|
||||||
|
if err := p.resolveValue(k); err != nil {
|
||||||
|
if diags, ok := err.(hcl.Diagnostics); ok {
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
r := p.vars[k].Body.MissingItemRange()
|
||||||
|
return hcl.Diagnostics{
|
||||||
|
&hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Invalid value",
|
||||||
|
Detail: err.Error(),
|
||||||
|
Subject: &r,
|
||||||
|
Context: &r,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for k := range p.funcs {
|
||||||
|
if err := p.resolveFunction(k); err != nil {
|
||||||
|
if diags, ok := err.(hcl.Diagnostics); ok {
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
return hcl.Diagnostics{
|
||||||
|
&hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Invalid function",
|
||||||
|
Detail: err.Error(),
|
||||||
|
Subject: &p.funcs[k].Params.Range,
|
||||||
|
Context: &p.funcs[k].Params.Range,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
content, _, diags := b.PartialContent(schema)
|
||||||
|
if diags.HasErrors() {
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, a := range content.Attributes {
|
||||||
|
return hcl.Diagnostics{
|
||||||
|
&hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Invalid attribute",
|
||||||
|
Detail: "global attributes currently not supported",
|
||||||
|
Subject: &a.Range,
|
||||||
|
Context: &a.Range,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
m := map[string]map[string][]*hcl.Block{}
|
||||||
|
for _, b := range content.Blocks {
|
||||||
|
if len(b.Labels) == 0 || len(b.Labels) > 1 {
|
||||||
|
return hcl.Diagnostics{
|
||||||
|
&hcl.Diagnostic{
|
||||||
|
Severity: hcl.DiagError,
|
||||||
|
Summary: "Invalid block",
|
||||||
|
Detail: fmt.Sprintf("invalid block label: %v", b.Labels),
|
||||||
|
Subject: &b.LabelRanges[0],
|
||||||
|
Context: &b.LabelRanges[0],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
bm, ok := m[b.Type]
|
||||||
|
if !ok {
|
||||||
|
bm = map[string][]*hcl.Block{}
|
||||||
|
m[b.Type] = bm
|
||||||
|
}
|
||||||
|
|
||||||
|
lbl := b.Labels[0]
|
||||||
|
bm[lbl] = append(bm[lbl], b)
|
||||||
|
}
|
||||||
|
|
||||||
|
vt := reflect.ValueOf(val).Elem().Type()
|
||||||
|
numFields := vt.NumField()
|
||||||
|
|
||||||
|
type value struct {
|
||||||
|
reflect.Value
|
||||||
|
idx int
|
||||||
|
}
|
||||||
|
type field struct {
|
||||||
|
idx int
|
||||||
|
typ reflect.Type
|
||||||
|
values map[string]value
|
||||||
|
}
|
||||||
|
types := map[string]field{}
|
||||||
|
|
||||||
|
for i := 0; i < numFields; i++ {
|
||||||
|
tags := strings.Split(vt.Field(i).Tag.Get("hcl"), ",")
|
||||||
|
|
||||||
|
types[tags[0]] = field{
|
||||||
|
idx: i,
|
||||||
|
typ: vt.Field(i).Type,
|
||||||
|
values: make(map[string]value),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
diags = hcl.Diagnostics{}
|
||||||
|
for _, b := range content.Blocks {
|
||||||
|
v := reflect.ValueOf(val)
|
||||||
|
|
||||||
|
t, ok := types[b.Type]
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
vv := reflect.New(t.typ.Elem().Elem())
|
||||||
|
diag := gohcl.DecodeBody(b.Body, p.ectx, vv.Interface())
|
||||||
|
if diag.HasErrors() {
|
||||||
|
diags = append(diags, diag...)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
lblIndex := setLabel(vv, b.Labels[0])
|
||||||
|
|
||||||
|
oldValue, exists := t.values[b.Labels[0]]
|
||||||
|
if !exists && lblIndex != -1 {
|
||||||
|
if v.Elem().Field(t.idx).Type().Kind() == reflect.Slice {
|
||||||
|
for i := 0; i < v.Elem().Field(t.idx).Len(); i++ {
|
||||||
|
if b.Labels[0] == v.Elem().Field(t.idx).Index(i).Elem().Field(lblIndex).String() {
|
||||||
|
exists = true
|
||||||
|
oldValue = value{Value: v.Elem().Field(t.idx).Index(i), idx: i}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
if exists {
|
||||||
|
if m := oldValue.Value.MethodByName("Merge"); m.IsValid() {
|
||||||
|
m.Call([]reflect.Value{vv})
|
||||||
|
} else {
|
||||||
|
v.Elem().Field(t.idx).Index(oldValue.idx).Set(vv)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
slice := v.Elem().Field(t.idx)
|
||||||
|
if slice.IsNil() {
|
||||||
|
slice = reflect.New(t.typ).Elem()
|
||||||
|
}
|
||||||
|
t.values[b.Labels[0]] = value{Value: vv, idx: slice.Len()}
|
||||||
|
v.Elem().Field(t.idx).Set(reflect.Append(slice, vv))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if diags.HasErrors() {
|
||||||
|
return diags
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func setLabel(v reflect.Value, lbl string) int {
|
||||||
|
// cache field index?
|
||||||
|
numFields := v.Elem().Type().NumField()
|
||||||
|
for i := 0; i < numFields; i++ {
|
||||||
|
for _, t := range strings.Split(v.Elem().Type().Field(i).Tag.Get("hcl"), ",") {
|
||||||
|
if t == "label" {
|
||||||
|
v.Elem().Field(i).Set(reflect.ValueOf(lbl))
|
||||||
|
return i
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return -1
|
||||||
|
}
|
||||||
111
bake/hclparser/stdlib.go
Normal file
111
bake/hclparser/stdlib.go
Normal file
@@ -0,0 +1,111 @@
|
|||||||
|
package hclparser
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/hashicorp/go-cty-funcs/cidr"
|
||||||
|
"github.com/hashicorp/go-cty-funcs/crypto"
|
||||||
|
"github.com/hashicorp/go-cty-funcs/encoding"
|
||||||
|
"github.com/hashicorp/go-cty-funcs/uuid"
|
||||||
|
"github.com/hashicorp/hcl/v2/ext/tryfunc"
|
||||||
|
"github.com/hashicorp/hcl/v2/ext/typeexpr"
|
||||||
|
"github.com/zclconf/go-cty/cty/function"
|
||||||
|
"github.com/zclconf/go-cty/cty/function/stdlib"
|
||||||
|
)
|
||||||
|
|
||||||
|
var stdlibFunctions = map[string]function.Function{
|
||||||
|
"absolute": stdlib.AbsoluteFunc,
|
||||||
|
"add": stdlib.AddFunc,
|
||||||
|
"and": stdlib.AndFunc,
|
||||||
|
"base64decode": encoding.Base64DecodeFunc,
|
||||||
|
"base64encode": encoding.Base64EncodeFunc,
|
||||||
|
"bcrypt": crypto.BcryptFunc,
|
||||||
|
"byteslen": stdlib.BytesLenFunc,
|
||||||
|
"bytesslice": stdlib.BytesSliceFunc,
|
||||||
|
"can": tryfunc.CanFunc,
|
||||||
|
"ceil": stdlib.CeilFunc,
|
||||||
|
"chomp": stdlib.ChompFunc,
|
||||||
|
"chunklist": stdlib.ChunklistFunc,
|
||||||
|
"cidrhost": cidr.HostFunc,
|
||||||
|
"cidrnetmask": cidr.NetmaskFunc,
|
||||||
|
"cidrsubnet": cidr.SubnetFunc,
|
||||||
|
"cidrsubnets": cidr.SubnetsFunc,
|
||||||
|
"csvdecode": stdlib.CSVDecodeFunc,
|
||||||
|
"coalesce": stdlib.CoalesceFunc,
|
||||||
|
"coalescelist": stdlib.CoalesceListFunc,
|
||||||
|
"compact": stdlib.CompactFunc,
|
||||||
|
"concat": stdlib.ConcatFunc,
|
||||||
|
"contains": stdlib.ContainsFunc,
|
||||||
|
"convert": typeexpr.ConvertFunc,
|
||||||
|
"distinct": stdlib.DistinctFunc,
|
||||||
|
"divide": stdlib.DivideFunc,
|
||||||
|
"element": stdlib.ElementFunc,
|
||||||
|
"equal": stdlib.EqualFunc,
|
||||||
|
"flatten": stdlib.FlattenFunc,
|
||||||
|
"floor": stdlib.FloorFunc,
|
||||||
|
"formatdate": stdlib.FormatDateFunc,
|
||||||
|
"format": stdlib.FormatFunc,
|
||||||
|
"formatlist": stdlib.FormatListFunc,
|
||||||
|
"greaterthan": stdlib.GreaterThanFunc,
|
||||||
|
"greaterthanorequalto": stdlib.GreaterThanOrEqualToFunc,
|
||||||
|
"hasindex": stdlib.HasIndexFunc,
|
||||||
|
"indent": stdlib.IndentFunc,
|
||||||
|
"index": stdlib.IndexFunc,
|
||||||
|
"int": stdlib.IntFunc,
|
||||||
|
"jsondecode": stdlib.JSONDecodeFunc,
|
||||||
|
"jsonencode": stdlib.JSONEncodeFunc,
|
||||||
|
"keys": stdlib.KeysFunc,
|
||||||
|
"join": stdlib.JoinFunc,
|
||||||
|
"length": stdlib.LengthFunc,
|
||||||
|
"lessthan": stdlib.LessThanFunc,
|
||||||
|
"lessthanorequalto": stdlib.LessThanOrEqualToFunc,
|
||||||
|
"log": stdlib.LogFunc,
|
||||||
|
"lookup": stdlib.LookupFunc,
|
||||||
|
"lower": stdlib.LowerFunc,
|
||||||
|
"max": stdlib.MaxFunc,
|
||||||
|
"md5": crypto.Md5Func,
|
||||||
|
"merge": stdlib.MergeFunc,
|
||||||
|
"min": stdlib.MinFunc,
|
||||||
|
"modulo": stdlib.ModuloFunc,
|
||||||
|
"multiply": stdlib.MultiplyFunc,
|
||||||
|
"negate": stdlib.NegateFunc,
|
||||||
|
"notequal": stdlib.NotEqualFunc,
|
||||||
|
"not": stdlib.NotFunc,
|
||||||
|
"or": stdlib.OrFunc,
|
||||||
|
"parseint": stdlib.ParseIntFunc,
|
||||||
|
"pow": stdlib.PowFunc,
|
||||||
|
"range": stdlib.RangeFunc,
|
||||||
|
"regexall": stdlib.RegexAllFunc,
|
||||||
|
"regex": stdlib.RegexFunc,
|
||||||
|
"regex_replace": stdlib.RegexReplaceFunc,
|
||||||
|
"reverse": stdlib.ReverseFunc,
|
||||||
|
"reverselist": stdlib.ReverseListFunc,
|
||||||
|
"rsadecrypt": crypto.RsaDecryptFunc,
|
||||||
|
"sethaselement": stdlib.SetHasElementFunc,
|
||||||
|
"setintersection": stdlib.SetIntersectionFunc,
|
||||||
|
"setproduct": stdlib.SetProductFunc,
|
||||||
|
"setsubtract": stdlib.SetSubtractFunc,
|
||||||
|
"setsymmetricdifference": stdlib.SetSymmetricDifferenceFunc,
|
||||||
|
"setunion": stdlib.SetUnionFunc,
|
||||||
|
"sha1": crypto.Sha1Func,
|
||||||
|
"sha256": crypto.Sha256Func,
|
||||||
|
"sha512": crypto.Sha512Func,
|
||||||
|
"signum": stdlib.SignumFunc,
|
||||||
|
"slice": stdlib.SliceFunc,
|
||||||
|
"sort": stdlib.SortFunc,
|
||||||
|
"split": stdlib.SplitFunc,
|
||||||
|
"strlen": stdlib.StrlenFunc,
|
||||||
|
"substr": stdlib.SubstrFunc,
|
||||||
|
"subtract": stdlib.SubtractFunc,
|
||||||
|
"timeadd": stdlib.TimeAddFunc,
|
||||||
|
"title": stdlib.TitleFunc,
|
||||||
|
"trim": stdlib.TrimFunc,
|
||||||
|
"trimprefix": stdlib.TrimPrefixFunc,
|
||||||
|
"trimspace": stdlib.TrimSpaceFunc,
|
||||||
|
"trimsuffix": stdlib.TrimSuffixFunc,
|
||||||
|
"try": tryfunc.TryFunc,
|
||||||
|
"upper": stdlib.UpperFunc,
|
||||||
|
"urlencode": encoding.URLEncodeFunc,
|
||||||
|
"uuidv4": uuid.V4Func,
|
||||||
|
"uuidv5": uuid.V5Func,
|
||||||
|
"values": stdlib.ValuesFunc,
|
||||||
|
"zipmap": stdlib.ZipmapFunc,
|
||||||
|
}
|
||||||
@@ -21,9 +21,10 @@ type Input struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func ReadRemoteFiles(ctx context.Context, dis []build.DriverInfo, url string, names []string, pw progress.Writer) ([]File, *Input, error) {
|
func ReadRemoteFiles(ctx context.Context, dis []build.DriverInfo, url string, names []string, pw progress.Writer) ([]File, *Input, error) {
|
||||||
st, filename, ok := detectHttpContext(url)
|
var filename string
|
||||||
|
st, ok := detectGitContext(url)
|
||||||
if !ok {
|
if !ok {
|
||||||
st, ok = detectGitContext(url)
|
st, filename, ok = detectHTTPContext(url)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, nil, errors.Errorf("not url context")
|
return nil, nil, errors.Errorf("not url context")
|
||||||
}
|
}
|
||||||
@@ -43,7 +44,7 @@ func ReadRemoteFiles(ctx context.Context, dis []build.DriverInfo, url string, na
|
|||||||
return nil, nil, nil
|
return nil, nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
c, err := driver.Boot(ctx, di.Driver, pw)
|
c, err := driver.Boot(ctx, ctx, di.Driver, pw)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
@@ -83,7 +84,7 @@ func ReadRemoteFiles(ctx context.Context, dis []build.DriverInfo, url string, na
|
|||||||
}
|
}
|
||||||
|
|
||||||
func IsRemoteURL(url string) bool {
|
func IsRemoteURL(url string) bool {
|
||||||
if _, _, ok := detectHttpContext(url); ok {
|
if _, _, ok := detectHTTPContext(url); ok {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
if _, ok := detectGitContext(url); ok {
|
if _, ok := detectGitContext(url); ok {
|
||||||
@@ -92,7 +93,7 @@ func IsRemoteURL(url string) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func detectHttpContext(url string) (*llb.State, string, bool) {
|
func detectHTTPContext(url string) (*llb.State, string, bool) {
|
||||||
if httpPrefix.MatchString(url) {
|
if httpPrefix.MatchString(url) {
|
||||||
httpContext := llb.HTTP(url, llb.Filename("context"), llb.WithCustomName("[internal] load remote build context"))
|
httpContext := llb.HTTP(url, llb.Filename("context"), llb.WithCustomName("[internal] load remote build context"))
|
||||||
return &httpContext, "context", true
|
return &httpContext, "context", true
|
||||||
|
|||||||
251
build/build.go
251
build/build.go
@@ -3,6 +3,8 @@ package build
|
|||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"context"
|
"context"
|
||||||
|
"crypto/rand"
|
||||||
|
"encoding/hex"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
@@ -19,7 +21,8 @@ import (
|
|||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
"github.com/docker/buildx/util/imagetools"
|
"github.com/docker/buildx/util/imagetools"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
clitypes "github.com/docker/cli/cli/config/types"
|
"github.com/docker/buildx/util/resolver"
|
||||||
|
"github.com/docker/cli/opts"
|
||||||
"github.com/docker/distribution/reference"
|
"github.com/docker/distribution/reference"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
dockerclient "github.com/docker/docker/client"
|
dockerclient "github.com/docker/docker/client"
|
||||||
@@ -27,14 +30,18 @@ import (
|
|||||||
"github.com/docker/docker/pkg/urlutil"
|
"github.com/docker/docker/pkg/urlutil"
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/client/llb"
|
"github.com/moby/buildkit/client/llb"
|
||||||
|
gateway "github.com/moby/buildkit/frontend/gateway/client"
|
||||||
"github.com/moby/buildkit/session"
|
"github.com/moby/buildkit/session"
|
||||||
"github.com/moby/buildkit/session/upload/uploadprovider"
|
"github.com/moby/buildkit/session/upload/uploadprovider"
|
||||||
|
"github.com/moby/buildkit/util/apicaps"
|
||||||
"github.com/moby/buildkit/util/entitlements"
|
"github.com/moby/buildkit/util/entitlements"
|
||||||
"github.com/moby/buildkit/util/progress/progresswriter"
|
"github.com/moby/buildkit/util/progress/progresswriter"
|
||||||
|
"github.com/moby/buildkit/util/tracing"
|
||||||
"github.com/opencontainers/go-digest"
|
"github.com/opencontainers/go-digest"
|
||||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
|
"go.opentelemetry.io/otel/trace"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -44,26 +51,26 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type Options struct {
|
type Options struct {
|
||||||
Inputs Inputs
|
Inputs Inputs
|
||||||
Tags []string
|
|
||||||
Labels map[string]string
|
|
||||||
BuildArgs map[string]string
|
|
||||||
Pull bool
|
|
||||||
ImageIDFile string
|
|
||||||
ExtraHosts []string
|
|
||||||
NetworkMode string
|
|
||||||
|
|
||||||
NoCache bool
|
Allow []entitlements.Entitlement
|
||||||
Target string
|
BuildArgs map[string]string
|
||||||
Platforms []specs.Platform
|
CacheFrom []client.CacheOptionsEntry
|
||||||
Exports []client.ExportEntry
|
CacheTo []client.CacheOptionsEntry
|
||||||
Session []session.Attachable
|
CgroupParent string
|
||||||
|
Exports []client.ExportEntry
|
||||||
CacheFrom []client.CacheOptionsEntry
|
ExtraHosts []string
|
||||||
CacheTo []client.CacheOptionsEntry
|
ImageIDFile string
|
||||||
|
Labels map[string]string
|
||||||
Allow []entitlements.Entitlement
|
NetworkMode string
|
||||||
// DockerTarget
|
NoCache bool
|
||||||
|
Platforms []specs.Platform
|
||||||
|
Pull bool
|
||||||
|
Session []session.Attachable
|
||||||
|
ShmSize opts.MemBytes
|
||||||
|
Tags []string
|
||||||
|
Target string
|
||||||
|
Ulimits *opts.UlimitOpt
|
||||||
}
|
}
|
||||||
|
|
||||||
type Inputs struct {
|
type Inputs struct {
|
||||||
@@ -79,10 +86,7 @@ type DriverInfo struct {
|
|||||||
Name string
|
Name string
|
||||||
Platform []specs.Platform
|
Platform []specs.Platform
|
||||||
Err error
|
Err error
|
||||||
}
|
ImageOpt imagetools.Opt
|
||||||
|
|
||||||
type Auth interface {
|
|
||||||
GetAuthConfig(registryHostname string) (clitypes.AuthConfig, error)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type DockerAPI interface {
|
type DockerAPI interface {
|
||||||
@@ -110,6 +114,7 @@ type driverPair struct {
|
|||||||
driverIndex int
|
driverIndex int
|
||||||
platforms []specs.Platform
|
platforms []specs.Platform
|
||||||
so *client.SolveOpt
|
so *client.SolveOpt
|
||||||
|
bopts gateway.BuildOpts
|
||||||
}
|
}
|
||||||
|
|
||||||
func driverIndexes(m map[string][]driverPair) []int {
|
func driverIndexes(m map[string][]driverPair) []int {
|
||||||
@@ -138,12 +143,13 @@ func allIndexes(l int) []int {
|
|||||||
func ensureBooted(ctx context.Context, drivers []DriverInfo, idxs []int, pw progress.Writer) ([]*client.Client, error) {
|
func ensureBooted(ctx context.Context, drivers []DriverInfo, idxs []int, pw progress.Writer) ([]*client.Client, error) {
|
||||||
clients := make([]*client.Client, len(drivers))
|
clients := make([]*client.Client, len(drivers))
|
||||||
|
|
||||||
|
baseCtx := ctx
|
||||||
eg, ctx := errgroup.WithContext(ctx)
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
|
|
||||||
for _, i := range idxs {
|
for _, i := range idxs {
|
||||||
func(i int) {
|
func(i int) {
|
||||||
eg.Go(func() error {
|
eg.Go(func() error {
|
||||||
c, err := driver.Boot(ctx, drivers[i].Driver, pw)
|
c, err := driver.Boot(ctx, baseCtx, drivers[i].Driver, pw)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -180,7 +186,48 @@ func splitToDriverPairs(availablePlatforms map[string]int, opt map[string]Option
|
|||||||
return m
|
return m
|
||||||
}
|
}
|
||||||
|
|
||||||
func resolveDrivers(ctx context.Context, drivers []DriverInfo, auth Auth, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
|
func resolveDrivers(ctx context.Context, drivers []DriverInfo, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
|
||||||
|
dps, clients, err := resolveDriversBase(ctx, drivers, opt, pw)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
bopts := make([]gateway.BuildOpts, len(clients))
|
||||||
|
|
||||||
|
span, ctx := tracing.StartSpan(ctx, "load buildkit capabilities", trace.WithSpanKind(trace.SpanKindInternal))
|
||||||
|
|
||||||
|
eg, ctx := errgroup.WithContext(ctx)
|
||||||
|
for i, c := range clients {
|
||||||
|
if c == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
func(i int, c *client.Client) {
|
||||||
|
eg.Go(func() error {
|
||||||
|
clients[i].Build(ctx, client.SolveOpt{}, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
|
||||||
|
bopts[i] = c.BuildOpts()
|
||||||
|
return nil, nil
|
||||||
|
}, nil)
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}(i, c)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = eg.Wait()
|
||||||
|
tracing.FinishWithError(span, err)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
for key := range dps {
|
||||||
|
for i, dp := range dps[key] {
|
||||||
|
dps[key][i].bopts = bopts[dp.driverIndex]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return dps, clients, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func resolveDriversBase(ctx context.Context, drivers []DriverInfo, opt map[string]Options, pw progress.Writer) (map[string][]driverPair, []*client.Client, error) {
|
||||||
availablePlatforms := map[string]int{}
|
availablePlatforms := map[string]int{}
|
||||||
for i, d := range drivers {
|
for i, d := range drivers {
|
||||||
for _, p := range d.Platform {
|
for _, p := range d.Platform {
|
||||||
@@ -245,6 +292,7 @@ func resolveDrivers(ctx context.Context, drivers []DriverInfo, auth Auth, opt ma
|
|||||||
workers[i] = ww
|
workers[i] = ww
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
|
|
||||||
}(i)
|
}(i)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -285,7 +333,7 @@ func toRepoOnly(in string) (string, error) {
|
|||||||
return strings.Join(out, ","), nil
|
return strings.Join(out, ","), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Options, pw progress.Writer, dl dockerLoadCallback) (solveOpt *client.SolveOpt, release func(), err error) {
|
func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, dl dockerLoadCallback) (solveOpt *client.SolveOpt, release func(), err error) {
|
||||||
defers := make([]func(), 0, 2)
|
defers := make([]func(), 0, 2)
|
||||||
releaseF := func() {
|
releaseF := func() {
|
||||||
for _, f := range defers {
|
for _, f := range defers {
|
||||||
@@ -322,15 +370,39 @@ func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Opti
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
cacheTo := make([]client.CacheOptionsEntry, 0, len(opt.CacheTo))
|
||||||
|
for _, e := range opt.CacheTo {
|
||||||
|
if e.Type == "gha" {
|
||||||
|
if !bopts.LLBCaps.Contains(apicaps.CapID("cache.gha")) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cacheTo = append(cacheTo, e)
|
||||||
|
}
|
||||||
|
|
||||||
|
cacheFrom := make([]client.CacheOptionsEntry, 0, len(opt.CacheFrom))
|
||||||
|
for _, e := range opt.CacheFrom {
|
||||||
|
if e.Type == "gha" {
|
||||||
|
if !bopts.LLBCaps.Contains(apicaps.CapID("cache.gha")) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cacheFrom = append(cacheFrom, e)
|
||||||
|
}
|
||||||
|
|
||||||
so := client.SolveOpt{
|
so := client.SolveOpt{
|
||||||
Frontend: "dockerfile.v0",
|
Frontend: "dockerfile.v0",
|
||||||
FrontendAttrs: map[string]string{},
|
FrontendAttrs: map[string]string{},
|
||||||
LocalDirs: map[string]string{},
|
LocalDirs: map[string]string{},
|
||||||
CacheExports: opt.CacheTo,
|
CacheExports: cacheTo,
|
||||||
CacheImports: opt.CacheFrom,
|
CacheImports: cacheFrom,
|
||||||
AllowedEntitlements: opt.Allow,
|
AllowedEntitlements: opt.Allow,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if opt.CgroupParent != "" {
|
||||||
|
so.FrontendAttrs["cgroup-parent"] = opt.CgroupParent
|
||||||
|
}
|
||||||
|
|
||||||
if v, ok := opt.BuildArgs["BUILDKIT_MULTI_PLATFORM"]; ok {
|
if v, ok := opt.BuildArgs["BUILDKIT_MULTI_PLATFORM"]; ok {
|
||||||
if v, _ := strconv.ParseBool(v); v {
|
if v, _ := strconv.ParseBool(v); v {
|
||||||
so.FrontendAttrs["multi-platform"] = "true"
|
so.FrontendAttrs["multi-platform"] = "true"
|
||||||
@@ -399,6 +471,9 @@ func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Opti
|
|||||||
return nil, nil, notSupported(d, driver.OCIExporter)
|
return nil, nil, notSupported(d, driver.OCIExporter)
|
||||||
}
|
}
|
||||||
if e.Type == "docker" {
|
if e.Type == "docker" {
|
||||||
|
if len(opt.Platforms) > 1 {
|
||||||
|
return nil, nil, errors.Errorf("docker exporter does not currently support exporting manifest lists")
|
||||||
|
}
|
||||||
if e.Output == nil {
|
if e.Output == nil {
|
||||||
if d.IsMobyDriver() {
|
if d.IsMobyDriver() {
|
||||||
e.Type = "image"
|
e.Type = "image"
|
||||||
@@ -435,6 +510,13 @@ func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Opti
|
|||||||
}
|
}
|
||||||
defers = append(defers, releaseLoad)
|
defers = append(defers, releaseLoad)
|
||||||
|
|
||||||
|
if sharedKey := so.LocalDirs["context"]; sharedKey != "" {
|
||||||
|
if p, err := filepath.Abs(sharedKey); err == nil {
|
||||||
|
sharedKey = filepath.Base(p)
|
||||||
|
}
|
||||||
|
so.SharedKey = sharedKey + ":" + tryNodeIdentifier(configDir)
|
||||||
|
}
|
||||||
|
|
||||||
if opt.Pull {
|
if opt.Pull {
|
||||||
so.FrontendAttrs["image-resolve-mode"] = "pull"
|
so.FrontendAttrs["image-resolve-mode"] = "pull"
|
||||||
}
|
}
|
||||||
@@ -465,12 +547,14 @@ func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Opti
|
|||||||
|
|
||||||
// setup networkmode
|
// setup networkmode
|
||||||
switch opt.NetworkMode {
|
switch opt.NetworkMode {
|
||||||
case "host", "none":
|
case "host":
|
||||||
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
|
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
|
||||||
so.AllowedEntitlements = append(so.AllowedEntitlements, entitlements.EntitlementNetworkHost)
|
so.AllowedEntitlements = append(so.AllowedEntitlements, entitlements.EntitlementNetworkHost)
|
||||||
|
case "none":
|
||||||
|
so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
|
||||||
case "", "default":
|
case "", "default":
|
||||||
default:
|
default:
|
||||||
return nil, nil, errors.Errorf("network mode %q not supported by buildkit", opt.NetworkMode)
|
return nil, nil, errors.Errorf("network mode %q not supported by buildkit. You can define a custom network for your builder using the network driver-opt in buildx create.", opt.NetworkMode)
|
||||||
}
|
}
|
||||||
|
|
||||||
// setup extrahosts
|
// setup extrahosts
|
||||||
@@ -480,10 +564,23 @@ func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Opti
|
|||||||
}
|
}
|
||||||
so.FrontendAttrs["add-hosts"] = extraHosts
|
so.FrontendAttrs["add-hosts"] = extraHosts
|
||||||
|
|
||||||
|
// setup shm size
|
||||||
|
if opt.ShmSize.Value() > 0 {
|
||||||
|
so.FrontendAttrs["shm-size"] = strconv.FormatInt(opt.ShmSize.Value(), 10)
|
||||||
|
}
|
||||||
|
|
||||||
|
// setup ulimits
|
||||||
|
ulimits, err := toBuildkitUlimits(opt.Ulimits)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
} else if len(ulimits) > 0 {
|
||||||
|
so.FrontendAttrs["ulimit"] = ulimits
|
||||||
|
}
|
||||||
|
|
||||||
return &so, releaseF, nil
|
return &so, releaseF, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, docker DockerAPI, auth Auth, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
|
func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, docker DockerAPI, configDir string, w progress.Writer) (resp map[string]*client.SolveResponse, err error) {
|
||||||
if len(drivers) == 0 {
|
if len(drivers) == 0 {
|
||||||
return nil, errors.Errorf("driver required for build")
|
return nil, errors.Errorf("driver required for build")
|
||||||
}
|
}
|
||||||
@@ -510,7 +607,7 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
m, clients, err := resolveDrivers(ctx, drivers, auth, opt, w)
|
m, clients, err := resolveDrivers(ctx, drivers, opt, w)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -535,7 +632,7 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
|||||||
hasMobyDriver = true
|
hasMobyDriver = true
|
||||||
}
|
}
|
||||||
opt.Platforms = dp.platforms
|
opt.Platforms = dp.platforms
|
||||||
so, release, err := toSolveOpt(ctx, d, multiDriver, opt, w, func(name string) (io.WriteCloser, func(), error) {
|
so, release, err := toSolveOpt(ctx, d, multiDriver, opt, dp.bopts, configDir, w, func(name string) (io.WriteCloser, func(), error) {
|
||||||
return newDockerLoader(ctx, docker, name, w)
|
return newDockerLoader(ctx, docker, name, w)
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -579,13 +676,25 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
|||||||
dps := m[k]
|
dps := m[k]
|
||||||
multiDriver := len(m[k]) > 1
|
multiDriver := len(m[k]) > 1
|
||||||
|
|
||||||
|
var span trace.Span
|
||||||
|
ctx := ctx
|
||||||
|
if multiTarget {
|
||||||
|
span, ctx = tracing.StartSpan(ctx, k)
|
||||||
|
}
|
||||||
|
|
||||||
res := make([]*client.SolveResponse, len(dps))
|
res := make([]*client.SolveResponse, len(dps))
|
||||||
wg := &sync.WaitGroup{}
|
wg := &sync.WaitGroup{}
|
||||||
wg.Add(len(dps))
|
wg.Add(len(dps))
|
||||||
|
|
||||||
var pushNames string
|
var pushNames string
|
||||||
|
var insecurePush bool
|
||||||
|
|
||||||
eg.Go(func() error {
|
eg.Go(func() (err error) {
|
||||||
|
defer func() {
|
||||||
|
if span != nil {
|
||||||
|
tracing.FinishWithError(span, err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
pw := progress.WithPrefix(w, "default", false)
|
pw := progress.WithPrefix(w, "default", false)
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
select {
|
select {
|
||||||
@@ -598,8 +707,9 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
|||||||
resp[k] = res[0]
|
resp[k] = res[0]
|
||||||
respMu.Unlock()
|
respMu.Unlock()
|
||||||
if len(res) == 1 {
|
if len(res) == 1 {
|
||||||
|
digest := res[0].ExporterResponse["containerimage.digest"]
|
||||||
if opt.ImageIDFile != "" {
|
if opt.ImageIDFile != "" {
|
||||||
return ioutil.WriteFile(opt.ImageIDFile, []byte(res[0].ExporterResponse["containerimage.digest"]), 0644)
|
return ioutil.WriteFile(opt.ImageIDFile, []byte(digest), 0644)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -619,22 +729,41 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(descs) > 0 {
|
if len(descs) > 0 {
|
||||||
itpull := imagetools.New(imagetools.Opt{
|
var imageopt imagetools.Opt
|
||||||
Auth: auth,
|
for _, dp := range dps {
|
||||||
})
|
imageopt = drivers[dp.driverIndex].ImageOpt
|
||||||
|
break
|
||||||
|
}
|
||||||
names := strings.Split(pushNames, ",")
|
names := strings.Split(pushNames, ",")
|
||||||
|
|
||||||
|
if insecurePush {
|
||||||
|
insecureTrue := true
|
||||||
|
httpTrue := true
|
||||||
|
nn, err := reference.ParseNormalizedNamed(names[0])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
imageopt.RegistryConfig = map[string]resolver.RegistryConfig{
|
||||||
|
reference.Domain(nn): {
|
||||||
|
Insecure: &insecureTrue,
|
||||||
|
PlainHTTP: &httpTrue,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
itpull := imagetools.New(imageopt)
|
||||||
|
|
||||||
dt, desc, err := itpull.Combine(ctx, names[0], descs)
|
dt, desc, err := itpull.Combine(ctx, names[0], descs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if opt.ImageIDFile != "" {
|
if opt.ImageIDFile != "" {
|
||||||
return ioutil.WriteFile(opt.ImageIDFile, []byte(desc.Digest), 0644)
|
if err := ioutil.WriteFile(opt.ImageIDFile, []byte(desc.Digest), 0644); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
itpush := imagetools.New(imagetools.Opt{
|
itpush := imagetools.New(imageopt)
|
||||||
Auth: auth,
|
|
||||||
})
|
|
||||||
|
|
||||||
for _, n := range names {
|
for _, n := range names {
|
||||||
nn, err := reference.ParseNormalizedNamed(n)
|
nn, err := reference.ParseNormalizedNamed(n)
|
||||||
@@ -679,6 +808,9 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
if ok, _ := strconv.ParseBool(e.Attrs["registry.insecure"]); ok {
|
||||||
|
insecurePush = true
|
||||||
|
}
|
||||||
e.Attrs["name"] = names
|
e.Attrs["name"] = names
|
||||||
e.Attrs["push-by-digest"] = "true"
|
e.Attrs["push-by-digest"] = "true"
|
||||||
so.Exports[i].Attrs = e.Attrs
|
so.Exports[i].Attrs = e.Attrs
|
||||||
@@ -1066,3 +1198,34 @@ func handleLowercaseDockerfile(dir, p string) string {
|
|||||||
}
|
}
|
||||||
return p
|
return p
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func wrapWriteCloser(wc io.WriteCloser) func(map[string]string) (io.WriteCloser, error) {
|
||||||
|
return func(map[string]string) (io.WriteCloser, error) {
|
||||||
|
return wc, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var nodeIdentifierMu sync.Mutex
|
||||||
|
|
||||||
|
func tryNodeIdentifier(configDir string) (out string) {
|
||||||
|
nodeIdentifierMu.Lock()
|
||||||
|
defer nodeIdentifierMu.Unlock()
|
||||||
|
sessionFile := filepath.Join(configDir, ".buildNodeID")
|
||||||
|
if _, err := os.Lstat(sessionFile); err != nil {
|
||||||
|
if os.IsNotExist(err) { // create a new file with stored randomness
|
||||||
|
b := make([]byte, 8)
|
||||||
|
if _, err := rand.Read(b); err != nil {
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
if err := ioutil.WriteFile(sessionFile, []byte(hex.EncodeToString(b)), 0600); err != nil {
|
||||||
|
return out
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
dt, err := ioutil.ReadFile(sessionFile)
|
||||||
|
if err == nil {
|
||||||
|
return string(dt)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
func createTempDockerfileFromURL(ctx context.Context, d driver.Driver, url string, pw progress.Writer) (string, error) {
|
func createTempDockerfileFromURL(ctx context.Context, d driver.Driver, url string, pw progress.Writer) (string, error) {
|
||||||
c, err := driver.Boot(ctx, d, pw)
|
c, err := driver.Boot(ctx, ctx, d, pw)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/cli/opts"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -53,3 +54,15 @@ func toBuildkitExtraHosts(inp []string) (string, error) {
|
|||||||
}
|
}
|
||||||
return strings.Join(hosts, ","), nil
|
return strings.Join(hosts, ","), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// toBuildkitUlimits converts ulimits from docker type=soft:hard format to buildkit's csv format
|
||||||
|
func toBuildkitUlimits(inp *opts.UlimitOpt) (string, error) {
|
||||||
|
if inp == nil || len(inp.GetList()) == 0 {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
ulimits := make([]string, 0, len(inp.GetList()))
|
||||||
|
for _, ulimit := range inp.GetList() {
|
||||||
|
ulimits = append(ulimits, ulimit.String())
|
||||||
|
}
|
||||||
|
return strings.Join(ulimits, ","), nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -15,6 +15,11 @@ import (
|
|||||||
cliflags "github.com/docker/cli/cli/flags"
|
cliflags "github.com/docker/cli/cli/flags"
|
||||||
"github.com/moby/buildkit/solver/errdefs"
|
"github.com/moby/buildkit/solver/errdefs"
|
||||||
"github.com/moby/buildkit/util/stack"
|
"github.com/moby/buildkit/util/stack"
|
||||||
|
"github.com/moby/buildkit/util/tracing/detect"
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
|
||||||
|
_ "github.com/moby/buildkit/util/tracing/detect/delegated"
|
||||||
|
_ "github.com/moby/buildkit/util/tracing/env"
|
||||||
|
|
||||||
// FIXME: "k8s.io/client-go/plugin/pkg/client/auth/azure" is excluded because of compilation error
|
// FIXME: "k8s.io/client-go/plugin/pkg/client/auth/azure" is excluded because of compilation error
|
||||||
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
|
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
|
||||||
@@ -31,6 +36,10 @@ var experimental string
|
|||||||
func init() {
|
func init() {
|
||||||
seed.WithTimeAndRand()
|
seed.WithTimeAndRand()
|
||||||
stack.SetVersionInfo(version.Version, version.Revision)
|
stack.SetVersionInfo(version.Version, version.Revision)
|
||||||
|
|
||||||
|
detect.ServiceName = "buildx"
|
||||||
|
// do not log tracing errors to stdio
|
||||||
|
otel.SetErrorHandler(skipErrors{})
|
||||||
}
|
}
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
@@ -90,3 +99,7 @@ func main() {
|
|||||||
os.Exit(1)
|
os.Exit(1)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type skipErrors struct{}
|
||||||
|
|
||||||
|
func (skipErrors) Handle(err error) {}
|
||||||
|
|||||||
@@ -6,10 +6,14 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
|
"github.com/containerd/containerd/platforms"
|
||||||
"github.com/docker/buildx/bake"
|
"github.com/docker/buildx/bake"
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/build"
|
||||||
|
"github.com/docker/buildx/util/confutil"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
|
"github.com/docker/buildx/util/tracing"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/docker/docker/pkg/ioutils"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
@@ -17,25 +21,43 @@ import (
|
|||||||
|
|
||||||
type bakeOptions struct {
|
type bakeOptions struct {
|
||||||
files []string
|
files []string
|
||||||
printOnly bool
|
|
||||||
overrides []string
|
overrides []string
|
||||||
|
printOnly bool
|
||||||
commonOptions
|
commonOptions
|
||||||
}
|
}
|
||||||
|
|
||||||
func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error) {
|
func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error) {
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
|
ctx, end, err := tracing.TraceCurrentCommand(ctx, "bake")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
end(err)
|
||||||
|
}()
|
||||||
|
|
||||||
var url string
|
var url string
|
||||||
|
var noTarget bool
|
||||||
|
cmdContext := "cwd://"
|
||||||
|
|
||||||
if len(targets) > 0 {
|
if len(targets) > 0 {
|
||||||
if bake.IsRemoteURL(targets[0]) {
|
if bake.IsRemoteURL(targets[0]) {
|
||||||
url = targets[0]
|
url = targets[0]
|
||||||
targets = targets[1:]
|
targets = targets[1:]
|
||||||
|
if len(targets) > 0 {
|
||||||
|
if bake.IsRemoteURL(targets[0]) {
|
||||||
|
cmdContext = targets[0]
|
||||||
|
targets = targets[1:]
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(targets) == 0 {
|
if len(targets) == 0 {
|
||||||
targets = []string{"default"}
|
targets = []string{"default"}
|
||||||
|
noTarget = true
|
||||||
}
|
}
|
||||||
|
|
||||||
overrides := in.overrides
|
overrides := in.overrides
|
||||||
@@ -43,7 +65,7 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
|
|||||||
if in.exportLoad {
|
if in.exportLoad {
|
||||||
return errors.Errorf("push and load may not be set together at the moment")
|
return errors.Errorf("push and load may not be set together at the moment")
|
||||||
}
|
}
|
||||||
overrides = append(overrides, "*.output=type=registry")
|
overrides = append(overrides, "*.push=true")
|
||||||
} else if in.exportLoad {
|
} else if in.exportLoad {
|
||||||
overrides = append(overrides, "*.output=type=docker")
|
overrides = append(overrides, "*.output=type=docker")
|
||||||
}
|
}
|
||||||
@@ -75,6 +97,7 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
|
|||||||
|
|
||||||
var files []bake.File
|
var files []bake.File
|
||||||
var inp *bake.Input
|
var inp *bake.Input
|
||||||
|
|
||||||
if url != "" {
|
if url != "" {
|
||||||
files, inp, err = bake.ReadRemoteFiles(ctx, dis, url, in.files, printer)
|
files, inp, err = bake.ReadRemoteFiles(ctx, dis, url, in.files, printer)
|
||||||
} else {
|
} else {
|
||||||
@@ -84,13 +107,43 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
m, err := bake.ReadTargets(ctx, files, targets, overrides)
|
t, g, err := bake.ReadTargets(ctx, files, targets, overrides, map[string]string{
|
||||||
|
// Don't forget to update documentation if you add a new
|
||||||
|
// built-in variable: docs/reference/buildx_bake.md#built-in-variables
|
||||||
|
"BAKE_CMD_CONTEXT": cmdContext,
|
||||||
|
"BAKE_LOCAL_PLATFORM": platforms.DefaultString(),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// this function can update target context string from the input so call before printOnly check
|
||||||
|
bo, err := bake.TargetsToBuildOpt(t, inp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if in.printOnly {
|
if in.printOnly {
|
||||||
dt, err := json.MarshalIndent(map[string]map[string]*bake.Target{"target": m}, "", " ")
|
defGroup := map[string][]string{
|
||||||
|
"default": targets,
|
||||||
|
}
|
||||||
|
if noTarget {
|
||||||
|
for _, group := range g {
|
||||||
|
if group.Name != "default" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
defGroup = map[string][]string{
|
||||||
|
"default": group.Targets,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
dt, err := json.MarshalIndent(struct {
|
||||||
|
Group map[string][]string `json:"group,omitempty"`
|
||||||
|
Target map[string]*bake.Target `json:"target"`
|
||||||
|
}{
|
||||||
|
defGroup,
|
||||||
|
t,
|
||||||
|
}, "", " ")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -103,12 +156,25 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
bo, err := bake.TargetsToBuildOpt(m, inp)
|
resp, err := build.Build(ctx, dis, bo, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = build.Build(ctx, dis, bo, dockerAPI(dockerCli), dockerCli.ConfigFile(), printer)
|
if len(in.metadataFile) > 0 && resp != nil {
|
||||||
|
mdata := map[string]map[string]string{}
|
||||||
|
for k, r := range resp {
|
||||||
|
mdata[k] = r.ExporterResponse
|
||||||
|
}
|
||||||
|
mdatab, err := json.MarshalIndent(mdata, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := ioutils.AtomicWriteFile(in.metadataFile, mdatab, 0644); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -135,10 +201,10 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
|
|
||||||
flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
|
flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
|
||||||
|
flags.BoolVar(&options.exportLoad, "load", false, "Shorthand for `--set=*.output=type=docker`")
|
||||||
flags.BoolVar(&options.printOnly, "print", false, "Print the options without building")
|
flags.BoolVar(&options.printOnly, "print", false, "Print the options without building")
|
||||||
flags.StringArrayVar(&options.overrides, "set", nil, "Override target value (eg: targetpattern.key=value)")
|
flags.BoolVar(&options.exportPush, "push", false, "Shorthand for `--set=*.output=type=registry`")
|
||||||
flags.BoolVar(&options.exportPush, "push", false, "Shorthand for --set=*.output=type=registry")
|
flags.StringArrayVar(&options.overrides, "set", nil, "Override target value (e.g., `targetpattern.key=value`)")
|
||||||
flags.BoolVar(&options.exportLoad, "load", false, "Shorthand for --set=*.output=type=docker")
|
|
||||||
|
|
||||||
commonBuildFlags(&options.commonOptions, flags)
|
commonBuildFlags(&options.commonOptions, flags)
|
||||||
|
|
||||||
|
|||||||
@@ -2,15 +2,23 @@ package commands
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/build"
|
||||||
|
"github.com/docker/buildx/util/buildflags"
|
||||||
|
"github.com/docker/buildx/util/confutil"
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
|
"github.com/docker/buildx/util/tracing"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
|
dockeropts "github.com/docker/cli/opts"
|
||||||
|
"github.com/docker/docker/pkg/ioutils"
|
||||||
|
"github.com/docker/go-units"
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
"github.com/moby/buildkit/session/auth/authprovider"
|
"github.com/moby/buildkit/session/auth/authprovider"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
@@ -20,67 +28,58 @@ import (
|
|||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const defaultTargetName = "default"
|
||||||
|
|
||||||
type buildOptions struct {
|
type buildOptions struct {
|
||||||
commonOptions
|
|
||||||
contextPath string
|
contextPath string
|
||||||
dockerfileName string
|
dockerfileName string
|
||||||
tags []string
|
|
||||||
labels []string
|
|
||||||
buildArgs []string
|
|
||||||
|
|
||||||
cacheFrom []string
|
allow []string
|
||||||
cacheTo []string
|
buildArgs []string
|
||||||
target string
|
cacheFrom []string
|
||||||
platforms []string
|
cacheTo []string
|
||||||
secrets []string
|
cgroupParent string
|
||||||
ssh []string
|
extraHosts []string
|
||||||
outputs []string
|
imageIDFile string
|
||||||
imageIDFile string
|
labels []string
|
||||||
extraHosts []string
|
networkMode string
|
||||||
networkMode string
|
outputs []string
|
||||||
|
platforms []string
|
||||||
// unimplemented
|
quiet bool
|
||||||
squash bool
|
secrets []string
|
||||||
quiet bool
|
shmSize dockeropts.MemBytes
|
||||||
|
ssh []string
|
||||||
allow []string
|
tags []string
|
||||||
|
target string
|
||||||
// hidden
|
ulimits *dockeropts.UlimitOpt
|
||||||
// untrusted bool
|
commonOptions
|
||||||
// ulimits *opts.UlimitOpt
|
|
||||||
// memory opts.MemBytes
|
|
||||||
// memorySwap opts.MemSwapBytes
|
|
||||||
// shmSize opts.MemBytes
|
|
||||||
// cpuShares int64
|
|
||||||
// cpuPeriod int64
|
|
||||||
// cpuQuota int64
|
|
||||||
// cpuSetCpus string
|
|
||||||
// cpuSetMems string
|
|
||||||
// cgroupParent string
|
|
||||||
// isolation string
|
|
||||||
// compress bool
|
|
||||||
// securityOpt []string
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type commonOptions struct {
|
type commonOptions struct {
|
||||||
builder string
|
builder string
|
||||||
noCache *bool
|
metadataFile string
|
||||||
progress string
|
noCache *bool
|
||||||
pull *bool
|
progress string
|
||||||
|
pull *bool
|
||||||
|
|
||||||
|
// golangci-lint#826
|
||||||
|
// nolint:structcheck
|
||||||
exportPush bool
|
exportPush bool
|
||||||
|
// nolint:structcheck
|
||||||
exportLoad bool
|
exportLoad bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func runBuild(dockerCli command.Cli, in buildOptions) error {
|
func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
|
||||||
if in.squash {
|
|
||||||
return errors.Errorf("squash currently not implemented")
|
|
||||||
}
|
|
||||||
if in.quiet {
|
|
||||||
logrus.Warnf("quiet currently not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
|
ctx, end, err := tracing.TraceCurrentCommand(ctx, "build")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
end(err)
|
||||||
|
}()
|
||||||
|
|
||||||
noCache := false
|
noCache := false
|
||||||
if in.noCache != nil {
|
if in.noCache != nil {
|
||||||
noCache = *in.noCache
|
noCache = *in.noCache
|
||||||
@@ -90,21 +89,29 @@ func runBuild(dockerCli command.Cli, in buildOptions) error {
|
|||||||
pull = *in.pull
|
pull = *in.pull
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if in.quiet && in.progress != "auto" && in.progress != "quiet" {
|
||||||
|
return errors.Errorf("progress=%s and quiet cannot be used together", in.progress)
|
||||||
|
} else if in.quiet {
|
||||||
|
in.progress = "quiet"
|
||||||
|
}
|
||||||
|
|
||||||
opts := build.Options{
|
opts := build.Options{
|
||||||
Inputs: build.Inputs{
|
Inputs: build.Inputs{
|
||||||
ContextPath: in.contextPath,
|
ContextPath: in.contextPath,
|
||||||
DockerfilePath: in.dockerfileName,
|
DockerfilePath: in.dockerfileName,
|
||||||
InStream: os.Stdin,
|
InStream: os.Stdin,
|
||||||
},
|
},
|
||||||
Tags: in.tags,
|
|
||||||
Labels: listToMap(in.labels, false),
|
|
||||||
BuildArgs: listToMap(in.buildArgs, true),
|
BuildArgs: listToMap(in.buildArgs, true),
|
||||||
Pull: pull,
|
|
||||||
NoCache: noCache,
|
|
||||||
Target: in.target,
|
|
||||||
ImageIDFile: in.imageIDFile,
|
|
||||||
ExtraHosts: in.extraHosts,
|
ExtraHosts: in.extraHosts,
|
||||||
|
ImageIDFile: in.imageIDFile,
|
||||||
|
Labels: listToMap(in.labels, false),
|
||||||
NetworkMode: in.networkMode,
|
NetworkMode: in.networkMode,
|
||||||
|
NoCache: noCache,
|
||||||
|
Pull: pull,
|
||||||
|
ShmSize: in.shmSize,
|
||||||
|
Tags: in.tags,
|
||||||
|
Target: in.target,
|
||||||
|
Ulimits: in.ulimits,
|
||||||
}
|
}
|
||||||
|
|
||||||
platforms, err := platformutil.Parse(in.platforms)
|
platforms, err := platformutil.Parse(in.platforms)
|
||||||
@@ -115,19 +122,23 @@ func runBuild(dockerCli command.Cli, in buildOptions) error {
|
|||||||
|
|
||||||
opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(os.Stderr))
|
opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(os.Stderr))
|
||||||
|
|
||||||
secrets, err := build.ParseSecretSpecs(in.secrets)
|
secrets, err := buildflags.ParseSecretSpecs(in.secrets)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
opts.Session = append(opts.Session, secrets)
|
opts.Session = append(opts.Session, secrets)
|
||||||
|
|
||||||
ssh, err := build.ParseSSHSpecs(in.ssh)
|
sshSpecs := in.ssh
|
||||||
|
if len(sshSpecs) == 0 && buildflags.IsGitSSH(in.contextPath) {
|
||||||
|
sshSpecs = []string{"default"}
|
||||||
|
}
|
||||||
|
ssh, err := buildflags.ParseSSHSpecs(sshSpecs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
opts.Session = append(opts.Session, ssh)
|
opts.Session = append(opts.Session, ssh)
|
||||||
|
|
||||||
outputs, err := build.ParseOutputs(in.outputs)
|
outputs, err := buildflags.ParseOutputs(in.outputs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -168,19 +179,19 @@ func runBuild(dockerCli command.Cli, in buildOptions) error {
|
|||||||
|
|
||||||
opts.Exports = outputs
|
opts.Exports = outputs
|
||||||
|
|
||||||
cacheImports, err := build.ParseCacheEntry(in.cacheFrom)
|
cacheImports, err := buildflags.ParseCacheEntry(in.cacheFrom)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
opts.CacheFrom = cacheImports
|
opts.CacheFrom = cacheImports
|
||||||
|
|
||||||
cacheExports, err := build.ParseCacheEntry(in.cacheTo)
|
cacheExports, err := buildflags.ParseCacheEntry(in.cacheTo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
opts.CacheTo = cacheExports
|
opts.CacheTo = cacheExports
|
||||||
|
|
||||||
allow, err := build.ParseEntitlements(in.allow)
|
allow, err := buildflags.ParseEntitlements(in.allow)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -192,30 +203,59 @@ func runBuild(dockerCli command.Cli, in buildOptions) error {
|
|||||||
contextPathHash = in.contextPath
|
contextPathHash = in.contextPath
|
||||||
}
|
}
|
||||||
|
|
||||||
return buildTargets(ctx, dockerCli, map[string]build.Options{"default": opts}, in.progress, contextPathHash, in.builder)
|
imageID, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile)
|
||||||
}
|
|
||||||
|
|
||||||
func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]build.Options, progressMode, contextPathHash, instance string) error {
|
|
||||||
dis, err := getInstanceOrDefault(ctx, dockerCli, instance, contextPathHash)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if in.quiet {
|
||||||
|
fmt.Println(imageID)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]build.Options, progressMode, contextPathHash, instance string, metadataFile string) (imageID string, err error) {
|
||||||
|
dis, err := getInstanceOrDefault(ctx, dockerCli, instance, contextPathHash)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
ctx2, cancel := context.WithCancel(context.TODO())
|
ctx2, cancel := context.WithCancel(context.TODO())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
printer := progress.NewPrinter(ctx2, os.Stderr, progressMode)
|
printer := progress.NewPrinter(ctx2, os.Stderr, progressMode)
|
||||||
|
|
||||||
_, err = build.Build(ctx, dis, opts, dockerAPI(dockerCli), dockerCli.ConfigFile(), printer)
|
resp, err := build.Build(ctx, dis, opts, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer)
|
||||||
err1 := printer.Wait()
|
err1 := printer.Wait()
|
||||||
if err == nil {
|
if err == nil {
|
||||||
err = err1
|
err = err1
|
||||||
}
|
}
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
return err
|
if len(metadataFile) > 0 && resp != nil {
|
||||||
|
mdatab, err := json.MarshalIndent(resp[defaultTargetName].ExporterResponse, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if err := ioutils.AtomicWriteFile(metadataFile, mdatab, 0644); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return resp[defaultTargetName].ExporterResponse["containerimage.digest"], err
|
||||||
|
}
|
||||||
|
|
||||||
|
func newBuildOptions() buildOptions {
|
||||||
|
ulimits := make(map[string]*units.Ulimit)
|
||||||
|
return buildOptions{
|
||||||
|
ulimits: dockeropts.NewUlimitOpt(&ulimits),
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||||
var options buildOptions
|
options := newBuildOptions()
|
||||||
|
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Use: "build [OPTIONS] PATH | URL | -",
|
Use: "build [OPTIONS] PATH | URL | -",
|
||||||
@@ -225,94 +265,136 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
options.contextPath = args[0]
|
options.contextPath = args[0]
|
||||||
options.builder = rootOpts.builder
|
options.builder = rootOpts.builder
|
||||||
|
cmd.Flags().VisitAll(checkWarnedFlags)
|
||||||
return runBuild(dockerCli, options)
|
return runBuild(dockerCli, options)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var platformsDefault []string
|
||||||
|
if v := os.Getenv("DOCKER_DEFAULT_PLATFORM"); v != "" {
|
||||||
|
platformsDefault = []string{v}
|
||||||
|
}
|
||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
|
|
||||||
flags.BoolVar(&options.exportPush, "push", false, "Shorthand for --output=type=registry")
|
flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, "Add a custom host-to-IP mapping (format: `host:ip`)")
|
||||||
flags.BoolVar(&options.exportLoad, "load", false, "Shorthand for --output=type=docker")
|
flags.SetAnnotation("add-host", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host"})
|
||||||
|
|
||||||
|
flags.StringSliceVar(&options.allow, "allow", []string{}, "Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`)")
|
||||||
|
|
||||||
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Name and optionally a tag in the 'name:tag' format")
|
|
||||||
flags.StringArrayVar(&options.buildArgs, "build-arg", []string{}, "Set build-time variables")
|
flags.StringArrayVar(&options.buildArgs, "build-arg", []string{}, "Set build-time variables")
|
||||||
flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
|
flags.SetAnnotation("build-arg", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg"})
|
||||||
|
|
||||||
|
flags.StringArrayVar(&options.cacheFrom, "cache-from", []string{}, "External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`)")
|
||||||
|
|
||||||
|
flags.StringArrayVar(&options.cacheTo, "cache-to", []string{}, "Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`)")
|
||||||
|
|
||||||
|
flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container")
|
||||||
|
flags.SetAnnotation("cgroup-parent", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent"})
|
||||||
|
|
||||||
|
flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (default: `PATH/Dockerfile`)")
|
||||||
|
flags.SetAnnotation("file", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f"})
|
||||||
|
|
||||||
|
flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")
|
||||||
|
|
||||||
flags.StringArrayVar(&options.labels, "label", []string{}, "Set metadata for an image")
|
flags.StringArrayVar(&options.labels, "label", []string{}, "Set metadata for an image")
|
||||||
|
|
||||||
flags.StringArrayVar(&options.cacheFrom, "cache-from", []string{}, "External cache sources (eg. user/app:cache, type=local,src=path/to/dir)")
|
flags.BoolVar(&options.exportLoad, "load", false, "Shorthand for `--output=type=docker`")
|
||||||
flags.StringArrayVar(&options.cacheTo, "cache-to", []string{}, "Cache export destinations (eg. user/app:cache, type=local,dest=path/to/dir)")
|
|
||||||
|
flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")
|
||||||
|
|
||||||
|
flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, "Output destination (format: `type=local,dest=path`)")
|
||||||
|
|
||||||
|
flags.StringArrayVar(&options.platforms, "platform", platformsDefault, "Set target platform for build")
|
||||||
|
|
||||||
|
flags.BoolVar(&options.exportPush, "push", false, "Shorthand for `--output=type=registry`")
|
||||||
|
|
||||||
|
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
|
||||||
|
|
||||||
|
flags.StringArrayVar(&options.secrets, "secret", []string{}, "Secret file to expose to the build (format: `id=mysecret,src=/local/secret`)")
|
||||||
|
|
||||||
|
flags.Var(&options.shmSize, "shm-size", "Size of `/dev/shm`")
|
||||||
|
|
||||||
|
flags.StringArrayVar(&options.ssh, "ssh", []string{}, "SSH agent socket or keys to expose to the build (format: `default|<id>[=<socket>|<key>[,<key>]]`)")
|
||||||
|
|
||||||
|
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Name and optionally a tag (format: `name:tag`)")
|
||||||
|
flags.SetAnnotation("tag", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t"})
|
||||||
|
|
||||||
flags.StringVar(&options.target, "target", "", "Set the target build stage to build.")
|
flags.StringVar(&options.target, "target", "", "Set the target build stage to build.")
|
||||||
|
flags.SetAnnotation("target", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target"})
|
||||||
|
|
||||||
flags.StringSliceVar(&options.allow, "allow", []string{}, "Allow extra privileged entitlement, e.g. network.host, security.insecure")
|
flags.Var(options.ulimits, "ulimit", "Ulimit options")
|
||||||
|
|
||||||
// not implemented
|
|
||||||
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
|
|
||||||
flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")
|
|
||||||
flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, "Add a custom host-to-IP mapping (host:ip)")
|
|
||||||
flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")
|
|
||||||
flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer")
|
|
||||||
flags.MarkHidden("quiet")
|
|
||||||
flags.MarkHidden("squash")
|
|
||||||
|
|
||||||
// hidden flags
|
// hidden flags
|
||||||
var ignore string
|
var ignore string
|
||||||
var ignoreSlice []string
|
var ignoreSlice []string
|
||||||
var ignoreBool bool
|
var ignoreBool bool
|
||||||
var ignoreInt int64
|
var ignoreInt int64
|
||||||
flags.StringVar(&ignore, "ulimit", "", "Ulimit options")
|
|
||||||
flags.MarkHidden("ulimit")
|
|
||||||
flags.StringSliceVar(&ignoreSlice, "security-opt", []string{}, "Security options")
|
|
||||||
flags.MarkHidden("security-opt")
|
|
||||||
flags.BoolVar(&ignoreBool, "compress", false, "Compress the build context using gzip")
|
flags.BoolVar(&ignoreBool, "compress", false, "Compress the build context using gzip")
|
||||||
flags.MarkHidden("compress")
|
flags.MarkHidden("compress")
|
||||||
flags.StringVarP(&ignore, "memory", "m", "", "Memory limit")
|
|
||||||
flags.MarkHidden("memory")
|
|
||||||
flags.StringVar(&ignore, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap")
|
|
||||||
flags.MarkHidden("memory-swap")
|
|
||||||
flags.StringVar(&ignore, "shm-size", "", "Size of /dev/shm")
|
|
||||||
flags.MarkHidden("shm-size")
|
|
||||||
flags.Int64VarP(&ignoreInt, "cpu-shares", "c", 0, "CPU shares (relative weight)")
|
|
||||||
flags.MarkHidden("cpu-shares")
|
|
||||||
flags.Int64Var(&ignoreInt, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
|
|
||||||
flags.MarkHidden("cpu-period")
|
|
||||||
flags.Int64Var(&ignoreInt, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
|
|
||||||
flags.MarkHidden("cpu-quota")
|
|
||||||
flags.StringVar(&ignore, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
|
|
||||||
flags.MarkHidden("cpuset-cpus")
|
|
||||||
flags.StringVar(&ignore, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)")
|
|
||||||
flags.MarkHidden("cpuset-mems")
|
|
||||||
flags.StringVar(&ignore, "cgroup-parent", "", "Optional parent cgroup for the container")
|
|
||||||
flags.MarkHidden("cgroup-parent")
|
|
||||||
flags.StringVar(&ignore, "isolation", "", "Container isolation technology")
|
flags.StringVar(&ignore, "isolation", "", "Container isolation technology")
|
||||||
flags.MarkHidden("isolation")
|
flags.MarkHidden("isolation")
|
||||||
|
flags.SetAnnotation("isolation", "flag-warn", []string{"isolation flag is deprecated with BuildKit."})
|
||||||
|
|
||||||
|
flags.StringSliceVar(&ignoreSlice, "security-opt", []string{}, "Security options")
|
||||||
|
flags.MarkHidden("security-opt")
|
||||||
|
flags.SetAnnotation("security-opt", "flag-warn", []string{`security-opt flag is deprecated. "RUN --security=insecure" should be used with BuildKit.`})
|
||||||
|
|
||||||
|
flags.BoolVar(&ignoreBool, "squash", false, "Squash newly built layers into a single new layer")
|
||||||
|
flags.MarkHidden("squash")
|
||||||
|
flags.SetAnnotation("squash", "flag-warn", []string{"experimental flag squash is removed with BuildKit. You should squash inside build using a multi-stage Dockerfile for efficiency."})
|
||||||
|
|
||||||
|
flags.StringVarP(&ignore, "memory", "m", "", "Memory limit")
|
||||||
|
flags.MarkHidden("memory")
|
||||||
|
|
||||||
|
flags.StringVar(&ignore, "memory-swap", "", "Swap limit equal to memory plus swap: `-1` to enable unlimited swap")
|
||||||
|
flags.MarkHidden("memory-swap")
|
||||||
|
|
||||||
|
flags.Int64VarP(&ignoreInt, "cpu-shares", "c", 0, "CPU shares (relative weight)")
|
||||||
|
flags.MarkHidden("cpu-shares")
|
||||||
|
|
||||||
|
flags.Int64Var(&ignoreInt, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
|
||||||
|
flags.MarkHidden("cpu-period")
|
||||||
|
|
||||||
|
flags.Int64Var(&ignoreInt, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
|
||||||
|
flags.MarkHidden("cpu-quota")
|
||||||
|
|
||||||
|
flags.StringVar(&ignore, "cpuset-cpus", "", "CPUs in which to allow execution (`0-3`, `0,1`)")
|
||||||
|
flags.MarkHidden("cpuset-cpus")
|
||||||
|
|
||||||
|
flags.StringVar(&ignore, "cpuset-mems", "", "MEMs in which to allow execution (`0-3`, `0,1`)")
|
||||||
|
flags.MarkHidden("cpuset-mems")
|
||||||
|
|
||||||
flags.BoolVar(&ignoreBool, "rm", true, "Remove intermediate containers after a successful build")
|
flags.BoolVar(&ignoreBool, "rm", true, "Remove intermediate containers after a successful build")
|
||||||
flags.MarkHidden("rm")
|
flags.MarkHidden("rm")
|
||||||
|
|
||||||
flags.BoolVar(&ignoreBool, "force-rm", false, "Always remove intermediate containers")
|
flags.BoolVar(&ignoreBool, "force-rm", false, "Always remove intermediate containers")
|
||||||
flags.MarkHidden("force-rm")
|
flags.MarkHidden("force-rm")
|
||||||
|
|
||||||
platformsDefault := []string{}
|
|
||||||
if v := os.Getenv("DOCKER_DEFAULT_PLATFORM"); v != "" {
|
|
||||||
platformsDefault = []string{v}
|
|
||||||
}
|
|
||||||
flags.StringArrayVar(&options.platforms, "platform", platformsDefault, "Set target platform for build")
|
|
||||||
|
|
||||||
flags.StringArrayVar(&options.secrets, "secret", []string{}, "Secret file to expose to the build: id=mysecret,src=/local/secret")
|
|
||||||
|
|
||||||
flags.StringArrayVar(&options.ssh, "ssh", []string{}, "SSH agent socket or keys to expose to the build (format: default|<id>[=<socket>|<key>[,<key>]])")
|
|
||||||
|
|
||||||
flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, "Output destination (format: type=local,dest=path)")
|
|
||||||
|
|
||||||
commonBuildFlags(&options.commonOptions, flags)
|
commonBuildFlags(&options.commonOptions, flags)
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
func commonBuildFlags(options *commonOptions, flags *pflag.FlagSet) {
|
func commonBuildFlags(options *commonOptions, flags *pflag.FlagSet) {
|
||||||
options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
|
options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
|
||||||
flags.StringVar(&options.progress, "progress", "auto", "Set type of progress output (auto, plain, tty). Use plain to show container output")
|
flags.StringVar(&options.progress, "progress", "auto", "Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output")
|
||||||
options.pull = flags.Bool("pull", false, "Always attempt to pull a newer version of the image")
|
options.pull = flags.Bool("pull", false, "Always attempt to pull a newer version of the image")
|
||||||
|
flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to the file")
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkWarnedFlags(f *pflag.Flag) {
|
||||||
|
if !f.Changed {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for t, m := range f.Annotations {
|
||||||
|
switch t {
|
||||||
|
case "flag-warn":
|
||||||
|
logrus.Warn(m[0])
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func listToMap(values []string, defaultEnv bool) map[string]string {
|
func listToMap(values []string, defaultEnv bool) map[string]string {
|
||||||
|
|||||||
@@ -1,13 +1,19 @@
|
|||||||
package commands
|
package commands
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
"encoding/csv"
|
"encoding/csv"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
|
"github.com/docker/buildx/store/storeutil"
|
||||||
|
"github.com/docker/buildx/util/cobrautil"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/google/shlex"
|
"github.com/google/shlex"
|
||||||
@@ -28,6 +34,7 @@ type createOptions struct {
|
|||||||
flags string
|
flags string
|
||||||
configFile string
|
configFile string
|
||||||
driverOpts []string
|
driverOpts []string
|
||||||
|
bootstrap bool
|
||||||
// upgrade bool // perform upgrade of the driver
|
// upgrade bool // perform upgrade of the driver
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -69,7 +76,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
return errors.Errorf("failed to find driver %q", in.driver)
|
return errors.Errorf("failed to find driver %q", in.driver)
|
||||||
}
|
}
|
||||||
|
|
||||||
txn, release, err := getStore(dockerCli)
|
txn, release, err := storeutil.GetStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -137,7 +144,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
return errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`")
|
return errors.Errorf("could not create a builder instance with TLS data loaded from environment. Please use `docker context create <context-name>` to create a context for current environment and then create a builder instance with `docker buildx create <context-name>`")
|
||||||
}
|
}
|
||||||
|
|
||||||
ep, err = getCurrentEndpoint(dockerCli)
|
ep, err = storeutil.GetCurrentEndpoint(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -145,7 +152,14 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
|
|
||||||
if in.driver == "kubernetes" {
|
if in.driver == "kubernetes" {
|
||||||
// naming endpoint to make --append works
|
// naming endpoint to make --append works
|
||||||
ep = fmt.Sprintf("%s://%s?deployment=%s", in.driver, in.name, in.nodeName)
|
ep = (&url.URL{
|
||||||
|
Scheme: in.driver,
|
||||||
|
Path: "/" + in.name,
|
||||||
|
RawQuery: (&url.Values{
|
||||||
|
"deployment": {in.nodeName},
|
||||||
|
"kubeconfig": {os.Getenv("KUBECONFIG")},
|
||||||
|
}).Encode(),
|
||||||
|
}).String()
|
||||||
}
|
}
|
||||||
|
|
||||||
m, err := csvToMap(in.driverOpts)
|
m, err := csvToMap(in.driverOpts)
|
||||||
@@ -162,7 +176,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if in.use && ep != "" {
|
if in.use && ep != "" {
|
||||||
current, err := getCurrentEndpoint(dockerCli)
|
current, err := storeutil.GetCurrentEndpoint(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -171,6 +185,21 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ngi := &nginfo{ng: ng}
|
||||||
|
|
||||||
|
timeoutCtx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
if err = loadNodeGroupData(timeoutCtx, dockerCli, ngi); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if in.bootstrap {
|
||||||
|
if _, err = boot(ctx, ngi); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
fmt.Printf("%s\n", ng.Name)
|
fmt.Printf("%s\n", ng.Name)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -178,9 +207,12 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
func createCmd(dockerCli command.Cli) *cobra.Command {
|
func createCmd(dockerCli command.Cli) *cobra.Command {
|
||||||
var options createOptions
|
var options createOptions
|
||||||
|
|
||||||
var drivers []string
|
var drivers bytes.Buffer
|
||||||
for s := range driver.GetFactories() {
|
for _, d := range driver.GetFactories() {
|
||||||
drivers = append(drivers, s)
|
if len(drivers.String()) > 0 {
|
||||||
|
drivers.WriteString(", ")
|
||||||
|
}
|
||||||
|
drivers.WriteString(fmt.Sprintf("`%s`", d.Name()))
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
@@ -195,18 +227,20 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
|
|
||||||
flags.StringVar(&options.name, "name", "", "Builder instance name")
|
flags.StringVar(&options.name, "name", "", "Builder instance name")
|
||||||
flags.StringVar(&options.driver, "driver", "", fmt.Sprintf("Driver to use (available: %v)", drivers))
|
flags.StringVar(&options.driver, "driver", "", fmt.Sprintf("Driver to use (available: %s)", drivers.String()))
|
||||||
flags.StringVar(&options.nodeName, "node", "", "Create/modify node with given name")
|
flags.StringVar(&options.nodeName, "node", "", "Create/modify node with given name")
|
||||||
flags.StringVar(&options.flags, "buildkitd-flags", "", "Flags for buildkitd daemon")
|
flags.StringVar(&options.flags, "buildkitd-flags", "", "Flags for buildkitd daemon")
|
||||||
flags.StringVar(&options.configFile, "config", "", "BuildKit config file")
|
flags.StringVar(&options.configFile, "config", "", "BuildKit config file")
|
||||||
flags.StringArrayVar(&options.platform, "platform", []string{}, "Fixed platforms for current node")
|
flags.StringArrayVar(&options.platform, "platform", []string{}, "Fixed platforms for current node")
|
||||||
flags.StringArrayVar(&options.driverOpts, "driver-opt", []string{}, "Options for the driver")
|
flags.StringArrayVar(&options.driverOpts, "driver-opt", []string{}, "Options for the driver")
|
||||||
|
flags.BoolVar(&options.bootstrap, "bootstrap", false, "Boot builder after creation")
|
||||||
|
|
||||||
flags.BoolVar(&options.actionAppend, "append", false, "Append a node to builder instead of changing it")
|
flags.BoolVar(&options.actionAppend, "append", false, "Append a node to builder instead of changing it")
|
||||||
flags.BoolVar(&options.actionLeave, "leave", false, "Remove a node from builder instead of changing it")
|
flags.BoolVar(&options.actionLeave, "leave", false, "Remove a node from builder instead of changing it")
|
||||||
flags.BoolVar(&options.use, "use", false, "Set the current builder instance")
|
flags.BoolVar(&options.use, "use", false, "Set the current builder instance")
|
||||||
|
|
||||||
_ = flags
|
// hide builder persistent flag for this command
|
||||||
|
cobrautil.HideInheritedFlags(cmd, "builder")
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -185,8 +185,6 @@ func printSummary(tw *tabwriter.Writer, dus [][]*client.UsageInfo) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
tw = tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
|
|
||||||
|
|
||||||
if shared > 0 {
|
if shared > 0 {
|
||||||
fmt.Fprintf(tw, "Shared:\t%.2f\n", units.Bytes(shared))
|
fmt.Fprintf(tw, "Shared:\t%.2f\n", units.Bytes(shared))
|
||||||
fmt.Fprintf(tw, "Private:\t%.2f\n", units.Bytes(total-shared))
|
fmt.Fprintf(tw, "Private:\t%.2f\n", units.Bytes(total-shared))
|
||||||
|
|||||||
@@ -6,6 +6,8 @@ import (
|
|||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/store"
|
||||||
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/buildx/util/imagetools"
|
"github.com/docker/buildx/util/imagetools"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/distribution/reference"
|
"github.com/docker/distribution/reference"
|
||||||
@@ -18,6 +20,7 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type createOptions struct {
|
type createOptions struct {
|
||||||
|
builder string
|
||||||
files []string
|
files []string
|
||||||
tags []string
|
tags []string
|
||||||
dryrun bool
|
dryrun bool
|
||||||
@@ -101,9 +104,32 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
|
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
r := imagetools.New(imagetools.Opt{
|
txn, release, err := storeutil.GetStore(dockerCli)
|
||||||
Auth: dockerCli.ConfigFile(),
|
if err != nil {
|
||||||
})
|
return err
|
||||||
|
}
|
||||||
|
defer release()
|
||||||
|
|
||||||
|
var ng *store.NodeGroup
|
||||||
|
|
||||||
|
if in.builder != "" {
|
||||||
|
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
r := imagetools.New(imageopt)
|
||||||
|
|
||||||
if sourceRefs {
|
if sourceRefs {
|
||||||
eg, ctx2 := errgroup.WithContext(ctx)
|
eg, ctx2 := errgroup.WithContext(ctx)
|
||||||
@@ -118,7 +144,15 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
srcs[i].Ref = nil
|
srcs[i].Ref = nil
|
||||||
srcs[i].Desc = desc
|
if srcs[i].Desc.Digest == "" {
|
||||||
|
srcs[i].Desc = desc
|
||||||
|
} else {
|
||||||
|
var err error
|
||||||
|
srcs[i].Desc, err = mergeDesc(desc, srcs[i].Desc)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
}(i)
|
}(i)
|
||||||
@@ -144,9 +178,7 @@ func runCreate(dockerCli command.Cli, in createOptions, args []string) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// new resolver cause need new auth
|
// new resolver cause need new auth
|
||||||
r = imagetools.New(imagetools.Opt{
|
r = imagetools.New(imageopt)
|
||||||
Auth: dockerCli.ConfigFile(),
|
|
||||||
})
|
|
||||||
|
|
||||||
for _, t := range tags {
|
for _, t := range tags {
|
||||||
if err := r.Push(ctx, t, desc, dt); err != nil {
|
if err := r.Push(ctx, t, desc, dt); err != nil {
|
||||||
@@ -168,7 +200,7 @@ func parseSources(in []string) ([]*src, error) {
|
|||||||
for i, in := range in {
|
for i, in := range in {
|
||||||
s, err := parseSource(in)
|
s, err := parseSource(in)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, errors.Wrapf(err, "failed to parse source %q, valid sources are digests, refereces and descriptors", in)
|
return nil, errors.Wrapf(err, "failed to parse source %q, valid sources are digests, references and descriptors", in)
|
||||||
}
|
}
|
||||||
out[i] = s
|
out[i] = s
|
||||||
}
|
}
|
||||||
@@ -216,25 +248,39 @@ func parseSource(in string) (*src, error) {
|
|||||||
return &s, nil
|
return &s, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func createCmd(dockerCli command.Cli) *cobra.Command {
|
func createCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
|
||||||
var options createOptions
|
var options createOptions
|
||||||
|
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Use: "create [OPTIONS] [SOURCE] [SOURCE...]",
|
Use: "create [OPTIONS] [SOURCE] [SOURCE...]",
|
||||||
Short: "Create a new image based on source images",
|
Short: "Create a new image based on source images",
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
options.builder = opts.Builder
|
||||||
return runCreate(dockerCli, options, args)
|
return runCreate(dockerCli, options, args)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
|
|
||||||
flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Read source descriptor from file")
|
flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Read source descriptor from file")
|
||||||
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Set reference for new image")
|
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Set reference for new image")
|
||||||
flags.BoolVar(&options.dryrun, "dry-run", false, "Show final image instead of pushing")
|
flags.BoolVar(&options.dryrun, "dry-run", false, "Show final image instead of pushing")
|
||||||
flags.BoolVar(&options.actionAppend, "append", false, "Append to existing manifest")
|
flags.BoolVar(&options.actionAppend, "append", false, "Append to existing manifest")
|
||||||
|
|
||||||
_ = flags
|
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func mergeDesc(d1, d2 ocispec.Descriptor) (ocispec.Descriptor, error) {
|
||||||
|
if d2.Size != 0 && d1.Size != d2.Size {
|
||||||
|
return ocispec.Descriptor{}, errors.Errorf("invalid size mismatch for %s, %d != %d", d1.Digest, d2.Size, d1.Size)
|
||||||
|
}
|
||||||
|
if d2.MediaType != "" {
|
||||||
|
d1.MediaType = d2.MediaType
|
||||||
|
}
|
||||||
|
if len(d2.Annotations) != 0 {
|
||||||
|
d1.Annotations = d2.Annotations // no merge so support removes
|
||||||
|
}
|
||||||
|
if d2.Platform != nil {
|
||||||
|
d1.Platform = d2.Platform // missing items filled in later from image config
|
||||||
|
}
|
||||||
|
return d1, nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -5,6 +5,8 @@ import (
|
|||||||
"os"
|
"os"
|
||||||
|
|
||||||
"github.com/containerd/containerd/images"
|
"github.com/containerd/containerd/images"
|
||||||
|
"github.com/docker/buildx/store"
|
||||||
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/buildx/util/imagetools"
|
"github.com/docker/buildx/util/imagetools"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
@@ -14,15 +16,38 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type inspectOptions struct {
|
type inspectOptions struct {
|
||||||
raw bool
|
raw bool
|
||||||
|
builder string
|
||||||
}
|
}
|
||||||
|
|
||||||
func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
|
func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
r := imagetools.New(imagetools.Opt{
|
txn, release, err := storeutil.GetStore(dockerCli)
|
||||||
Auth: dockerCli.ConfigFile(),
|
if err != nil {
|
||||||
})
|
return err
|
||||||
|
}
|
||||||
|
defer release()
|
||||||
|
|
||||||
|
var ng *store.NodeGroup
|
||||||
|
|
||||||
|
if in.builder != "" {
|
||||||
|
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
r := imagetools.New(imageopt)
|
||||||
|
|
||||||
dt, desc, err := r.Get(ctx, name)
|
dt, desc, err := r.Get(ctx, name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -38,7 +63,7 @@ func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
|
|||||||
// case images.MediaTypeDockerSchema2Manifest, specs.MediaTypeImageManifest:
|
// case images.MediaTypeDockerSchema2Manifest, specs.MediaTypeImageManifest:
|
||||||
// TODO: handle distribution manifest and schema1
|
// TODO: handle distribution manifest and schema1
|
||||||
case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
|
case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
|
||||||
imagetools.PrintManifestList(dt, desc, name, os.Stdout)
|
return imagetools.PrintManifestList(dt, desc, name, os.Stdout)
|
||||||
default:
|
default:
|
||||||
fmt.Printf("%s\n", dt)
|
fmt.Printf("%s\n", dt)
|
||||||
}
|
}
|
||||||
@@ -46,7 +71,7 @@ func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func inspectCmd(dockerCli command.Cli) *cobra.Command {
|
func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
||||||
var options inspectOptions
|
var options inspectOptions
|
||||||
|
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
@@ -54,15 +79,13 @@ func inspectCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
Short: "Show details of image in the registry",
|
Short: "Show details of image in the registry",
|
||||||
Args: cli.ExactArgs(1),
|
Args: cli.ExactArgs(1),
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
options.builder = rootOpts.Builder
|
||||||
return runInspect(dockerCli, options, args[0])
|
return runInspect(dockerCli, options, args[0])
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
|
|
||||||
flags.BoolVar(&options.raw, "raw", false, "Show original JSON manifest")
|
flags.BoolVar(&options.raw, "raw", false, "Show original JSON manifest")
|
||||||
|
|
||||||
_ = flags
|
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,15 +5,19 @@ import (
|
|||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
func RootCmd(dockerCli command.Cli) *cobra.Command {
|
type RootOptions struct {
|
||||||
|
Builder string
|
||||||
|
}
|
||||||
|
|
||||||
|
func RootCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Use: "imagetools",
|
Use: "imagetools",
|
||||||
Short: "Commands to work on images in registry",
|
Short: "Commands to work on images in registry",
|
||||||
}
|
}
|
||||||
|
|
||||||
cmd.AddCommand(
|
cmd.AddCommand(
|
||||||
inspectCmd(dockerCli),
|
inspectCmd(dockerCli, opts),
|
||||||
createCmd(dockerCli),
|
createCmd(dockerCli, opts),
|
||||||
)
|
)
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
|
|||||||
@@ -8,17 +8,13 @@ import (
|
|||||||
"text/tabwriter"
|
"text/tabwriter"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/docker/buildx/build"
|
|
||||||
"github.com/docker/buildx/driver"
|
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
"github.com/docker/buildx/util/progress"
|
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"golang.org/x/sync/errgroup"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type inspectOptions struct {
|
type inspectOptions struct {
|
||||||
@@ -26,23 +22,10 @@ type inspectOptions struct {
|
|||||||
builder string
|
builder string
|
||||||
}
|
}
|
||||||
|
|
||||||
type dinfo struct {
|
|
||||||
di *build.DriverInfo
|
|
||||||
info *driver.Info
|
|
||||||
platforms []specs.Platform
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
type nginfo struct {
|
|
||||||
ng *store.NodeGroup
|
|
||||||
drivers []dinfo
|
|
||||||
err error
|
|
||||||
}
|
|
||||||
|
|
||||||
func runInspect(dockerCli command.Cli, in inspectOptions) error {
|
func runInspect(dockerCli command.Cli, in inspectOptions) error {
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
txn, release, err := getStore(dockerCli)
|
txn, release, err := storeutil.GetStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -51,12 +34,12 @@ func runInspect(dockerCli command.Cli, in inspectOptions) error {
|
|||||||
var ng *store.NodeGroup
|
var ng *store.NodeGroup
|
||||||
|
|
||||||
if in.builder != "" {
|
if in.builder != "" {
|
||||||
ng, err = getNodeGroup(txn, dockerCli, in.builder)
|
ng, err = storeutil.GetNodeGroup(txn, dockerCli, in.builder)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
ng, err = getCurrentInstance(txn, dockerCli)
|
ng, err = storeutil.GetCurrentInstance(txn, dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -82,7 +65,7 @@ func runInspect(dockerCli command.Cli, in inspectOptions) error {
|
|||||||
var bootNgi *nginfo
|
var bootNgi *nginfo
|
||||||
if in.bootstrap {
|
if in.bootstrap {
|
||||||
var ok bool
|
var ok bool
|
||||||
ok, err = boot(ctx, ngi, dockerCli)
|
ok, err = boot(ctx, ngi)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -149,49 +132,7 @@ func inspectCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
}
|
}
|
||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
|
|
||||||
flags.BoolVar(&options.bootstrap, "bootstrap", false, "Ensure builder has booted before inspecting")
|
flags.BoolVar(&options.bootstrap, "bootstrap", false, "Ensure builder has booted before inspecting")
|
||||||
|
|
||||||
_ = flags
|
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
func boot(ctx context.Context, ngi *nginfo, dockerCli command.Cli) (bool, error) {
|
|
||||||
toBoot := make([]int, 0, len(ngi.drivers))
|
|
||||||
for i, d := range ngi.drivers {
|
|
||||||
if d.err != nil || d.di.Err != nil || d.di.Driver == nil || d.info == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if d.info.Status != driver.Running {
|
|
||||||
toBoot = append(toBoot, i)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(toBoot) == 0 {
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
printer := progress.NewPrinter(context.TODO(), os.Stderr, "auto")
|
|
||||||
|
|
||||||
eg, _ := errgroup.WithContext(ctx)
|
|
||||||
for _, idx := range toBoot {
|
|
||||||
func(idx int) {
|
|
||||||
eg.Go(func() error {
|
|
||||||
pw := progress.WithPrefix(printer, ngi.ng.Nodes[idx].Name, len(toBoot) > 1)
|
|
||||||
_, err := driver.Boot(ctx, ngi.drivers[idx].di.Driver, pw)
|
|
||||||
if err != nil {
|
|
||||||
ngi.drivers[idx].err = err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
}(idx)
|
|
||||||
}
|
|
||||||
|
|
||||||
err := eg.Wait()
|
|
||||||
err1 := printer.Wait()
|
|
||||||
if err == nil {
|
|
||||||
err = err1
|
|
||||||
}
|
|
||||||
|
|
||||||
return true, err
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ package commands
|
|||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/util/cobrautil"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/cli/config"
|
"github.com/docker/cli/cli/config"
|
||||||
@@ -48,5 +49,8 @@ func installCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
Hidden: true,
|
Hidden: true,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// hide builder persistent flag for this command
|
||||||
|
cobrautil.HideInheritedFlags(cmd, "builder")
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -10,6 +10,8 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
|
"github.com/docker/buildx/store/storeutil"
|
||||||
|
"github.com/docker/buildx/util/cobrautil"
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
@@ -24,7 +26,7 @@ type lsOptions struct {
|
|||||||
func runLs(dockerCli command.Cli, in lsOptions) error {
|
func runLs(dockerCli command.Cli, in lsOptions) error {
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
txn, release, err := getStore(dockerCli)
|
txn, release, err := storeutil.GetStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -79,7 +81,7 @@ func runLs(dockerCli command.Cli, in lsOptions) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
currentName := "default"
|
currentName := "default"
|
||||||
current, err := getCurrentInstance(txn, dockerCli)
|
current, err := storeutil.GetCurrentInstance(txn, dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -147,5 +149,8 @@ func lsCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// hide builder persistent flag for this command
|
||||||
|
cobrautil.HideInheritedFlags(cmd, "builder")
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -129,7 +129,7 @@ func pruneCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
|
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Use: "prune",
|
Use: "prune",
|
||||||
Short: "Remove build cache ",
|
Short: "Remove build cache",
|
||||||
Args: cli.NoArgs,
|
Args: cli.NoArgs,
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
options.builder = rootOpts.builder
|
options.builder = rootOpts.builder
|
||||||
@@ -139,7 +139,7 @@ func pruneCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones")
|
flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones")
|
||||||
flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'until=24h')")
|
flags.Var(&options.filter, "filter", "Provide filter values (e.g., `until=24h`)")
|
||||||
flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache")
|
flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache")
|
||||||
flags.BoolVar(&options.verbose, "verbose", false, "Provide a more verbose output")
|
flags.BoolVar(&options.verbose, "verbose", false, "Provide a more verbose output")
|
||||||
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
|
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
|
||||||
|
|||||||
@@ -4,6 +4,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
|
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
@@ -11,36 +12,37 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type rmOptions struct {
|
type rmOptions struct {
|
||||||
builder string
|
builder string
|
||||||
|
keepState bool
|
||||||
}
|
}
|
||||||
|
|
||||||
func runRm(dockerCli command.Cli, in rmOptions) error {
|
func runRm(dockerCli command.Cli, in rmOptions) error {
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
txn, release, err := getStore(dockerCli)
|
txn, release, err := storeutil.GetStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer release()
|
defer release()
|
||||||
|
|
||||||
if in.builder != "" {
|
if in.builder != "" {
|
||||||
ng, err := getNodeGroup(txn, dockerCli, in.builder)
|
ng, err := storeutil.GetNodeGroup(txn, dockerCli, in.builder)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err1 := stop(ctx, dockerCli, ng, true)
|
err1 := rm(ctx, dockerCli, ng, in.keepState)
|
||||||
if err := txn.Remove(ng.Name); err != nil {
|
if err := txn.Remove(ng.Name); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return err1
|
return err1
|
||||||
}
|
}
|
||||||
|
|
||||||
ng, err := getCurrentInstance(txn, dockerCli)
|
ng, err := storeutil.GetCurrentInstance(txn, dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if ng != nil {
|
if ng != nil {
|
||||||
err1 := stop(ctx, dockerCli, ng, true)
|
err1 := rm(ctx, dockerCli, ng, in.keepState)
|
||||||
if err := txn.Remove(ng.Name); err != nil {
|
if err := txn.Remove(ng.Name); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -66,10 +68,13 @@ func rmCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
flags := cmd.Flags()
|
||||||
|
flags.BoolVar(&options.keepState, "keep-state", false, "Keep BuildKit state")
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
func stop(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, rm bool) error {
|
func rm(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, keepState bool) error {
|
||||||
dis, err := driversForNodeGroup(ctx, dockerCli, ng, "")
|
dis, err := driversForNodeGroup(ctx, dockerCli, ng, "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -79,34 +84,9 @@ func stop(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, rm bo
|
|||||||
if err := di.Driver.Stop(ctx, true); err != nil {
|
if err := di.Driver.Stop(ctx, true); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if rm {
|
if err := di.Driver.Rm(ctx, true, !keepState); err != nil {
|
||||||
if err := di.Driver.Rm(ctx, true); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if di.Err != nil {
|
|
||||||
err = di.Err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func stopCurrent(ctx context.Context, dockerCli command.Cli, rm bool) error {
|
|
||||||
dis, err := getDefaultDrivers(ctx, dockerCli, false, "")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, di := range dis {
|
|
||||||
if di.Driver != nil {
|
|
||||||
if err := di.Driver.Stop(ctx, true); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if rm {
|
|
||||||
if err := di.Driver.Rm(ctx, true); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if di.Err != nil {
|
if di.Err != nil {
|
||||||
err = di.Err
|
err = di.Err
|
||||||
|
|||||||
@@ -12,7 +12,8 @@ import (
|
|||||||
|
|
||||||
func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Command {
|
func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Command {
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Short: "Build with BuildKit",
|
Short: "Docker Buildx",
|
||||||
|
Long: `Extended build capabilities with BuildKit`,
|
||||||
Use: name,
|
Use: name,
|
||||||
}
|
}
|
||||||
if isPlugin {
|
if isPlugin {
|
||||||
@@ -47,7 +48,7 @@ func addCommands(cmd *cobra.Command, dockerCli command.Cli) {
|
|||||||
versionCmd(dockerCli),
|
versionCmd(dockerCli),
|
||||||
pruneCmd(dockerCli, opts),
|
pruneCmd(dockerCli, opts),
|
||||||
duCmd(dockerCli, opts),
|
duCmd(dockerCli, opts),
|
||||||
imagetoolscmd.RootCmd(dockerCli),
|
imagetoolscmd.RootCmd(dockerCli, imagetoolscmd.RootOptions{Builder: opts.builder}),
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,6 +1,10 @@
|
|||||||
package commands
|
package commands
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/store"
|
||||||
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/moby/buildkit/util/appcontext"
|
"github.com/moby/buildkit/util/appcontext"
|
||||||
@@ -14,32 +18,32 @@ type stopOptions struct {
|
|||||||
func runStop(dockerCli command.Cli, in stopOptions) error {
|
func runStop(dockerCli command.Cli, in stopOptions) error {
|
||||||
ctx := appcontext.Context()
|
ctx := appcontext.Context()
|
||||||
|
|
||||||
txn, release, err := getStore(dockerCli)
|
txn, release, err := storeutil.GetStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer release()
|
defer release()
|
||||||
|
|
||||||
if in.builder != "" {
|
if in.builder != "" {
|
||||||
ng, err := getNodeGroup(txn, dockerCli, in.builder)
|
ng, err := storeutil.GetNodeGroup(txn, dockerCli, in.builder)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := stop(ctx, dockerCli, ng, false); err != nil {
|
if err := stop(ctx, dockerCli, ng); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
ng, err := getCurrentInstance(txn, dockerCli)
|
ng, err := storeutil.GetCurrentInstance(txn, dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if ng != nil {
|
if ng != nil {
|
||||||
return stop(ctx, dockerCli, ng, false)
|
return stop(ctx, dockerCli, ng)
|
||||||
}
|
}
|
||||||
|
|
||||||
return stopCurrent(ctx, dockerCli, false)
|
return stopCurrent(ctx, dockerCli)
|
||||||
}
|
}
|
||||||
|
|
||||||
func stopCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
func stopCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||||
@@ -58,11 +62,41 @@ func stopCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
flags := cmd.Flags()
|
|
||||||
|
|
||||||
// flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, "Output destination (format: type=local,dest=path)")
|
|
||||||
|
|
||||||
_ = flags
|
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func stop(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup) error {
|
||||||
|
dis, err := driversForNodeGroup(ctx, dockerCli, ng, "")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, di := range dis {
|
||||||
|
if di.Driver != nil {
|
||||||
|
if err := di.Driver.Stop(ctx, true); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if di.Err != nil {
|
||||||
|
err = di.Err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func stopCurrent(ctx context.Context, dockerCli command.Cli) error {
|
||||||
|
dis, err := getDefaultDrivers(ctx, dockerCli, false, "")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, di := range dis {
|
||||||
|
if di.Driver != nil {
|
||||||
|
if err := di.Driver.Stop(ctx, true); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if di.Err != nil {
|
||||||
|
err = di.Err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ package commands
|
|||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/util/cobrautil"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/cli/config"
|
"github.com/docker/cli/cli/config"
|
||||||
@@ -54,5 +55,8 @@ func uninstallCmd(dockerCli command.Cli) *cobra.Command {
|
|||||||
Hidden: true,
|
Hidden: true,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// hide builder persistent flag for this command
|
||||||
|
cobrautil.HideInheritedFlags(cmd, "builder")
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ package commands
|
|||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
@@ -16,7 +17,7 @@ type useOptions struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func runUse(dockerCli command.Cli, in useOptions) error {
|
func runUse(dockerCli command.Cli, in useOptions) error {
|
||||||
txn, release, err := getStore(dockerCli)
|
txn, release, err := storeutil.GetStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -28,7 +29,7 @@ func runUse(dockerCli command.Cli, in useOptions) error {
|
|||||||
return errors.Errorf("run `docker context use default` to switch to default context")
|
return errors.Errorf("run `docker context use default` to switch to default context")
|
||||||
}
|
}
|
||||||
if in.builder == "default" || in.builder == dockerCli.CurrentContext() {
|
if in.builder == "default" || in.builder == dockerCli.CurrentContext() {
|
||||||
ep, err := getCurrentEndpoint(dockerCli)
|
ep, err := storeutil.GetCurrentEndpoint(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -51,7 +52,7 @@ func runUse(dockerCli command.Cli, in useOptions) error {
|
|||||||
return errors.Wrapf(err, "failed to find instance %q", in.builder)
|
return errors.Wrapf(err, "failed to find instance %q", in.builder)
|
||||||
}
|
}
|
||||||
|
|
||||||
ep, err := getCurrentEndpoint(dockerCli)
|
ep, err := storeutil.GetCurrentEndpoint(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -79,11 +80,8 @@ func useCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
|||||||
}
|
}
|
||||||
|
|
||||||
flags := cmd.Flags()
|
flags := cmd.Flags()
|
||||||
|
|
||||||
flags.BoolVar(&options.isGlobal, "global", false, "Builder persists context changes")
|
flags.BoolVar(&options.isGlobal, "global", false, "Builder persists context changes")
|
||||||
flags.BoolVar(&options.isDefault, "default", false, "Set builder as default for current context")
|
flags.BoolVar(&options.isDefault, "default", false, "Set builder as default for current context")
|
||||||
|
|
||||||
_ = flags
|
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|||||||
256
commands/util.go
256
commands/util.go
@@ -2,85 +2,32 @@ package commands
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/buildx/build"
|
"github.com/docker/buildx/build"
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
"github.com/docker/buildx/store"
|
"github.com/docker/buildx/store"
|
||||||
|
"github.com/docker/buildx/store/storeutil"
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
"github.com/docker/cli/cli/context/docker"
|
"github.com/docker/cli/cli/context/docker"
|
||||||
"github.com/docker/cli/cli/context/kubernetes"
|
"github.com/docker/cli/cli/context/kubernetes"
|
||||||
|
ctxstore "github.com/docker/cli/cli/context/store"
|
||||||
dopts "github.com/docker/cli/opts"
|
dopts "github.com/docker/cli/opts"
|
||||||
dockerclient "github.com/docker/docker/client"
|
dockerclient "github.com/docker/docker/client"
|
||||||
|
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
|
"k8s.io/client-go/tools/clientcmd"
|
||||||
)
|
)
|
||||||
|
|
||||||
// getStore returns current builder instance store
|
|
||||||
func getStore(dockerCli command.Cli) (*store.Txn, func(), error) {
|
|
||||||
s, err := store.New(getConfigStorePath(dockerCli))
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
return s.Txn()
|
|
||||||
}
|
|
||||||
|
|
||||||
// getConfigStorePath will look for correct configuration store path;
|
|
||||||
// if `$BUILDX_CONFIG` is set - use it, otherwise use parent directory
|
|
||||||
// of Docker config file (i.e. `${DOCKER_CONFIG}/buildx`)
|
|
||||||
func getConfigStorePath(dockerCli command.Cli) string {
|
|
||||||
if buildxConfig := os.Getenv("BUILDX_CONFIG"); buildxConfig != "" {
|
|
||||||
logrus.Debugf("using config store %q based in \"$BUILDX_CONFIG\" environment variable", buildxConfig)
|
|
||||||
return buildxConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
buildxConfig := filepath.Join(filepath.Dir(dockerCli.ConfigFile().Filename), "buildx")
|
|
||||||
logrus.Debugf("using default config store %q", buildxConfig)
|
|
||||||
return buildxConfig
|
|
||||||
}
|
|
||||||
|
|
||||||
// getCurrentEndpoint returns the current default endpoint value
|
|
||||||
func getCurrentEndpoint(dockerCli command.Cli) (string, error) {
|
|
||||||
name := dockerCli.CurrentContext()
|
|
||||||
if name != "default" {
|
|
||||||
return name, nil
|
|
||||||
}
|
|
||||||
de, err := getDockerEndpoint(dockerCli, name)
|
|
||||||
if err != nil {
|
|
||||||
return "", errors.Errorf("docker endpoint for %q not found", name)
|
|
||||||
}
|
|
||||||
return de, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getDockerEndpoint returns docker endpoint string for given context
|
|
||||||
func getDockerEndpoint(dockerCli command.Cli, name string) (string, error) {
|
|
||||||
list, err := dockerCli.ContextStore().List()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
for _, l := range list {
|
|
||||||
if l.Name == name {
|
|
||||||
ep, ok := l.Endpoints["docker"]
|
|
||||||
if !ok {
|
|
||||||
return "", errors.Errorf("context %q does not have a Docker endpoint", name)
|
|
||||||
}
|
|
||||||
typed, ok := ep.(docker.EndpointMeta)
|
|
||||||
if !ok {
|
|
||||||
return "", errors.Errorf("endpoint %q is not of type EndpointMeta, %T", ep, ep)
|
|
||||||
}
|
|
||||||
return typed.Host, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// validateEndpoint validates that endpoint is either a context or a docker host
|
// validateEndpoint validates that endpoint is either a context or a docker host
|
||||||
func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
|
func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
|
||||||
de, err := getDockerEndpoint(dockerCli, ep)
|
de, err := storeutil.GetDockerEndpoint(dockerCli, ep)
|
||||||
if err == nil && de != "" {
|
if err == nil && de != "" {
|
||||||
if ep == "default" {
|
if ep == "default" {
|
||||||
return de, nil
|
return de, nil
|
||||||
@@ -94,60 +41,6 @@ func validateEndpoint(dockerCli command.Cli, ep string) (string, error) {
|
|||||||
return h, nil
|
return h, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// getCurrentInstance finds the current builder instance
|
|
||||||
func getCurrentInstance(txn *store.Txn, dockerCli command.Cli) (*store.NodeGroup, error) {
|
|
||||||
ep, err := getCurrentEndpoint(dockerCli)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
ng, err := txn.Current(ep)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if ng == nil {
|
|
||||||
ng, _ = getNodeGroup(txn, dockerCli, dockerCli.CurrentContext())
|
|
||||||
}
|
|
||||||
|
|
||||||
return ng, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// getNodeGroup returns nodegroup based on the name
|
|
||||||
func getNodeGroup(txn *store.Txn, dockerCli command.Cli, name string) (*store.NodeGroup, error) {
|
|
||||||
ng, err := txn.NodeGroupByName(name)
|
|
||||||
if err != nil {
|
|
||||||
if !os.IsNotExist(errors.Cause(err)) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if ng != nil {
|
|
||||||
return ng, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if name == "default" {
|
|
||||||
name = dockerCli.CurrentContext()
|
|
||||||
}
|
|
||||||
|
|
||||||
list, err := dockerCli.ContextStore().List()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
for _, l := range list {
|
|
||||||
if l.Name == name {
|
|
||||||
return &store.NodeGroup{
|
|
||||||
Name: "default",
|
|
||||||
Nodes: []store.Node{
|
|
||||||
{
|
|
||||||
Name: "default",
|
|
||||||
Endpoint: name,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, errors.Errorf("no builder %q found", name)
|
|
||||||
}
|
|
||||||
|
|
||||||
// driversForNodeGroup returns drivers for a nodegroup instance
|
// driversForNodeGroup returns drivers for a nodegroup instance
|
||||||
func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, contextPathHash string) ([]build.DriverInfo, error) {
|
func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, contextPathHash string) ([]build.DriverInfo, error) {
|
||||||
eg, _ := errgroup.WithContext(ctx)
|
eg, _ := errgroup.WithContext(ctx)
|
||||||
@@ -171,6 +64,10 @@ func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.N
|
|||||||
}
|
}
|
||||||
ng.Driver = f.Name()
|
ng.Driver = f.Name()
|
||||||
}
|
}
|
||||||
|
imageopt, err := storeutil.GetImageConfig(dockerCli, ng)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
for i, n := range ng.Nodes {
|
for i, n := range ng.Nodes {
|
||||||
func(i int, n store.Node) {
|
func(i int, n store.Node) {
|
||||||
@@ -193,12 +90,12 @@ func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.N
|
|||||||
contextStore := dockerCli.ContextStore()
|
contextStore := dockerCli.ContextStore()
|
||||||
|
|
||||||
var kcc driver.KubeClientConfig
|
var kcc driver.KubeClientConfig
|
||||||
kcc, err = kubernetes.ConfigFromContext(n.Endpoint, contextStore)
|
kcc, err = configFromContext(n.Endpoint, contextStore)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// err is returned if n.Endpoint is non-context name like "unix:///var/run/docker.sock".
|
// err is returned if n.Endpoint is non-context name like "unix:///var/run/docker.sock".
|
||||||
// try again with name="default".
|
// try again with name="default".
|
||||||
// FIXME: n should retain real context name.
|
// FIXME: n should retain real context name.
|
||||||
kcc, err = kubernetes.ConfigFromContext("default", contextStore)
|
kcc, err = configFromContext("default", contextStore)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.Error(err)
|
logrus.Error(err)
|
||||||
}
|
}
|
||||||
@@ -220,12 +117,13 @@ func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.N
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, f, dockerapi, dockerCli.ConfigFile(), kcc, n.Flags, n.ConfigFile, assignDriverOptsByDriverInfo(n.DriverOpts, di), contextPathHash)
|
d, err := driver.GetDriver(ctx, "buildx_buildkit_"+n.Name, f, dockerapi, imageopt.Auth, kcc, n.Flags, n.Files, n.DriverOpts, n.Platforms, contextPathHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
di.Err = err
|
di.Err = err
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
di.Driver = d
|
di.Driver = d
|
||||||
|
di.ImageOpt = imageopt
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
}(i, n)
|
}(i, n)
|
||||||
@@ -238,18 +136,20 @@ func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.N
|
|||||||
return dis, nil
|
return dis, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// pass platform as driver opts to provide for some drive, like kubernetes
|
func configFromContext(endpointName string, s ctxstore.Reader) (clientcmd.ClientConfig, error) {
|
||||||
func assignDriverOptsByDriverInfo(opts map[string]string, driveInfo build.DriverInfo) map[string]string {
|
if strings.HasPrefix(endpointName, "kubernetes://") {
|
||||||
m := map[string]string{}
|
u, _ := url.Parse(endpointName)
|
||||||
|
if kubeconfig := u.Query().Get("kubeconfig"); kubeconfig != "" {
|
||||||
if len(driveInfo.Platform) > 0 {
|
_ = os.Setenv(clientcmd.RecommendedConfigPathEnvVar, kubeconfig)
|
||||||
m["platform"] = strings.Join(platformutil.Format(driveInfo.Platform), ",")
|
}
|
||||||
|
rules := clientcmd.NewDefaultClientConfigLoadingRules()
|
||||||
|
apiConfig, err := rules.Load()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return clientcmd.NewDefaultClientConfig(*apiConfig, &clientcmd.ConfigOverrides{}), nil
|
||||||
}
|
}
|
||||||
|
return kubernetes.ConfigFromContext(endpointName, s)
|
||||||
for key := range opts {
|
|
||||||
m[key] = opts[key]
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// clientForEndpoint returns a docker client for an endpoint
|
// clientForEndpoint returns a docker client for an endpoint
|
||||||
@@ -321,7 +221,7 @@ func getInstanceOrDefault(ctx context.Context, dockerCli command.Cli, instance,
|
|||||||
}
|
}
|
||||||
|
|
||||||
func getInstanceByName(ctx context.Context, dockerCli command.Cli, instance, contextPathHash string) ([]build.DriverInfo, error) {
|
func getInstanceByName(ctx context.Context, dockerCli command.Cli, instance, contextPathHash string) ([]build.DriverInfo, error) {
|
||||||
txn, release, err := getStore(dockerCli)
|
txn, release, err := storeutil.GetStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -336,14 +236,14 @@ func getInstanceByName(ctx context.Context, dockerCli command.Cli, instance, con
|
|||||||
|
|
||||||
// getDefaultDrivers returns drivers based on current cli config
|
// getDefaultDrivers returns drivers based on current cli config
|
||||||
func getDefaultDrivers(ctx context.Context, dockerCli command.Cli, defaultOnly bool, contextPathHash string) ([]build.DriverInfo, error) {
|
func getDefaultDrivers(ctx context.Context, dockerCli command.Cli, defaultOnly bool, contextPathHash string) ([]build.DriverInfo, error) {
|
||||||
txn, release, err := getStore(dockerCli)
|
txn, release, err := storeutil.GetStore(dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer release()
|
defer release()
|
||||||
|
|
||||||
if !defaultOnly {
|
if !defaultOnly {
|
||||||
ng, err := getCurrentInstance(txn, dockerCli)
|
ng, err := storeutil.GetCurrentInstance(txn, dockerCli)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -353,14 +253,20 @@ func getDefaultDrivers(ctx context.Context, dockerCli command.Cli, defaultOnly b
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
d, err := driver.GetDriver(ctx, "buildx_buildkit_default", nil, dockerCli.Client(), dockerCli.ConfigFile(), nil, nil, "", nil, contextPathHash)
|
imageopt, err := storeutil.GetImageConfig(dockerCli, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
d, err := driver.GetDriver(ctx, "buildx_buildkit_default", nil, dockerCli.Client(), imageopt.Auth, nil, nil, nil, nil, nil, contextPathHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return []build.DriverInfo{
|
return []build.DriverInfo{
|
||||||
{
|
{
|
||||||
Name: "default",
|
Name: "default",
|
||||||
Driver: d,
|
Driver: d,
|
||||||
|
ImageOpt: imageopt,
|
||||||
},
|
},
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
@@ -418,13 +324,24 @@ func loadNodeGroupData(ctx context.Context, dockerCli command.Cli, ngi *nginfo)
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// skip when multi drivers
|
kubernetesDriverCount := 0
|
||||||
if len(ngi.drivers) == 1 {
|
|
||||||
|
for _, di := range ngi.drivers {
|
||||||
|
if di.info != nil && len(di.info.DynamicNodes) > 0 {
|
||||||
|
kubernetesDriverCount++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
isAllKubernetesDrivers := len(ngi.drivers) == kubernetesDriverCount
|
||||||
|
|
||||||
|
if isAllKubernetesDrivers {
|
||||||
|
var drivers []dinfo
|
||||||
|
var dynamicNodes []store.Node
|
||||||
|
|
||||||
for _, di := range ngi.drivers {
|
for _, di := range ngi.drivers {
|
||||||
// dynamic nodes are used in Kubernetes driver.
|
// dynamic nodes are used in Kubernetes driver.
|
||||||
// Kubernetes pods are dynamically mapped to BuildKit Nodes.
|
// Kubernetes pods are dynamically mapped to BuildKit Nodes.
|
||||||
if di.info != nil && len(di.info.DynamicNodes) > 0 {
|
if di.info != nil && len(di.info.DynamicNodes) > 0 {
|
||||||
var drivers []dinfo
|
|
||||||
for i := 0; i < len(di.info.DynamicNodes); i++ {
|
for i := 0; i < len(di.info.DynamicNodes); i++ {
|
||||||
// all []dinfo share *build.DriverInfo and *driver.Info
|
// all []dinfo share *build.DriverInfo and *driver.Info
|
||||||
diClone := di
|
diClone := di
|
||||||
@@ -433,14 +350,16 @@ func loadNodeGroupData(ctx context.Context, dockerCli command.Cli, ngi *nginfo)
|
|||||||
}
|
}
|
||||||
drivers = append(drivers, di)
|
drivers = append(drivers, di)
|
||||||
}
|
}
|
||||||
// not append (remove the static nodes in the store)
|
dynamicNodes = append(dynamicNodes, di.info.DynamicNodes...)
|
||||||
ngi.ng.Nodes = di.info.DynamicNodes
|
|
||||||
ngi.ng.Dynamic = true
|
|
||||||
ngi.drivers = drivers
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// not append (remove the static nodes in the store)
|
||||||
|
ngi.ng.Nodes = dynamicNodes
|
||||||
|
ngi.drivers = drivers
|
||||||
|
ngi.ng.Dynamic = true
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -458,3 +377,56 @@ func (a *api) DockerAPI(name string) (dockerclient.APIClient, error) {
|
|||||||
}
|
}
|
||||||
return clientForEndpoint(a.dockerCli, name)
|
return clientForEndpoint(a.dockerCli, name)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type dinfo struct {
|
||||||
|
di *build.DriverInfo
|
||||||
|
info *driver.Info
|
||||||
|
platforms []specs.Platform
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
type nginfo struct {
|
||||||
|
ng *store.NodeGroup
|
||||||
|
drivers []dinfo
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
func boot(ctx context.Context, ngi *nginfo) (bool, error) {
|
||||||
|
toBoot := make([]int, 0, len(ngi.drivers))
|
||||||
|
for i, d := range ngi.drivers {
|
||||||
|
if d.err != nil || d.di.Err != nil || d.di.Driver == nil || d.info == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if d.info.Status != driver.Running {
|
||||||
|
toBoot = append(toBoot, i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if len(toBoot) == 0 {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
printer := progress.NewPrinter(context.TODO(), os.Stderr, "auto")
|
||||||
|
|
||||||
|
baseCtx := ctx
|
||||||
|
eg, _ := errgroup.WithContext(ctx)
|
||||||
|
for _, idx := range toBoot {
|
||||||
|
func(idx int) {
|
||||||
|
eg.Go(func() error {
|
||||||
|
pw := progress.WithPrefix(printer, ngi.ng.Nodes[idx].Name, len(toBoot) > 1)
|
||||||
|
_, err := driver.Boot(ctx, baseCtx, ngi.drivers[idx].di.Driver, pw)
|
||||||
|
if err != nil {
|
||||||
|
ngi.drivers[idx].err = err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}(idx)
|
||||||
|
}
|
||||||
|
|
||||||
|
err := eg.Wait()
|
||||||
|
err1 := printer.Wait()
|
||||||
|
if err == nil {
|
||||||
|
err = err1
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, err
|
||||||
|
}
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ package commands
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/util/cobrautil"
|
||||||
"github.com/docker/buildx/version"
|
"github.com/docker/buildx/version"
|
||||||
"github.com/docker/cli/cli"
|
"github.com/docker/cli/cli"
|
||||||
"github.com/docker/cli/cli/command"
|
"github.com/docker/cli/cli/command"
|
||||||
@@ -17,11 +18,15 @@ func runVersion(dockerCli command.Cli) error {
|
|||||||
func versionCmd(dockerCli command.Cli) *cobra.Command {
|
func versionCmd(dockerCli command.Cli) *cobra.Command {
|
||||||
cmd := &cobra.Command{
|
cmd := &cobra.Command{
|
||||||
Use: "version",
|
Use: "version",
|
||||||
Short: "Show buildx version information ",
|
Short: "Show buildx version information",
|
||||||
Args: cli.ExactArgs(0),
|
Args: cli.ExactArgs(0),
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
return runVersion(dockerCli)
|
return runVersion(dockerCli)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// hide builder persistent flag for this command
|
||||||
|
cobrautil.HideInheritedFlags(cmd, "builder")
|
||||||
|
|
||||||
return cmd
|
return cmd
|
||||||
}
|
}
|
||||||
|
|||||||
144
docker-bake.hcl
Normal file
144
docker-bake.hcl
Normal file
@@ -0,0 +1,144 @@
|
|||||||
|
variable "GO_VERSION" {
|
||||||
|
default = "1.17"
|
||||||
|
}
|
||||||
|
variable "BIN_OUT" {
|
||||||
|
default = "./bin"
|
||||||
|
}
|
||||||
|
variable "RELEASE_OUT" {
|
||||||
|
default = "./release-out"
|
||||||
|
}
|
||||||
|
variable "DOCS_FORMATS" {
|
||||||
|
default = "md"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Special target: https://github.com/docker/metadata-action#bake-definition
|
||||||
|
target "meta-helper" {
|
||||||
|
tags = ["docker/buildx-bin:local"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "_common" {
|
||||||
|
args = {
|
||||||
|
GO_VERSION = GO_VERSION
|
||||||
|
BUILDKIT_CONTEXT_KEEP_GIT_DIR = 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
group "default" {
|
||||||
|
targets = ["binaries"]
|
||||||
|
}
|
||||||
|
|
||||||
|
group "validate" {
|
||||||
|
targets = ["lint", "validate-vendor", "validate-docs"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "lint" {
|
||||||
|
inherits = ["_common"]
|
||||||
|
dockerfile = "./hack/dockerfiles/lint.Dockerfile"
|
||||||
|
output = ["type=cacheonly"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "validate-vendor" {
|
||||||
|
inherits = ["_common"]
|
||||||
|
dockerfile = "./hack/dockerfiles/vendor.Dockerfile"
|
||||||
|
target = "validate"
|
||||||
|
output = ["type=cacheonly"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "validate-docs" {
|
||||||
|
inherits = ["_common"]
|
||||||
|
args = {
|
||||||
|
FORMATS = DOCS_FORMATS
|
||||||
|
}
|
||||||
|
dockerfile = "./hack/dockerfiles/docs.Dockerfile"
|
||||||
|
target = "validate"
|
||||||
|
output = ["type=cacheonly"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "validate-authors" {
|
||||||
|
inherits = ["_common"]
|
||||||
|
dockerfile = "./hack/dockerfiles/authors.Dockerfile"
|
||||||
|
target = "validate"
|
||||||
|
output = ["type=cacheonly"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "update-vendor" {
|
||||||
|
inherits = ["_common"]
|
||||||
|
dockerfile = "./hack/dockerfiles/vendor.Dockerfile"
|
||||||
|
target = "update"
|
||||||
|
output = ["."]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "update-docs" {
|
||||||
|
inherits = ["_common"]
|
||||||
|
args = {
|
||||||
|
FORMATS = DOCS_FORMATS
|
||||||
|
}
|
||||||
|
dockerfile = "./hack/dockerfiles/docs.Dockerfile"
|
||||||
|
target = "update"
|
||||||
|
output = ["./docs/reference"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "update-authors" {
|
||||||
|
inherits = ["_common"]
|
||||||
|
dockerfile = "./hack/dockerfiles/authors.Dockerfile"
|
||||||
|
target = "update"
|
||||||
|
output = ["."]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "mod-outdated" {
|
||||||
|
inherits = ["_common"]
|
||||||
|
dockerfile = "./hack/dockerfiles/vendor.Dockerfile"
|
||||||
|
target = "outdated"
|
||||||
|
output = ["type=cacheonly"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "test" {
|
||||||
|
inherits = ["_common"]
|
||||||
|
target = "test-coverage"
|
||||||
|
output = ["./coverage"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "binaries" {
|
||||||
|
inherits = ["_common"]
|
||||||
|
target = "binaries"
|
||||||
|
output = [BIN_OUT]
|
||||||
|
platforms = ["local"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "binaries-cross" {
|
||||||
|
inherits = ["binaries"]
|
||||||
|
platforms = [
|
||||||
|
"darwin/amd64",
|
||||||
|
"darwin/arm64",
|
||||||
|
"linux/amd64",
|
||||||
|
"linux/arm/v6",
|
||||||
|
"linux/arm/v7",
|
||||||
|
"linux/arm64",
|
||||||
|
"linux/ppc64le",
|
||||||
|
"linux/riscv64",
|
||||||
|
"linux/s390x",
|
||||||
|
"windows/amd64",
|
||||||
|
"windows/arm64"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "release" {
|
||||||
|
inherits = ["binaries-cross"]
|
||||||
|
target = "release"
|
||||||
|
output = [RELEASE_OUT]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "image" {
|
||||||
|
inherits = ["meta-helper", "binaries"]
|
||||||
|
output = ["type=image"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "image-cross" {
|
||||||
|
inherits = ["meta-helper", "binaries-cross"]
|
||||||
|
output = ["type=image"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "image-local" {
|
||||||
|
inherits = ["image"]
|
||||||
|
output = ["type=docker"]
|
||||||
|
}
|
||||||
89
docs/generate.go
Normal file
89
docs/generate.go
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/commands"
|
||||||
|
clidocstool "github.com/docker/cli-docs-tool"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"github.com/spf13/pflag"
|
||||||
|
|
||||||
|
// import drivers otherwise factories are empty
|
||||||
|
// for --driver output flag usage
|
||||||
|
_ "github.com/docker/buildx/driver/docker"
|
||||||
|
_ "github.com/docker/buildx/driver/docker-container"
|
||||||
|
_ "github.com/docker/buildx/driver/kubernetes"
|
||||||
|
)
|
||||||
|
|
||||||
|
const defaultSourcePath = "docs/reference/"
|
||||||
|
|
||||||
|
type options struct {
|
||||||
|
source string
|
||||||
|
formats []string
|
||||||
|
}
|
||||||
|
|
||||||
|
func gen(opts *options) error {
|
||||||
|
log.SetFlags(0)
|
||||||
|
|
||||||
|
dockerCLI, err := command.NewDockerCli()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
cmd := &cobra.Command{
|
||||||
|
Use: "docker [OPTIONS] COMMAND [ARG...]",
|
||||||
|
Short: "The base command for the Docker CLI.",
|
||||||
|
DisableAutoGenTag: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd.AddCommand(commands.NewRootCmd("buildx", true, dockerCLI))
|
||||||
|
|
||||||
|
c, err := clidocstool.New(clidocstool.Options{
|
||||||
|
Root: cmd,
|
||||||
|
SourceDir: opts.source,
|
||||||
|
Plugin: true,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, format := range opts.formats {
|
||||||
|
switch format {
|
||||||
|
case "md":
|
||||||
|
if err = c.GenMarkdownTree(cmd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case "yaml":
|
||||||
|
if err = c.GenYamlTree(cmd); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
return errors.Errorf("unknown format %q", format)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func run() error {
|
||||||
|
opts := &options{}
|
||||||
|
flags := pflag.NewFlagSet(os.Args[0], pflag.ContinueOnError)
|
||||||
|
flags.StringVar(&opts.source, "source", defaultSourcePath, "Docs source folder")
|
||||||
|
flags.StringSliceVar(&opts.formats, "formats", []string{}, "Format (md, yaml)")
|
||||||
|
if err := flags.Parse(os.Args[1:]); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if len(opts.formats) == 0 {
|
||||||
|
return errors.New("Docs format required")
|
||||||
|
}
|
||||||
|
return gen(opts)
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
if err := run(); err != nil {
|
||||||
|
log.Printf("ERROR: %+v", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
43
docs/reference/buildx.md
Normal file
43
docs/reference/buildx.md
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
# buildx
|
||||||
|
|
||||||
|
```
|
||||||
|
docker buildx [OPTIONS] COMMAND
|
||||||
|
```
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Extended build capabilities with BuildKit
|
||||||
|
|
||||||
|
### Subcommands
|
||||||
|
|
||||||
|
| Name | Description |
|
||||||
|
| --- | --- |
|
||||||
|
| [`bake`](buildx_bake.md) | Build from a file |
|
||||||
|
| [`build`](buildx_build.md) | Start a build |
|
||||||
|
| [`create`](buildx_create.md) | Create a new builder instance |
|
||||||
|
| [`du`](buildx_du.md) | Disk usage |
|
||||||
|
| [`imagetools`](buildx_imagetools.md) | Commands to work on images in registry |
|
||||||
|
| [`inspect`](buildx_inspect.md) | Inspect current builder instance |
|
||||||
|
| [`install`](buildx_install.md) | Install buildx as a 'docker builder' alias |
|
||||||
|
| [`ls`](buildx_ls.md) | List builder instances |
|
||||||
|
| [`prune`](buildx_prune.md) | Remove build cache |
|
||||||
|
| [`rm`](buildx_rm.md) | Remove a builder instance |
|
||||||
|
| [`stop`](buildx_stop.md) | Stop builder instance |
|
||||||
|
| [`uninstall`](buildx_uninstall.md) | Uninstall the 'docker builder' alias |
|
||||||
|
| [`use`](buildx_use.md) | Set the current builder instance |
|
||||||
|
| [`version`](buildx_version.md) | Show buildx version information |
|
||||||
|
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Description |
|
||||||
|
| --- | --- |
|
||||||
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### <a name="builder"></a> Override the configured builder instance (--builder)
|
||||||
|
|
||||||
|
You can also use the `BUILDX_BUILDER` environment variable.
|
||||||
874
docs/reference/buildx_bake.md
Normal file
874
docs/reference/buildx_bake.md
Normal file
@@ -0,0 +1,874 @@
|
|||||||
|
# buildx bake
|
||||||
|
|
||||||
|
```
|
||||||
|
docker buildx bake [OPTIONS] [TARGET...]
|
||||||
|
```
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Build from a file
|
||||||
|
|
||||||
|
### Aliases
|
||||||
|
|
||||||
|
`bake`, `f`
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Description |
|
||||||
|
| --- | --- |
|
||||||
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
|
| [`-f`](#file), [`--file stringArray`](#file) | Build definition file |
|
||||||
|
| `--load` | Shorthand for `--set=*.output=type=docker` |
|
||||||
|
| `--metadata-file string` | Write build result metadata to the file |
|
||||||
|
| [`--no-cache`](#no-cache) | Do not use cache when building the image |
|
||||||
|
| [`--print`](#print) | Print the options without building |
|
||||||
|
| [`--progress string`](#progress) | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
|
||||||
|
| [`--pull`](#pull) | Always attempt to pull a newer version of the image |
|
||||||
|
| `--push` | Shorthand for `--set=*.output=type=registry` |
|
||||||
|
| [`--set stringArray`](#set) | Override target value (e.g., `targetpattern.key=value`) |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
Bake is a high-level build command. Each specified target will run in parallel
|
||||||
|
as part of the build.
|
||||||
|
|
||||||
|
Read [High-level build options](https://github.com/docker/buildx#high-level-build-options)
|
||||||
|
for introduction.
|
||||||
|
|
||||||
|
Please note that `buildx bake` command may receive backwards incompatible
|
||||||
|
features in the future if needed. We are looking for feedback on improving the
|
||||||
|
command and extending the functionality further.
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### <a name="builder"></a> Override the configured builder instance (--builder)
|
||||||
|
|
||||||
|
Same as [`buildx --builder`](buildx.md#builder).
|
||||||
|
|
||||||
|
### <a name="file"></a> Specify a build definition file (-f, --file)
|
||||||
|
|
||||||
|
By default, `buildx bake` looks for build definition files in the current
|
||||||
|
directory, the following are parsed:
|
||||||
|
|
||||||
|
- `docker-compose.yml`
|
||||||
|
- `docker-compose.yaml`
|
||||||
|
- `docker-bake.json`
|
||||||
|
- `docker-bake.override.json`
|
||||||
|
- `docker-bake.hcl`
|
||||||
|
- `docker-bake.override.hcl`
|
||||||
|
|
||||||
|
Use the `-f` / `--file` option to specify the build definition file to use. The
|
||||||
|
file can be a Docker Compose, JSON or HCL file. If multiple files are specified
|
||||||
|
they are all read and configurations are combined.
|
||||||
|
|
||||||
|
The following example uses a Docker Compose file named `docker-compose.dev.yaml`
|
||||||
|
as build definition file, and builds all targets in the file:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake -f docker-compose.dev.yaml
|
||||||
|
|
||||||
|
[+] Building 66.3s (30/30) FINISHED
|
||||||
|
=> [frontend internal] load build definition from Dockerfile 0.1s
|
||||||
|
=> => transferring dockerfile: 36B 0.0s
|
||||||
|
=> [backend internal] load build definition from Dockerfile 0.2s
|
||||||
|
=> => transferring dockerfile: 3.73kB 0.0s
|
||||||
|
=> [database internal] load build definition from Dockerfile 0.1s
|
||||||
|
=> => transferring dockerfile: 5.77kB 0.0s
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
Pass the names of the targets to build, to build only specific target(s). The
|
||||||
|
following example builds the `backend` and `database` targets that are defined
|
||||||
|
in the `docker-compose.dev.yaml` file, skipping the build for the `frontend`
|
||||||
|
target:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake -f docker-compose.dev.yaml backend database
|
||||||
|
|
||||||
|
[+] Building 2.4s (13/13) FINISHED
|
||||||
|
=> [backend internal] load build definition from Dockerfile 0.1s
|
||||||
|
=> => transferring dockerfile: 81B 0.0s
|
||||||
|
=> [database internal] load build definition from Dockerfile 0.2s
|
||||||
|
=> => transferring dockerfile: 36B 0.0s
|
||||||
|
=> [backend internal] load .dockerignore 0.3s
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also use a remote `git` bake definition:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake "git://github.com/docker/cli#master" --print
|
||||||
|
#1 [internal] load git source git://github.com/docker/cli#master
|
||||||
|
#1 0.686 2776a6d694f988c0c1df61cad4bfac0f54e481c8 refs/heads/master
|
||||||
|
#1 CACHED
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": [
|
||||||
|
"binary"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"binary": {
|
||||||
|
"context": "git://github.com/docker/cli#master",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"args": {
|
||||||
|
"BASE_VARIANT": "alpine",
|
||||||
|
"GO_STRIP": "",
|
||||||
|
"VERSION": ""
|
||||||
|
},
|
||||||
|
"target": "binary",
|
||||||
|
"platforms": [
|
||||||
|
"local"
|
||||||
|
],
|
||||||
|
"output": [
|
||||||
|
"build"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
As you can see the context is fixed to `git://github.com/docker/cli` even if
|
||||||
|
[no context is actually defined](https://github.com/docker/cli/blob/2776a6d694f988c0c1df61cad4bfac0f54e481c8/docker-bake.hcl#L17-L26)
|
||||||
|
in the definition.
|
||||||
|
|
||||||
|
If you want to access the main context for bake command from a bake file
|
||||||
|
that has been imported remotely, you can use the `BAKE_CMD_CONTEXT` builtin var:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ cat https://raw.githubusercontent.com/tonistiigi/buildx/remote-test/docker-bake.hcl
|
||||||
|
target "default" {
|
||||||
|
context = BAKE_CMD_CONTEXT
|
||||||
|
dockerfile-inline = <<EOT
|
||||||
|
FROM alpine
|
||||||
|
WORKDIR /src
|
||||||
|
COPY . .
|
||||||
|
RUN ls -l && stop
|
||||||
|
EOT
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" --print
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": [
|
||||||
|
"default"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"default": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"dockerfile-inline": "FROM alpine\nWORKDIR /src\nCOPY . .\nRUN ls -l \u0026\u0026 stop\n"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ touch foo bar
|
||||||
|
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test"
|
||||||
|
...
|
||||||
|
> [4/4] RUN ls -l && stop:
|
||||||
|
#8 0.101 total 0
|
||||||
|
#8 0.102 -rw-r--r-- 1 root root 0 Jul 27 18:47 bar
|
||||||
|
#8 0.102 -rw-r--r-- 1 root root 0 Jul 27 18:47 foo
|
||||||
|
#8 0.102 /bin/sh: stop: not found
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" "git://github.com/docker/cli#master" --print
|
||||||
|
#1 [internal] load git source git://github.com/tonistiigi/buildx#remote-test
|
||||||
|
#1 0.401 577303add004dd7efeb13434d69ea030d35f7888 refs/heads/remote-test
|
||||||
|
#1 CACHED
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": [
|
||||||
|
"default"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"default": {
|
||||||
|
"context": "git://github.com/docker/cli#master",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"dockerfile-inline": "FROM alpine\nWORKDIR /src\nCOPY . .\nRUN ls -l \u0026\u0026 stop\n"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" "git://github.com/docker/cli#master"
|
||||||
|
...
|
||||||
|
> [4/4] RUN ls -l && stop:
|
||||||
|
#8 0.136 drwxrwxrwx 5 root root 4096 Jul 27 18:31 kubernetes
|
||||||
|
#8 0.136 drwxrwxrwx 3 root root 4096 Jul 27 18:31 man
|
||||||
|
#8 0.136 drwxrwxrwx 2 root root 4096 Jul 27 18:31 opts
|
||||||
|
#8 0.136 -rw-rw-rw- 1 root root 1893 Jul 27 18:31 poule.yml
|
||||||
|
#8 0.136 drwxrwxrwx 7 root root 4096 Jul 27 18:31 scripts
|
||||||
|
#8 0.136 drwxrwxrwx 3 root root 4096 Jul 27 18:31 service
|
||||||
|
#8 0.136 drwxrwxrwx 2 root root 4096 Jul 27 18:31 templates
|
||||||
|
#8 0.136 drwxrwxrwx 10 root root 4096 Jul 27 18:31 vendor
|
||||||
|
#8 0.136 -rwxrwxrwx 1 root root 9620 Jul 27 18:31 vendor.conf
|
||||||
|
#8 0.136 /bin/sh: stop: not found
|
||||||
|
```
|
||||||
|
|
||||||
|
### <a name="no-cache"></a> Do not use cache when building the image (--no-cache)
|
||||||
|
|
||||||
|
Same as `build --no-cache`. Do not use cache when building the image.
|
||||||
|
|
||||||
|
### <a name="print"></a> Print the options without building (--print)
|
||||||
|
|
||||||
|
Prints the resulting options of the targets desired to be built, in a JSON
|
||||||
|
format, without starting a build.
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake -f docker-bake.hcl --print db
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": [
|
||||||
|
"db"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"db": {
|
||||||
|
"context": "./",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"tags": [
|
||||||
|
"docker.io/tiborvass/db"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### <a name="progress"></a> Set type of progress output (--progress)
|
||||||
|
|
||||||
|
Same as [`build --progress`](buildx_build.md#progress). Set type of progress
|
||||||
|
output (auto, plain, tty). Use plain to show container output (default "auto").
|
||||||
|
|
||||||
|
> You can also use the `BUILDKIT_PROGRESS` environment variable to set its value.
|
||||||
|
|
||||||
|
The following example uses `plain` output during the build:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --progress=plain
|
||||||
|
|
||||||
|
#2 [backend internal] load build definition from Dockerfile.test
|
||||||
|
#2 sha256:de70cb0bb6ed8044f7b9b1b53b67f624e2ccfb93d96bb48b70c1fba562489618
|
||||||
|
#2 ...
|
||||||
|
|
||||||
|
#1 [database internal] load build definition from Dockerfile.test
|
||||||
|
#1 sha256:453cb50abd941762900a1212657a35fc4aad107f5d180b0ee9d93d6b74481bce
|
||||||
|
#1 transferring dockerfile: 36B done
|
||||||
|
#1 DONE 0.1s
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
### <a name="pull"></a> Always attempt to pull a newer version of the image (--pull)
|
||||||
|
|
||||||
|
Same as `build --pull`.
|
||||||
|
|
||||||
|
### <a name="set"></a> Override target configurations from command line (--set)
|
||||||
|
|
||||||
|
```
|
||||||
|
--set targetpattern.key[.subkey]=value
|
||||||
|
```
|
||||||
|
|
||||||
|
Override target configurations from command line. The pattern matching syntax
|
||||||
|
is defined in https://golang.org/pkg/path/#Match.
|
||||||
|
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --set target.args.mybuildarg=value
|
||||||
|
$ docker buildx bake --set target.platform=linux/arm64
|
||||||
|
$ docker buildx bake --set foo*.args.mybuildarg=value # overrides build arg for all targets starting with 'foo'
|
||||||
|
$ docker buildx bake --set *.platform=linux/arm64 # overrides platform for all targets
|
||||||
|
$ docker buildx bake --set foo*.no-cache # bypass caching only for targets starting with 'foo'
|
||||||
|
```
|
||||||
|
|
||||||
|
Complete list of overridable fields:
|
||||||
|
`args`, `cache-from`, `cache-to`, `context`, `dockerfile`, `labels`, `no-cache`,
|
||||||
|
`output`, `platform`, `pull`, `secrets`, `ssh`, `tags`, `target`
|
||||||
|
|
||||||
|
### File definition
|
||||||
|
|
||||||
|
In addition to compose files, bake supports a JSON and an equivalent HCL file
|
||||||
|
format for defining build groups and targets.
|
||||||
|
|
||||||
|
A target reflects a single docker build invocation with the same options that
|
||||||
|
you would specify for `docker build`. A group is a grouping of targets.
|
||||||
|
|
||||||
|
Multiple files can include the same target and final build options will be
|
||||||
|
determined by merging them together.
|
||||||
|
|
||||||
|
In the case of compose files, each service corresponds to a target.
|
||||||
|
|
||||||
|
A group can specify its list of targets with the `targets` option. A target can
|
||||||
|
inherit build options by setting the `inherits` option to the list of targets or
|
||||||
|
groups to inherit from.
|
||||||
|
|
||||||
|
Note: Design of bake command is work in progress, the user experience may change
|
||||||
|
based on feedback.
|
||||||
|
|
||||||
|
|
||||||
|
**Example HCL definition**
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
group "default" {
|
||||||
|
targets = ["db", "webapp-dev"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "webapp-dev" {
|
||||||
|
dockerfile = "Dockerfile.webapp"
|
||||||
|
tags = ["docker.io/username/webapp"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "webapp-release" {
|
||||||
|
inherits = ["webapp-dev"]
|
||||||
|
platforms = ["linux/amd64", "linux/arm64"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "db" {
|
||||||
|
dockerfile = "Dockerfile.db"
|
||||||
|
tags = ["docker.io/username/db"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Complete list of valid target fields:
|
||||||
|
|
||||||
|
`args`, `cache-from`, `cache-to`, `context`, `dockerfile`, `inherits`, `labels`,
|
||||||
|
`no-cache`, `output`, `platform`, `pull`, `secrets`, `ssh`, `tags`, `target`
|
||||||
|
|
||||||
|
### Global scope attributes
|
||||||
|
|
||||||
|
You can define global scope attributes in HCL/JSON and use them for code reuse
|
||||||
|
and setting values for variables. This means you can do a "data-only" HCL file
|
||||||
|
with the values you want to set/override and use it in the list of regular
|
||||||
|
output files.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# docker-bake.hcl
|
||||||
|
variable "FOO" {
|
||||||
|
default = "abc"
|
||||||
|
}
|
||||||
|
|
||||||
|
target "app" {
|
||||||
|
args = {
|
||||||
|
v1 = "pre-${FOO}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
You can use this file directly:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --print app
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": [
|
||||||
|
"app"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"app": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"args": {
|
||||||
|
"v1": "pre-abc"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Or create an override configuration file:
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# env.hcl
|
||||||
|
WHOAMI="myuser"
|
||||||
|
FOO="def-${WHOAMI}"
|
||||||
|
```
|
||||||
|
|
||||||
|
And invoke bake together with both of the files:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake -f docker-bake.hcl -f env.hcl --print app
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": [
|
||||||
|
"app"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"app": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"args": {
|
||||||
|
"v1": "pre-def-myuser"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### HCL variables and functions
|
||||||
|
|
||||||
|
Similar to how Terraform provides a way to [define variables](https://www.terraform.io/docs/configuration/variables.html#declaring-an-input-variable),
|
||||||
|
the HCL file format also supports variable block definitions. These can be used
|
||||||
|
to define variables with values provided by the current environment, or a
|
||||||
|
default value when unset.
|
||||||
|
|
||||||
|
A [set of generally useful functions](https://github.com/docker/buildx/blob/master/bake/hclparser/stdlib.go)
|
||||||
|
provided by [go-cty](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib)
|
||||||
|
are available for use in HCL files. In addition, [user defined functions](https://github.com/hashicorp/hcl/tree/main/ext/userfunc)
|
||||||
|
are also supported.
|
||||||
|
|
||||||
|
#### Using interpolation to tag an image with the git sha
|
||||||
|
|
||||||
|
Bake supports variable blocks which are assigned to matching environment
|
||||||
|
variables or default values.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# docker-bake.hcl
|
||||||
|
variable "TAG" {
|
||||||
|
default = "latest"
|
||||||
|
}
|
||||||
|
|
||||||
|
group "default" {
|
||||||
|
targets = ["webapp"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "webapp" {
|
||||||
|
tags = ["docker.io/username/webapp:${TAG}"]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --print webapp
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": [
|
||||||
|
"webapp"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"webapp": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"tags": [
|
||||||
|
"docker.io/username/webapp:latest"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ TAG=$(git rev-parse --short HEAD) docker buildx bake --print webapp
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": [
|
||||||
|
"webapp"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"webapp": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"tags": [
|
||||||
|
"docker.io/username/webapp:985e9e9"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Using the `add` function
|
||||||
|
|
||||||
|
You can use [`go-cty` stdlib functions]([go-cty](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib)).
|
||||||
|
Here we are using the `add` function.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# docker-bake.hcl
|
||||||
|
variable "TAG" {
|
||||||
|
default = "latest"
|
||||||
|
}
|
||||||
|
|
||||||
|
group "default" {
|
||||||
|
targets = ["webapp"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "webapp" {
|
||||||
|
args = {
|
||||||
|
buildno = "${add(123, 1)}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --print webapp
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": [
|
||||||
|
"webapp"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"webapp": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"args": {
|
||||||
|
"buildno": "124"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Defining an `increment` function
|
||||||
|
|
||||||
|
It also supports [user defined functions](https://github.com/hashicorp/hcl/tree/main/ext/userfunc).
|
||||||
|
The following example defines a simple an `increment` function.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# docker-bake.hcl
|
||||||
|
function "increment" {
|
||||||
|
params = [number]
|
||||||
|
result = number + 1
|
||||||
|
}
|
||||||
|
|
||||||
|
group "default" {
|
||||||
|
targets = ["webapp"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "webapp" {
|
||||||
|
args = {
|
||||||
|
buildno = "${increment(123)}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --print webapp
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": [
|
||||||
|
"webapp"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"webapp": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"args": {
|
||||||
|
"buildno": "124"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Only adding tags if a variable is not empty using an `notequal`
|
||||||
|
|
||||||
|
Here we are using the conditional `notequal` function which is just for
|
||||||
|
symmetry with the `equal` one.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# docker-bake.hcl
|
||||||
|
variable "TAG" {default="" }
|
||||||
|
|
||||||
|
group "default" {
|
||||||
|
targets = [
|
||||||
|
"webapp",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "webapp" {
|
||||||
|
context="."
|
||||||
|
dockerfile="Dockerfile"
|
||||||
|
tags = [
|
||||||
|
"my-image:latest",
|
||||||
|
notequal("",TAG) ? "my-image:${TAG}": "",
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --print webapp
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": [
|
||||||
|
"webapp"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"webapp": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"tags": [
|
||||||
|
"my-image:latest"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Using variables in functions
|
||||||
|
|
||||||
|
You can refer variables to other variables like the target blocks can. Stdlib
|
||||||
|
functions can also be called but user functions can't at the moment.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# docker-bake.hcl
|
||||||
|
variable "REPO" {
|
||||||
|
default = "user/repo"
|
||||||
|
}
|
||||||
|
|
||||||
|
function "tag" {
|
||||||
|
params = [tag]
|
||||||
|
result = ["${REPO}:${tag}"]
|
||||||
|
}
|
||||||
|
|
||||||
|
target "webapp" {
|
||||||
|
tags = tag("v1")
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --print webapp
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": [
|
||||||
|
"webapp"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"webapp": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"tags": [
|
||||||
|
"user/repo:v1"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Using variables in variables across files
|
||||||
|
|
||||||
|
When multiple files are specified, one file can use variables defined in
|
||||||
|
another file.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# docker-bake1.hcl
|
||||||
|
variable "FOO" {
|
||||||
|
default = upper("${BASE}def")
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "BAR" {
|
||||||
|
default = "-${FOO}-"
|
||||||
|
}
|
||||||
|
|
||||||
|
target "app" {
|
||||||
|
args = {
|
||||||
|
v1 = "pre-${BAR}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# docker-bake2.hcl
|
||||||
|
variable "BASE" {
|
||||||
|
default = "abc"
|
||||||
|
}
|
||||||
|
|
||||||
|
target "app" {
|
||||||
|
args = {
|
||||||
|
v2 = "${FOO}-post"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake -f docker-bake1.hcl -f docker-bake2.hcl --print app
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": [
|
||||||
|
"app"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"app": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"args": {
|
||||||
|
"v1": "pre--ABCDEF-",
|
||||||
|
"v2": "ABCDEF-post"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Using typed variables
|
||||||
|
|
||||||
|
Non-string variables are also accepted. The value passed with env is parsed
|
||||||
|
into suitable type first.
|
||||||
|
|
||||||
|
```hcl
|
||||||
|
# docker-bake.hcl
|
||||||
|
variable "FOO" {
|
||||||
|
default = 3
|
||||||
|
}
|
||||||
|
|
||||||
|
variable "IS_FOO" {
|
||||||
|
default = true
|
||||||
|
}
|
||||||
|
|
||||||
|
target "app" {
|
||||||
|
args = {
|
||||||
|
v1 = FOO > 5 ? "higher" : "lower"
|
||||||
|
v2 = IS_FOO ? "yes" : "no"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --print app
|
||||||
|
{
|
||||||
|
"group": {
|
||||||
|
"default": [
|
||||||
|
"app"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"target": {
|
||||||
|
"app": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "Dockerfile",
|
||||||
|
"args": {
|
||||||
|
"v1": "lower",
|
||||||
|
"v2": "yes"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Extension field with Compose
|
||||||
|
|
||||||
|
[Special extension](https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension)
|
||||||
|
field `x-bake` can be used in your compose file to evaluate fields that are not
|
||||||
|
(yet) available in the [build definition](https://github.com/compose-spec/compose-spec/blob/master/build.md#build-definition).
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
# docker-compose.yml
|
||||||
|
services:
|
||||||
|
addon:
|
||||||
|
image: ct-addon:bar
|
||||||
|
build:
|
||||||
|
context: .
|
||||||
|
dockerfile: ./Dockerfile
|
||||||
|
args:
|
||||||
|
CT_ECR: foo
|
||||||
|
CT_TAG: bar
|
||||||
|
x-bake:
|
||||||
|
tags:
|
||||||
|
- ct-addon:foo
|
||||||
|
- ct-addon:alp
|
||||||
|
platforms:
|
||||||
|
- linux/amd64
|
||||||
|
- linux/arm64
|
||||||
|
cache-from:
|
||||||
|
- user/app:cache
|
||||||
|
- type=local,src=path/to/cache
|
||||||
|
cache-to: type=local,dest=path/to/cache
|
||||||
|
pull: true
|
||||||
|
|
||||||
|
aws:
|
||||||
|
image: ct-fake-aws:bar
|
||||||
|
build:
|
||||||
|
dockerfile: ./aws.Dockerfile
|
||||||
|
args:
|
||||||
|
CT_ECR: foo
|
||||||
|
CT_TAG: bar
|
||||||
|
x-bake:
|
||||||
|
secret:
|
||||||
|
- id=mysecret,src=./secret
|
||||||
|
- id=mysecret2,src=./secret2
|
||||||
|
platforms: linux/arm64
|
||||||
|
output: type=docker
|
||||||
|
no-cache: true
|
||||||
|
```
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx bake --print
|
||||||
|
{
|
||||||
|
"target": {
|
||||||
|
"addon": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "./Dockerfile",
|
||||||
|
"args": {
|
||||||
|
"CT_ECR": "foo",
|
||||||
|
"CT_TAG": "bar"
|
||||||
|
},
|
||||||
|
"tags": [
|
||||||
|
"ct-addon:foo",
|
||||||
|
"ct-addon:alp"
|
||||||
|
],
|
||||||
|
"cache-from": [
|
||||||
|
"user/app:cache",
|
||||||
|
"type=local,src=path/to/cache"
|
||||||
|
],
|
||||||
|
"cache-to": [
|
||||||
|
"type=local,dest=path/to/cache"
|
||||||
|
],
|
||||||
|
"platforms": [
|
||||||
|
"linux/amd64",
|
||||||
|
"linux/arm64"
|
||||||
|
],
|
||||||
|
"pull": true
|
||||||
|
},
|
||||||
|
"aws": {
|
||||||
|
"context": ".",
|
||||||
|
"dockerfile": "./aws.Dockerfile",
|
||||||
|
"args": {
|
||||||
|
"CT_ECR": "foo",
|
||||||
|
"CT_TAG": "bar"
|
||||||
|
},
|
||||||
|
"tags": [
|
||||||
|
"ct-fake-aws:bar"
|
||||||
|
],
|
||||||
|
"secret": [
|
||||||
|
"id=mysecret,src=./secret",
|
||||||
|
"id=mysecret2,src=./secret2"
|
||||||
|
],
|
||||||
|
"platforms": [
|
||||||
|
"linux/arm64"
|
||||||
|
],
|
||||||
|
"output": [
|
||||||
|
"type=docker"
|
||||||
|
],
|
||||||
|
"no-cache": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Complete list of valid fields for `x-bake`:
|
||||||
|
|
||||||
|
`tags`, `cache-from`, `cache-to`, `secret`, `ssh`, `platforms`, `output`,
|
||||||
|
`pull`, `no-cache`
|
||||||
|
|
||||||
|
### Built-in variables
|
||||||
|
|
||||||
|
* `BAKE_CMD_CONTEXT` can be used to access the main `context` for bake command
|
||||||
|
from a bake file that has been [imported remotely](#file).
|
||||||
|
* `BAKE_LOCAL_PLATFORM` returns the current platform's default platform
|
||||||
|
specification (e.g. `linux/amd64`).
|
||||||
347
docs/reference/buildx_build.md
Normal file
347
docs/reference/buildx_build.md
Normal file
@@ -0,0 +1,347 @@
|
|||||||
|
# buildx build
|
||||||
|
|
||||||
|
```
|
||||||
|
docker buildx build [OPTIONS] PATH | URL | -
|
||||||
|
```
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Start a build
|
||||||
|
|
||||||
|
### Aliases
|
||||||
|
|
||||||
|
`build`, `b`
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Description |
|
||||||
|
| --- | --- |
|
||||||
|
| [`--add-host stringSlice`](https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host) | Add a custom host-to-IP mapping (format: `host:ip`) |
|
||||||
|
| [`--allow stringSlice`](#allow) | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
||||||
|
| [`--build-arg stringArray`](https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg) | Set build-time variables |
|
||||||
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
|
| [`--cache-from stringArray`](#cache-from) | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`) |
|
||||||
|
| [`--cache-to stringArray`](#cache-to) | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`) |
|
||||||
|
| [`--cgroup-parent string`](https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent) | Optional parent cgroup for the container |
|
||||||
|
| [`-f`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f), [`--file string`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f) | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
||||||
|
| `--iidfile string` | Write the image ID to the file |
|
||||||
|
| `--label stringArray` | Set metadata for an image |
|
||||||
|
| [`--load`](#load) | Shorthand for `--output=type=docker` |
|
||||||
|
| `--metadata-file string` | Write build result metadata to the file |
|
||||||
|
| `--network string` | Set the networking mode for the RUN instructions during build |
|
||||||
|
| `--no-cache` | Do not use cache when building the image |
|
||||||
|
| [`-o`](#output), [`--output stringArray`](#output) | Output destination (format: `type=local,dest=path`) |
|
||||||
|
| [`--platform stringArray`](#platform) | Set target platform for build |
|
||||||
|
| [`--progress string`](#progress) | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
|
||||||
|
| `--pull` | Always attempt to pull a newer version of the image |
|
||||||
|
| [`--push`](#push) | Shorthand for `--output=type=registry` |
|
||||||
|
| `-q`, `--quiet` | Suppress the build output and print image ID on success |
|
||||||
|
| `--secret stringArray` | Secret file to expose to the build (format: `id=mysecret,src=/local/secret`) |
|
||||||
|
| [`--shm-size bytes`](#shm-size) | Size of `/dev/shm` |
|
||||||
|
| `--ssh stringArray` | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
|
||||||
|
| [`-t`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t), [`--tag stringArray`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t) | Name and optionally a tag (format: `name:tag`) |
|
||||||
|
| [`--target string`](https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target) | Set the target build stage to build. |
|
||||||
|
| [`--ulimit ulimit`](#ulimit) | Ulimit options |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
The `buildx build` command starts a build using BuildKit. This command is similar
|
||||||
|
to the UI of `docker build` command and takes the same flags and arguments.
|
||||||
|
|
||||||
|
For documentation on most of these flags, refer to the [`docker build`
|
||||||
|
documentation](https://docs.docker.com/engine/reference/commandline/build/). In
|
||||||
|
here we’ll document a subset of the new flags.
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### <a name="builder"></a> Override the configured builder instance (--builder)
|
||||||
|
|
||||||
|
Same as [`buildx --builder`](buildx.md#builder).
|
||||||
|
|
||||||
|
### <a name="platform"></a> Set the target platforms for the build (--platform)
|
||||||
|
|
||||||
|
```
|
||||||
|
--platform=value[,value]
|
||||||
|
```
|
||||||
|
|
||||||
|
Set the target platform for the build. All `FROM` commands inside the Dockerfile
|
||||||
|
without their own `--platform` flag will pull base images for this platform and
|
||||||
|
this value will also be the platform of the resulting image. The default value
|
||||||
|
will be the current platform of the buildkit daemon.
|
||||||
|
|
||||||
|
When using `docker-container` driver with `buildx`, this flag can accept multiple
|
||||||
|
values as an input separated by a comma. With multiple values the result will be
|
||||||
|
built for all of the specified platforms and joined together into a single manifest
|
||||||
|
list.
|
||||||
|
|
||||||
|
If the `Dockerfile` needs to invoke the `RUN` command, the builder needs runtime
|
||||||
|
support for the specified platform. In a clean setup, you can only execute `RUN`
|
||||||
|
commands for your system architecture.
|
||||||
|
If your kernel supports [`binfmt_misc`](https://en.wikipedia.org/wiki/Binfmt_misc)
|
||||||
|
launchers for secondary architectures, buildx will pick them up automatically.
|
||||||
|
Docker desktop releases come with `binfmt_misc` automatically configured for `arm64`
|
||||||
|
and `arm` architectures. You can see what runtime platforms your current builder
|
||||||
|
instance supports by running `docker buildx inspect --bootstrap`.
|
||||||
|
|
||||||
|
Inside a `Dockerfile`, you can access the current platform value through
|
||||||
|
`TARGETPLATFORM` build argument. Please refer to the [`docker build`
|
||||||
|
documentation](https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope)
|
||||||
|
for the full description of automatic platform argument variants.
|
||||||
|
|
||||||
|
The formatting for the platform specifier is defined in the [containerd source
|
||||||
|
code](https://github.com/containerd/containerd/blob/v1.4.3/platforms/platforms.go#L63).
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx build --platform=linux/arm64 .
|
||||||
|
$ docker buildx build --platform=linux/amd64,linux/arm64,linux/arm/v7 .
|
||||||
|
$ docker buildx build --platform=darwin .
|
||||||
|
```
|
||||||
|
|
||||||
|
### <a name="progress"></a> Set type of progress output (--progress)
|
||||||
|
|
||||||
|
```
|
||||||
|
--progress=VALUE
|
||||||
|
```
|
||||||
|
|
||||||
|
Set type of progress output (auto, plain, tty). Use plain to show container
|
||||||
|
output (default "auto").
|
||||||
|
|
||||||
|
> You can also use the `BUILDKIT_PROGRESS` environment variable to set
|
||||||
|
> its value.
|
||||||
|
|
||||||
|
The following example uses `plain` output during the build:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx build --load --progress=plain .
|
||||||
|
|
||||||
|
#1 [internal] load build definition from Dockerfile
|
||||||
|
#1 transferring dockerfile: 227B 0.0s done
|
||||||
|
#1 DONE 0.1s
|
||||||
|
|
||||||
|
#2 [internal] load .dockerignore
|
||||||
|
#2 transferring context: 129B 0.0s done
|
||||||
|
#2 DONE 0.0s
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
### <a name="output"></a> Set the export action for the build result (-o, --output)
|
||||||
|
|
||||||
|
```
|
||||||
|
-o, --output=[PATH,-,type=TYPE[,KEY=VALUE]]
|
||||||
|
```
|
||||||
|
|
||||||
|
Sets the export action for the build result. In `docker build` all builds finish
|
||||||
|
by creating a container image and exporting it to `docker images`. `buildx` makes
|
||||||
|
this step configurable allowing results to be exported directly to the client,
|
||||||
|
oci image tarballs, registry etc.
|
||||||
|
|
||||||
|
Buildx with `docker` driver currently only supports local, tarball exporter and
|
||||||
|
image exporter. `docker-container` driver supports all the exporters.
|
||||||
|
|
||||||
|
If just the path is specified as a value, `buildx` will use the local exporter
|
||||||
|
with this path as the destination. If the value is "-", `buildx` will use `tar`
|
||||||
|
exporter and write to `stdout`.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx build -o . .
|
||||||
|
$ docker buildx build -o outdir .
|
||||||
|
$ docker buildx build -o - - > out.tar
|
||||||
|
$ docker buildx build -o type=docker .
|
||||||
|
$ docker buildx build -o type=docker,dest=- . > myimage.tar
|
||||||
|
$ docker buildx build -t tonistiigi/foo -o type=registry
|
||||||
|
```
|
||||||
|
|
||||||
|
Supported exported types are:
|
||||||
|
|
||||||
|
#### `local`
|
||||||
|
|
||||||
|
The `local` export type writes all result files to a directory on the client. The
|
||||||
|
new files will be owned by the current user. On multi-platform builds, all results
|
||||||
|
will be put in subdirectories by their platform.
|
||||||
|
|
||||||
|
Attribute key:
|
||||||
|
|
||||||
|
- `dest` - destination directory where files will be written
|
||||||
|
|
||||||
|
#### `tar`
|
||||||
|
|
||||||
|
The `tar` export type writes all result files as a single tarball on the client.
|
||||||
|
On multi-platform builds all results will be put in subdirectories by their platform.
|
||||||
|
|
||||||
|
Attribute key:
|
||||||
|
|
||||||
|
- `dest` - destination path where tarball will be written. "-" writes to stdout.
|
||||||
|
|
||||||
|
#### `oci`
|
||||||
|
|
||||||
|
The `oci` export type writes the result image or manifest list as an [OCI image
|
||||||
|
layout](https://github.com/opencontainers/image-spec/blob/v1.0.1/image-layout.md)
|
||||||
|
tarball on the client.
|
||||||
|
|
||||||
|
Attribute key:
|
||||||
|
|
||||||
|
- `dest` - destination path where tarball will be written. "-" writes to stdout.
|
||||||
|
|
||||||
|
#### `docker`
|
||||||
|
|
||||||
|
The `docker` export type writes the single-platform result image as a [Docker image
|
||||||
|
specification](https://github.com/docker/docker/blob/v20.10.2/image/spec/v1.2.md)
|
||||||
|
tarball on the client. Tarballs created by this exporter are also OCI compatible.
|
||||||
|
|
||||||
|
Currently, multi-platform images cannot be exported with the `docker` export type.
|
||||||
|
The most common use case for multi-platform images is to directly push to a registry
|
||||||
|
(see [`registry`](#registry)).
|
||||||
|
|
||||||
|
Attribute keys:
|
||||||
|
|
||||||
|
- `dest` - destination path where tarball will be written. If not specified the
|
||||||
|
tar will be loaded automatically to the current docker instance.
|
||||||
|
- `context` - name for the docker context where to import the result
|
||||||
|
|
||||||
|
#### `image`
|
||||||
|
|
||||||
|
The `image` exporter writes the build result as an image or a manifest list. When
|
||||||
|
using `docker` driver the image will appear in `docker images`. Optionally, image
|
||||||
|
can be automatically pushed to a registry by specifying attributes.
|
||||||
|
|
||||||
|
Attribute keys:
|
||||||
|
|
||||||
|
- `name` - name (references) for the new image.
|
||||||
|
- `push` - boolean to automatically push the image.
|
||||||
|
|
||||||
|
#### `registry`
|
||||||
|
|
||||||
|
The `registry` exporter is a shortcut for `type=image,push=true`.
|
||||||
|
|
||||||
|
|
||||||
|
### <a name="push"></a> Push the build result to a registry (--push)
|
||||||
|
|
||||||
|
Shorthand for [`--output=type=registry`](#registry). Will automatically push the
|
||||||
|
build result to registry.
|
||||||
|
|
||||||
|
### <a name="load"></a> Load the single-platform build result to `docker images` (--load)
|
||||||
|
|
||||||
|
Shorthand for [`--output=type=docker`](#docker). Will automatically load the
|
||||||
|
single-platform build result to `docker images`.
|
||||||
|
|
||||||
|
### <a name="cache-from"></a> Use an external cache source for a build (--cache-from)
|
||||||
|
|
||||||
|
```
|
||||||
|
--cache-from=[NAME|type=TYPE[,KEY=VALUE]]
|
||||||
|
```
|
||||||
|
|
||||||
|
Use an external cache source for a build. Supported types are `registry`,
|
||||||
|
`local` and `gha`.
|
||||||
|
|
||||||
|
- [`registry` source](https://github.com/moby/buildkit#registry-push-image-and-cache-separately)
|
||||||
|
can import cache from a cache manifest or (special) image configuration on the
|
||||||
|
registry.
|
||||||
|
- [`local` source](https://github.com/moby/buildkit#local-directory-1) can
|
||||||
|
import cache from local files previously exported with `--cache-to`.
|
||||||
|
- [`gha` source](https://github.com/moby/buildkit#github-actions-cache-experimental)
|
||||||
|
can import cache from a previously exported cache with `--cache-to` in your
|
||||||
|
GitHub repository
|
||||||
|
|
||||||
|
If no type is specified, `registry` exporter is used with a specified reference.
|
||||||
|
|
||||||
|
`docker` driver currently only supports importing build cache from the registry.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx build --cache-from=user/app:cache .
|
||||||
|
$ docker buildx build --cache-from=user/app .
|
||||||
|
$ docker buildx build --cache-from=type=registry,ref=user/app .
|
||||||
|
$ docker buildx build --cache-from=type=local,src=path/to/cache .
|
||||||
|
$ docker buildx build --cache-from=type=gha .
|
||||||
|
```
|
||||||
|
|
||||||
|
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
|
||||||
|
|
||||||
|
### <a name="cache-to"></a> Export build cache to an external cache destination (--cache-to)
|
||||||
|
|
||||||
|
```
|
||||||
|
--cache-to=[NAME|type=TYPE[,KEY=VALUE]]
|
||||||
|
```
|
||||||
|
|
||||||
|
Export build cache to an external cache destination. Supported types are
|
||||||
|
`registry`, `local`, `inline` and `gha`.
|
||||||
|
|
||||||
|
- [`registry` type](https://github.com/moby/buildkit#registry-push-image-and-cache-separately) exports build cache to a cache manifest in the registry.
|
||||||
|
- [`local` type](https://github.com/moby/buildkit#local-directory-1) type
|
||||||
|
exports cache to a local directory on the client.
|
||||||
|
- [`inline` type](https://github.com/moby/buildkit#inline-push-image-and-cache-together)
|
||||||
|
type writes the cache metadata into the image configuration.
|
||||||
|
- [`gha` type](https://github.com/moby/buildkit#github-actions-cache-experimental)
|
||||||
|
type exports cache through the [GitHub Actions Cache service API](https://github.com/tonistiigi/go-actions-cache/blob/master/api.md#authentication).
|
||||||
|
|
||||||
|
`docker` driver currently only supports exporting inline cache metadata to image
|
||||||
|
configuration. Alternatively, `--build-arg BUILDKIT_INLINE_CACHE=1` can be used
|
||||||
|
to trigger inline cache exporter.
|
||||||
|
|
||||||
|
Attribute key:
|
||||||
|
|
||||||
|
- `mode` - Specifies how many layers are exported with the cache. `min` only
|
||||||
|
exports layers already in the final build stage, `max` exports layers for
|
||||||
|
all stages. Metadata is always exported for the whole build.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx build --cache-to=user/app:cache .
|
||||||
|
$ docker buildx build --cache-to=type=inline .
|
||||||
|
$ docker buildx build --cache-to=type=registry,ref=user/app .
|
||||||
|
$ docker buildx build --cache-to=type=local,dest=path/to/cache .
|
||||||
|
$ docker buildx build --cache-to=type=gha .
|
||||||
|
```
|
||||||
|
|
||||||
|
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
|
||||||
|
|
||||||
|
### <a name="allow"></a> Allow extra privileged entitlement (--allow)
|
||||||
|
|
||||||
|
```
|
||||||
|
--allow=ENTITLEMENT
|
||||||
|
```
|
||||||
|
|
||||||
|
Allow extra privileged entitlement. List of entitlements:
|
||||||
|
|
||||||
|
- `network.host` - Allows executions with host networking.
|
||||||
|
- `security.insecure` - Allows executions without sandbox. See
|
||||||
|
[related Dockerfile extensions](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md#run---securityinsecuresandbox).
|
||||||
|
|
||||||
|
For entitlements to be enabled, the `buildkitd` daemon also needs to allow them
|
||||||
|
with `--allow-insecure-entitlement` (see [`create --buildkitd-flags`](buildx_create.md#buildkitd-flags))
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx create --use --name insecure-builder --buildkitd-flags '--allow-insecure-entitlement security.insecure'
|
||||||
|
$ docker buildx build --allow security.insecure .
|
||||||
|
```
|
||||||
|
|
||||||
|
### <a name="shm-size"></a> Size of `/dev/shm` (--shm-size)
|
||||||
|
|
||||||
|
The format is `<number><unit>`. `number` must be greater than `0`. Unit is
|
||||||
|
optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g`
|
||||||
|
(gigabytes). If you omit the unit, the system uses bytes.
|
||||||
|
|
||||||
|
### <a name="ulimit"></a> Set ulimits (--ulimit)
|
||||||
|
|
||||||
|
`--ulimit` is specified with a soft and hard limit as such:
|
||||||
|
`<type>=<soft limit>[:<hard limit>]`, for example:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx build --ulimit nofile=1024:1024 .
|
||||||
|
```
|
||||||
|
|
||||||
|
> **Note**
|
||||||
|
>
|
||||||
|
> If you do not provide a `hard limit`, the `soft limit` is used
|
||||||
|
> for both values. If no `ulimits` are set, they are inherited from
|
||||||
|
> the default `ulimits` set on the daemon.
|
||||||
229
docs/reference/buildx_create.md
Normal file
229
docs/reference/buildx_create.md
Normal file
@@ -0,0 +1,229 @@
|
|||||||
|
# buildx create
|
||||||
|
|
||||||
|
```
|
||||||
|
docker buildx create [OPTIONS] [CONTEXT|ENDPOINT]
|
||||||
|
```
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Create a new builder instance
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Description |
|
||||||
|
| --- | --- |
|
||||||
|
| [`--append`](#append) | Append a node to builder instead of changing it |
|
||||||
|
| `--bootstrap` | Boot builder after creation |
|
||||||
|
| [`--buildkitd-flags string`](#buildkitd-flags) | Flags for buildkitd daemon |
|
||||||
|
| [`--config string`](#config) | BuildKit config file |
|
||||||
|
| [`--driver string`](#driver) | Driver to use (available: `docker`, `docker-container`, `kubernetes`) |
|
||||||
|
| [`--driver-opt stringArray`](#driver-opt) | Options for the driver |
|
||||||
|
| [`--leave`](#leave) | Remove a node from builder instead of changing it |
|
||||||
|
| [`--name string`](#name) | Builder instance name |
|
||||||
|
| [`--node string`](#node) | Create/modify node with given name |
|
||||||
|
| [`--platform stringArray`](#platform) | Fixed platforms for current node |
|
||||||
|
| [`--use`](#use) | Set the current builder instance |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
Create makes a new builder instance pointing to a docker context or endpoint,
|
||||||
|
where context is the name of a context from `docker context ls` and endpoint is
|
||||||
|
the address for docker socket (eg. `DOCKER_HOST` value).
|
||||||
|
|
||||||
|
By default, the current Docker configuration is used for determining the
|
||||||
|
context/endpoint value.
|
||||||
|
|
||||||
|
Builder instances are isolated environments where builds can be invoked. All
|
||||||
|
Docker contexts also get the default builder instance.
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### <a name="append"></a> Append a new node to an existing builder (--append)
|
||||||
|
|
||||||
|
The `--append` flag changes the action of the command to append a new node to an
|
||||||
|
existing builder specified by `--name`. Buildx will choose an appropriate node
|
||||||
|
for a build based on the platforms it supports.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx create mycontext1
|
||||||
|
eager_beaver
|
||||||
|
|
||||||
|
$ docker buildx create --name eager_beaver --append mycontext2
|
||||||
|
eager_beaver
|
||||||
|
```
|
||||||
|
|
||||||
|
### <a name="buildkitd-flags"></a> Specify options for the buildkitd daemon (--buildkitd-flags)
|
||||||
|
|
||||||
|
```
|
||||||
|
--buildkitd-flags FLAGS
|
||||||
|
```
|
||||||
|
|
||||||
|
Adds flags when starting the buildkitd daemon. They take precedence over the
|
||||||
|
configuration file specified by [`--config`](#config). See `buildkitd --help`
|
||||||
|
for the available flags.
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
```
|
||||||
|
--buildkitd-flags '--debug --debugaddr 0.0.0.0:6666'
|
||||||
|
```
|
||||||
|
|
||||||
|
### <a name="config"></a> Specify a configuration file for the buildkitd daemon (--config)
|
||||||
|
|
||||||
|
```
|
||||||
|
--config FILE
|
||||||
|
```
|
||||||
|
|
||||||
|
Specifies the configuration file for the buildkitd daemon to use. The configuration
|
||||||
|
can be overridden by [`--buildkitd-flags`](#buildkitd-flags).
|
||||||
|
See an [example buildkitd configuration file](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md).
|
||||||
|
|
||||||
|
Note that if you create a `docker-container` builder and have specified
|
||||||
|
certificates for registries in the `buildkitd.toml` configuration, the files
|
||||||
|
will be copied into the container under `/etc/buildkit/certs` and configuration
|
||||||
|
will be updated to reflect that.
|
||||||
|
|
||||||
|
### <a name="driver"></a> Set the builder driver to use (--driver)
|
||||||
|
|
||||||
|
```
|
||||||
|
--driver DRIVER
|
||||||
|
```
|
||||||
|
|
||||||
|
Sets the builder driver to be used. There are three available drivers, each with
|
||||||
|
their own specificities.
|
||||||
|
|
||||||
|
#### `docker` driver
|
||||||
|
|
||||||
|
Uses the builder that is built into the docker daemon. With this driver,
|
||||||
|
the [`--load`](buildx_build.md#load) flag is implied by default on
|
||||||
|
`buildx build`. However, building multi-platform images or exporting cache is
|
||||||
|
not currently supported.
|
||||||
|
|
||||||
|
#### `docker-container` driver
|
||||||
|
|
||||||
|
Uses a BuildKit container that will be spawned via docker. With this driver,
|
||||||
|
both building multi-platform images and exporting cache are supported.
|
||||||
|
|
||||||
|
Unlike `docker` driver, built images will not automatically appear in
|
||||||
|
`docker images` and [`build --load`](buildx_build.md#load) needs to be used
|
||||||
|
to achieve that.
|
||||||
|
|
||||||
|
#### `kubernetes` driver
|
||||||
|
|
||||||
|
Uses Kubernetes pods. With this driver, you can spin up pods with a defined
|
||||||
|
BuildKit container image to build your images.
|
||||||
|
|
||||||
|
Unlike `docker` driver, built images will not automatically appear in
|
||||||
|
`docker images` and [`build --load`](buildx_build.md#load) needs to be used
|
||||||
|
to achieve that.
|
||||||
|
|
||||||
|
### <a name="driver-opt"></a> Set additional driver-specific options (--driver-opt)
|
||||||
|
|
||||||
|
```
|
||||||
|
--driver-opt OPTIONS
|
||||||
|
```
|
||||||
|
|
||||||
|
Passes additional driver-specific options. Details for each driver:
|
||||||
|
|
||||||
|
- `docker` - No driver options
|
||||||
|
- `docker-container`
|
||||||
|
- `image=IMAGE` - Sets the container image to be used for running buildkit.
|
||||||
|
- `network=NETMODE` - Sets the network mode for running the buildkit container.
|
||||||
|
- `cgroup-parent=CGROUP` - Sets the cgroup parent of the buildkit container if docker is using the "cgroupfs" driver. Defaults to `/docker/buildx`.
|
||||||
|
- `kubernetes`
|
||||||
|
- `image=IMAGE` - Sets the container image to be used for running buildkit.
|
||||||
|
- `namespace=NS` - Sets the Kubernetes namespace. Defaults to the current namespace.
|
||||||
|
- `replicas=N` - Sets the number of `Pod` replicas. Defaults to 1.
|
||||||
|
- `requests.cpu` - Sets the request CPU value specified in units of Kubernetes CPU. Example `requests.cpu=100m`, `requests.cpu=2`
|
||||||
|
- `requests.memory` - Sets the request memory value specified in bytes or with a valid suffix. Example `requests.memory=500Mi`, `requests.memory=4G`
|
||||||
|
- `limits.cpu` - Sets the limit CPU value specified in units of Kubernetes CPU. Example `limits.cpu=100m`, `limits.cpu=2`
|
||||||
|
- `limits.memory` - Sets the limit memory value specified in bytes or with a valid suffix. Example `limits.memory=500Mi`, `limits.memory=4G`
|
||||||
|
- `nodeselector="label1=value1,label2=value2"` - Sets the kv of `Pod` nodeSelector. No Defaults. Example `nodeselector=kubernetes.io/arch=arm64`
|
||||||
|
- `rootless=(true|false)` - Run the container as a non-root user without `securityContext.privileged`. [Using Ubuntu host kernel is recommended](https://github.com/moby/buildkit/blob/master/docs/rootless.md). Defaults to false.
|
||||||
|
- `loadbalance=(sticky|random)` - Load-balancing strategy. If set to "sticky", the pod is chosen using the hash of the context path. Defaults to "sticky"
|
||||||
|
- `qemu.install=(true|false)` - Install QEMU emulation for multi platforms support.
|
||||||
|
- `qemu.image=IMAGE` - Sets the QEMU emulation image. Defaults to `tonistiigi/binfmt:latest`
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
|
#### Use a custom network
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker network create foonet
|
||||||
|
$ docker buildx create --name builder --driver docker-container --driver-opt network=foonet --use
|
||||||
|
$ docker buildx inspect --bootstrap
|
||||||
|
$ docker inspect buildx_buildkit_builder0 --format={{.NetworkSettings.Networks}}
|
||||||
|
map[foonet:0xc00018c0c0]
|
||||||
|
```
|
||||||
|
|
||||||
|
#### OpenTelemetry support
|
||||||
|
|
||||||
|
To capture the trace to [Jaeger](https://github.com/jaegertracing/jaeger), set
|
||||||
|
`JAEGER_TRACE` environment variable to the collection address using the `driver-opt`:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker run -d --name jaeger -p 6831:6831/udp -p 16686:16686 jaegertracing/all-in-one
|
||||||
|
$ docker buildx create --name builder --driver docker-container --driver-opt network=host --driver-opt env.JAEGER_TRACE=localhost:6831 --use
|
||||||
|
$ docker buildx inspect --bootstrap
|
||||||
|
# buildx command should be traced at http://127.0.0.1:16686/
|
||||||
|
```
|
||||||
|
|
||||||
|
### <a name="leave"></a> Remove a node from a builder (--leave)
|
||||||
|
|
||||||
|
The `--leave` flag changes the action of the command to remove a node from a
|
||||||
|
builder. The builder needs to be specified with `--name` and node that is removed
|
||||||
|
is set with `--node`.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx create --name mybuilder --node mybuilder0 --leave
|
||||||
|
```
|
||||||
|
|
||||||
|
### <a name="name"></a> Specify the name of the builder (--name)
|
||||||
|
|
||||||
|
```
|
||||||
|
--name NAME
|
||||||
|
```
|
||||||
|
|
||||||
|
The `--name` flag specifies the name of the builder to be created or modified.
|
||||||
|
If none is specified, one will be automatically generated.
|
||||||
|
|
||||||
|
### <a name="node"></a> Specify the name of the node (--node)
|
||||||
|
|
||||||
|
```
|
||||||
|
--node NODE
|
||||||
|
```
|
||||||
|
|
||||||
|
The `--node` flag specifies the name of the node to be created or modified. If
|
||||||
|
none is specified, it is the name of the builder it belongs to, with an index
|
||||||
|
number suffix.
|
||||||
|
|
||||||
|
### <a name="platform"></a> Set the platforms supported by the node
|
||||||
|
|
||||||
|
```
|
||||||
|
--platform PLATFORMS
|
||||||
|
```
|
||||||
|
|
||||||
|
The `--platform` flag sets the platforms supported by the node. It expects a
|
||||||
|
comma-separated list of platforms of the form OS/architecture/variant. The node
|
||||||
|
will also automatically detect the platforms it supports, but manual values take
|
||||||
|
priority over the detected ones and can be used when multiple nodes support
|
||||||
|
building for the same platform.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx create --platform linux/amd64
|
||||||
|
$ docker buildx create --platform linux/arm64,linux/arm/v8
|
||||||
|
```
|
||||||
|
|
||||||
|
### <a name="use"></a> Automatically switch to the newly created builder
|
||||||
|
|
||||||
|
The `--use` flag automatically switches the current builder to the newly created
|
||||||
|
one. Equivalent to running `docker buildx use $(docker buildx create ...)`.
|
||||||
25
docs/reference/buildx_du.md
Normal file
25
docs/reference/buildx_du.md
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
# buildx du
|
||||||
|
|
||||||
|
```
|
||||||
|
docker buildx du
|
||||||
|
```
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Disk usage
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Description |
|
||||||
|
| --- | --- |
|
||||||
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
|
| `--filter filter` | Provide filter values |
|
||||||
|
| `--verbose` | Provide a more verbose output |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### <a name="builder"></a> Override the configured builder instance (--builder)
|
||||||
|
|
||||||
|
Same as [`buildx --builder`](buildx.md#builder).
|
||||||
36
docs/reference/buildx_imagetools.md
Normal file
36
docs/reference/buildx_imagetools.md
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
# buildx imagetools
|
||||||
|
|
||||||
|
```
|
||||||
|
docker buildx imagetools [OPTIONS] COMMAND
|
||||||
|
```
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Commands to work on images in registry
|
||||||
|
|
||||||
|
### Subcommands
|
||||||
|
|
||||||
|
| Name | Description |
|
||||||
|
| --- | --- |
|
||||||
|
| [`create`](buildx_imagetools_create.md) | Create a new image based on source images |
|
||||||
|
| [`inspect`](buildx_imagetools_inspect.md) | Show details of image in the registry |
|
||||||
|
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Description |
|
||||||
|
| --- | --- |
|
||||||
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
Imagetools contains commands for working with manifest lists in the registry.
|
||||||
|
These commands are useful for inspecting multi-platform build results.
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### <a name="builder"></a> Override the configured builder instance (--builder)
|
||||||
|
|
||||||
|
Same as [`buildx --builder`](buildx.md#builder).
|
||||||
84
docs/reference/buildx_imagetools_create.md
Normal file
84
docs/reference/buildx_imagetools_create.md
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
# buildx imagetools create
|
||||||
|
|
||||||
|
```
|
||||||
|
docker buildx imagetools create [OPTIONS] [SOURCE] [SOURCE...]
|
||||||
|
```
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Create a new image based on source images
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Description |
|
||||||
|
| --- | --- |
|
||||||
|
| [`--append`](#append) | Append to existing manifest |
|
||||||
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
|
| [`--dry-run`](#dry-run) | Show final image instead of pushing |
|
||||||
|
| [`-f`](#file), [`--file stringArray`](#file) | Read source descriptor from file |
|
||||||
|
| [`-t`](#tag), [`--tag stringArray`](#tag) | Set reference for new image |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
Imagetools contains commands for working with manifest lists in the registry.
|
||||||
|
These commands are useful for inspecting multi-platform build results.
|
||||||
|
|
||||||
|
Create a new manifest list based on source manifests. The source manifests can
|
||||||
|
be manifest lists or single platform distribution manifests and must already
|
||||||
|
exist in the registry where the new manifest is created. If only one source is
|
||||||
|
specified, create performs a carbon copy.
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### <a name="append"></a> Append new sources to an existing manifest list (--append)
|
||||||
|
|
||||||
|
Use the `--append` flag to append the new sources to an existing manifest list
|
||||||
|
in the destination.
|
||||||
|
|
||||||
|
### <a name="builder"></a> Override the configured builder instance (--builder)
|
||||||
|
|
||||||
|
Same as [`buildx --builder`](buildx.md#builder).
|
||||||
|
|
||||||
|
### <a name="dry-run"></a> Show final image instead of pushing (--dry-run)
|
||||||
|
|
||||||
|
Use the `--dry-run` flag to not push the image, just show it.
|
||||||
|
|
||||||
|
### <a name="file"></a> Read source descriptor from a file (-f, --file)
|
||||||
|
|
||||||
|
```
|
||||||
|
-f FILE or --file FILE
|
||||||
|
```
|
||||||
|
|
||||||
|
Reads source from files. A source can be a manifest digest, manifest reference,
|
||||||
|
or a JSON of OCI descriptor object.
|
||||||
|
|
||||||
|
In order to define annotations or additional platform properties like `os.version` and
|
||||||
|
`os.features` you need to add them in the OCI descriptor object encoded in JSON.
|
||||||
|
|
||||||
|
```
|
||||||
|
docker buildx imagetools inspect --raw alpine | jq '.manifests[0] | .platform."os.version"="10.1"' > descr.json
|
||||||
|
docker buildx imagetools create -f descr.json myuser/image
|
||||||
|
```
|
||||||
|
|
||||||
|
The descriptor in the file is merged with existing descriptor in the registry if it exists.
|
||||||
|
|
||||||
|
The supported fields for the descriptor are defined in [OCI spec](https://github.com/opencontainers/image-spec/blob/master/descriptor.md#properties) .
|
||||||
|
|
||||||
|
|
||||||
|
### <a name="tag"></a> Set reference for new image (-t, --tag)
|
||||||
|
|
||||||
|
```
|
||||||
|
-t IMAGE or --tag IMAGE
|
||||||
|
```
|
||||||
|
|
||||||
|
Use the `-t` or `--tag` flag to set the name of the image to be created.
|
||||||
|
|
||||||
|
**Examples**
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx imagetools create --dry-run alpine@sha256:5c40b3c27b9f13c873fefb2139765c56ce97fd50230f1f2d5c91e55dec171907 sha256:c4ba6347b0e4258ce6a6de2401619316f982b7bcc529f73d2a410d0097730204
|
||||||
|
|
||||||
|
$ docker buildx imagetools create -t tonistiigi/myapp -f image1 -f image2
|
||||||
|
```
|
||||||
53
docs/reference/buildx_imagetools_inspect.md
Normal file
53
docs/reference/buildx_imagetools_inspect.md
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
# buildx imagetools inspect
|
||||||
|
|
||||||
|
```
|
||||||
|
docker buildx imagetools inspect [OPTIONS] NAME
|
||||||
|
```
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Show details of image in the registry
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Description |
|
||||||
|
| --- | --- |
|
||||||
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
|
| [`--raw`](#raw) | Show original JSON manifest |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
Show details of image in the registry.
|
||||||
|
|
||||||
|
Example:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx imagetools inspect alpine
|
||||||
|
|
||||||
|
Name: docker.io/library/alpine:latest
|
||||||
|
MediaType: application/vnd.docker.distribution.manifest.list.v2+json
|
||||||
|
Digest: sha256:28ef97b8686a0b5399129e9b763d5b7e5ff03576aa5580d6f4182a49c5fe1913
|
||||||
|
|
||||||
|
Manifests:
|
||||||
|
Name: docker.io/library/alpine:latest@sha256:5c40b3c27b9f13c873fefb2139765c56ce97fd50230f1f2d5c91e55dec171907
|
||||||
|
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
||||||
|
Platform: linux/amd64
|
||||||
|
|
||||||
|
Name: docker.io/library/alpine:latest@sha256:c4ba6347b0e4258ce6a6de2401619316f982b7bcc529f73d2a410d0097730204
|
||||||
|
MediaType: application/vnd.docker.distribution.manifest.v2+json
|
||||||
|
Platform: linux/arm/v6
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### <a name="builder"></a> Override the configured builder instance (--builder)
|
||||||
|
|
||||||
|
Same as [`buildx --builder`](buildx.md#builder).
|
||||||
|
|
||||||
|
### <a name="raw"></a> Show original, unformatted JSON manifest (--raw)
|
||||||
|
|
||||||
|
Use the `--raw` option to print the original JSON bytes instead of the formatted
|
||||||
|
output.
|
||||||
62
docs/reference/buildx_inspect.md
Normal file
62
docs/reference/buildx_inspect.md
Normal file
@@ -0,0 +1,62 @@
|
|||||||
|
# buildx inspect
|
||||||
|
|
||||||
|
```
|
||||||
|
docker buildx inspect [NAME]
|
||||||
|
```
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Inspect current builder instance
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Description |
|
||||||
|
| --- | --- |
|
||||||
|
| [`--bootstrap`](#bootstrap) | Ensure builder has booted before inspecting |
|
||||||
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
Shows information about the current or specified builder.
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### <a name="bootstrap"></a> Ensure that the builder is running before inspecting (--bootstrap)
|
||||||
|
|
||||||
|
Use the `--bootstrap` option to ensure that the builder is running before
|
||||||
|
inspecting it. If the driver is `docker-container`, then `--bootstrap` starts
|
||||||
|
the buildkit container and waits until it is operational. Bootstrapping is
|
||||||
|
automatically done during build, and therefore not necessary. The same BuildKit
|
||||||
|
container is used during the lifetime of the associated builder node (as
|
||||||
|
displayed in `buildx ls`).
|
||||||
|
|
||||||
|
### <a name="builder"></a> Override the configured builder instance (--builder)
|
||||||
|
|
||||||
|
Same as [`buildx --builder`](buildx.md#builder).
|
||||||
|
|
||||||
|
### Get information about a builder instance
|
||||||
|
|
||||||
|
By default, `inspect` shows information about the current builder. Specify the
|
||||||
|
name of the builder to inspect to get information about that builder.
|
||||||
|
The following example shows information about a builder instance named
|
||||||
|
`elated_tesla`:
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx inspect elated_tesla
|
||||||
|
|
||||||
|
Name: elated_tesla
|
||||||
|
Driver: docker-container
|
||||||
|
|
||||||
|
Nodes:
|
||||||
|
Name: elated_tesla0
|
||||||
|
Endpoint: unix:///var/run/docker.sock
|
||||||
|
Status: running
|
||||||
|
Platforms: linux/amd64
|
||||||
|
|
||||||
|
Name: elated_tesla1
|
||||||
|
Endpoint: ssh://ubuntu@1.2.3.4
|
||||||
|
Status: running
|
||||||
|
Platforms: linux/arm64, linux/arm/v7, linux/arm/v6
|
||||||
|
```
|
||||||
11
docs/reference/buildx_install.md
Normal file
11
docs/reference/buildx_install.md
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
# buildx install
|
||||||
|
|
||||||
|
```
|
||||||
|
docker buildx install
|
||||||
|
```
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Install buildx as a 'docker builder' alias
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
31
docs/reference/buildx_ls.md
Normal file
31
docs/reference/buildx_ls.md
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
# buildx ls
|
||||||
|
|
||||||
|
```
|
||||||
|
docker buildx ls
|
||||||
|
```
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
List builder instances
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
Lists all builder instances and the nodes for each instance
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx ls
|
||||||
|
|
||||||
|
NAME/NODE DRIVER/ENDPOINT STATUS PLATFORMS
|
||||||
|
elated_tesla * docker-container
|
||||||
|
elated_tesla0 unix:///var/run/docker.sock running linux/amd64
|
||||||
|
elated_tesla1 ssh://ubuntu@1.2.3.4 running linux/arm64, linux/arm/v7, linux/arm/v6
|
||||||
|
default docker
|
||||||
|
default default running linux/amd64
|
||||||
|
```
|
||||||
|
|
||||||
|
Each builder has one or more nodes associated with it. The current builder's
|
||||||
|
name is marked with a `*`.
|
||||||
28
docs/reference/buildx_prune.md
Normal file
28
docs/reference/buildx_prune.md
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
# buildx prune
|
||||||
|
|
||||||
|
```
|
||||||
|
docker buildx prune
|
||||||
|
```
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Remove build cache
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Description |
|
||||||
|
| --- | --- |
|
||||||
|
| `-a`, `--all` | Remove all unused images, not just dangling ones |
|
||||||
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
|
| `--filter filter` | Provide filter values (e.g., `until=24h`) |
|
||||||
|
| `-f`, `--force` | Do not prompt for confirmation |
|
||||||
|
| `--keep-storage bytes` | Amount of disk space to keep for cache |
|
||||||
|
| `--verbose` | Provide a more verbose output |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### <a name="builder"></a> Override the configured builder instance (--builder)
|
||||||
|
|
||||||
|
Same as [`buildx --builder`](buildx.md#builder).
|
||||||
34
docs/reference/buildx_rm.md
Normal file
34
docs/reference/buildx_rm.md
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
# buildx rm
|
||||||
|
|
||||||
|
```
|
||||||
|
docker buildx rm [NAME]
|
||||||
|
```
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Remove a builder instance
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Description |
|
||||||
|
| --- | --- |
|
||||||
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
|
| [`--keep-state`](#keep-state) | Keep BuildKit state |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
Removes the specified or current builder. It is a no-op attempting to remove the
|
||||||
|
default builder.
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### <a name="builder"></a> Override the configured builder instance (--builder)
|
||||||
|
|
||||||
|
Same as [`buildx --builder`](buildx.md#builder).
|
||||||
|
|
||||||
|
### <a name="keep-state"></a> Keep BuildKit state (--keep-state)
|
||||||
|
|
||||||
|
Keep BuildKit state, so it can be reused by a new builder with the same name.
|
||||||
|
Currently, only supported by the [`docker-container` driver](buildx_create.md#driver).
|
||||||
28
docs/reference/buildx_stop.md
Normal file
28
docs/reference/buildx_stop.md
Normal file
@@ -0,0 +1,28 @@
|
|||||||
|
# buildx stop
|
||||||
|
|
||||||
|
```
|
||||||
|
docker buildx stop [NAME]
|
||||||
|
```
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Stop builder instance
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Description |
|
||||||
|
| --- | --- |
|
||||||
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
Stops the specified or current builder. This will not prevent buildx build to
|
||||||
|
restart the builder. The implementation of stop depends on the driver.
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### <a name="builder"></a> Override the configured builder instance (--builder)
|
||||||
|
|
||||||
|
Same as [`buildx --builder`](buildx.md#builder).
|
||||||
11
docs/reference/buildx_uninstall.md
Normal file
11
docs/reference/buildx_uninstall.md
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
# buildx uninstall
|
||||||
|
|
||||||
|
```
|
||||||
|
docker buildx uninstall
|
||||||
|
```
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Uninstall the 'docker builder' alias
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
31
docs/reference/buildx_use.md
Normal file
31
docs/reference/buildx_use.md
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
# buildx use
|
||||||
|
|
||||||
|
```
|
||||||
|
docker buildx use [OPTIONS] NAME
|
||||||
|
```
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Set the current builder instance
|
||||||
|
|
||||||
|
### Options
|
||||||
|
|
||||||
|
| Name | Description |
|
||||||
|
| --- | --- |
|
||||||
|
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||||
|
| `--default` | Set builder as default for current context |
|
||||||
|
| `--global` | Builder persists context changes |
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
|
## Description
|
||||||
|
|
||||||
|
Switches the current builder instance. Build commands invoked after this command
|
||||||
|
will run on a specified builder. Alternatively, a context name can be used to
|
||||||
|
switch to the default builder of that context.
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### <a name="builder"></a> Override the configured builder instance (--builder)
|
||||||
|
|
||||||
|
Same as [`buildx --builder`](buildx.md#builder).
|
||||||
21
docs/reference/buildx_version.md
Normal file
21
docs/reference/buildx_version.md
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
# buildx version
|
||||||
|
|
||||||
|
```
|
||||||
|
docker buildx version
|
||||||
|
```
|
||||||
|
|
||||||
|
<!---MARKER_GEN_START-->
|
||||||
|
Show buildx version information
|
||||||
|
|
||||||
|
|
||||||
|
<!---MARKER_GEN_END-->
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
### View version information
|
||||||
|
|
||||||
|
|
||||||
|
```console
|
||||||
|
$ docker buildx version
|
||||||
|
github.com/docker/buildx v0.5.1-docker 11057da37336192bfc57d81e02359ba7ba848e4a
|
||||||
|
```
|
||||||
@@ -2,5 +2,6 @@ package bkimage
|
|||||||
|
|
||||||
const (
|
const (
|
||||||
DefaultImage = "moby/buildkit:buildx-stable-1" // TODO: make this verified
|
DefaultImage = "moby/buildkit:buildx-stable-1" // TODO: make this verified
|
||||||
DefaultRootlessImage = "moby/buildkit:v0.6.2-rootless"
|
QemuImage = "tonistiigi/binfmt:latest" // TODO: make this verified
|
||||||
|
DefaultRootlessImage = DefaultImage + "-rootless"
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,35 +1,46 @@
|
|||||||
package docker
|
package docker
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"archive/tar"
|
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"net"
|
"net"
|
||||||
"os"
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/docker/buildx/driver"
|
"github.com/docker/buildx/driver"
|
||||||
"github.com/docker/buildx/driver/bkimage"
|
"github.com/docker/buildx/driver/bkimage"
|
||||||
|
"github.com/docker/buildx/util/confutil"
|
||||||
"github.com/docker/buildx/util/imagetools"
|
"github.com/docker/buildx/util/imagetools"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/docker/docker/api/types"
|
"github.com/docker/docker/api/types"
|
||||||
dockertypes "github.com/docker/docker/api/types"
|
dockertypes "github.com/docker/docker/api/types"
|
||||||
"github.com/docker/docker/api/types/container"
|
"github.com/docker/docker/api/types/container"
|
||||||
|
"github.com/docker/docker/api/types/mount"
|
||||||
"github.com/docker/docker/api/types/network"
|
"github.com/docker/docker/api/types/network"
|
||||||
dockerclient "github.com/docker/docker/client"
|
dockerclient "github.com/docker/docker/client"
|
||||||
|
dockerarchive "github.com/docker/docker/pkg/archive"
|
||||||
|
"github.com/docker/docker/pkg/idtools"
|
||||||
"github.com/docker/docker/pkg/stdcopy"
|
"github.com/docker/docker/pkg/stdcopy"
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
|
"github.com/moby/buildkit/util/tracing/detect"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
volumeStateSuffix = "_state"
|
||||||
|
)
|
||||||
|
|
||||||
type Driver struct {
|
type Driver struct {
|
||||||
driver.InitConfig
|
driver.InitConfig
|
||||||
factory driver.Factory
|
factory driver.Factory
|
||||||
netMode string
|
netMode string
|
||||||
image string
|
image string
|
||||||
env []string
|
cgroupParent string
|
||||||
|
env []string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) IsMobyDriver() bool {
|
func (d *Driver) IsMobyDriver() bool {
|
||||||
@@ -102,22 +113,31 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
|
|||||||
hc := &container.HostConfig{
|
hc := &container.HostConfig{
|
||||||
Privileged: true,
|
Privileged: true,
|
||||||
UsernsMode: "host",
|
UsernsMode: "host",
|
||||||
|
Mounts: []mount.Mount{
|
||||||
|
{
|
||||||
|
Type: mount.TypeVolume,
|
||||||
|
Source: d.Name + volumeStateSuffix,
|
||||||
|
Target: confutil.DefaultBuildKitStateDir,
|
||||||
|
},
|
||||||
|
},
|
||||||
}
|
}
|
||||||
if d.netMode != "" {
|
if d.netMode != "" {
|
||||||
hc.NetworkMode = container.NetworkMode(d.netMode)
|
hc.NetworkMode = container.NetworkMode(d.netMode)
|
||||||
}
|
}
|
||||||
|
if info, err := d.DockerAPI.Info(ctx); err == nil && info.CgroupDriver == "cgroupfs" {
|
||||||
|
// Place all buildkit containers inside this cgroup by default so limits can be attached
|
||||||
|
// to all build activity on the host.
|
||||||
|
hc.CgroupParent = "/docker/buildx"
|
||||||
|
if d.cgroupParent != "" {
|
||||||
|
hc.CgroupParent = d.cgroupParent
|
||||||
|
}
|
||||||
|
}
|
||||||
_, err := d.DockerAPI.ContainerCreate(ctx, cfg, hc, &network.NetworkingConfig{}, nil, d.Name)
|
_, err := d.DockerAPI.ContainerCreate(ctx, cfg, hc, &network.NetworkingConfig{}, nil, d.Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if f := d.InitConfig.ConfigFile; f != "" {
|
if err := d.copyToContainer(ctx, d.InitConfig.Files); err != nil {
|
||||||
buf, err := readFileToTar(f)
|
return err
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := d.DockerAPI.CopyToContainer(ctx, d.Name, "/", buf, dockertypes.CopyToContainerOptions{}); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if err := d.start(ctx, l); err != nil {
|
if err := d.start(ctx, l); err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -177,6 +197,24 @@ func (d *Driver) copyLogs(ctx context.Context, l progress.SubLogger) error {
|
|||||||
return rc.Close()
|
return rc.Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Driver) copyToContainer(ctx context.Context, files map[string][]byte) error {
|
||||||
|
srcPath, err := writeConfigFiles(files)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if srcPath != "" {
|
||||||
|
defer os.RemoveAll(srcPath)
|
||||||
|
}
|
||||||
|
srcArchive, err := dockerarchive.TarWithOptions(srcPath, &dockerarchive.TarOptions{
|
||||||
|
ChownOpts: &idtools.Identity{UID: 0, GID: 0},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer srcArchive.Close()
|
||||||
|
return d.DockerAPI.CopyToContainer(ctx, d.Name, "/", srcArchive, dockertypes.CopyToContainerOptions{})
|
||||||
|
}
|
||||||
|
|
||||||
func (d *Driver) exec(ctx context.Context, cmd []string) (string, net.Conn, error) {
|
func (d *Driver) exec(ctx context.Context, cmd []string) (string, net.Conn, error) {
|
||||||
execConfig := types.ExecConfig{
|
execConfig := types.ExecConfig{
|
||||||
Cmd: cmd,
|
Cmd: cmd,
|
||||||
@@ -225,7 +263,7 @@ func (d *Driver) start(ctx context.Context, l progress.SubLogger) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
|
func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
|
||||||
container, err := d.DockerAPI.ContainerInspect(ctx, d.Name)
|
ctn, err := d.DockerAPI.ContainerInspect(ctx, d.Name)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if dockerclient.IsErrNotFound(err) {
|
if dockerclient.IsErrNotFound(err) {
|
||||||
return &driver.Info{
|
return &driver.Info{
|
||||||
@@ -235,7 +273,7 @@ func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if container.State.Running {
|
if ctn.State.Running {
|
||||||
return &driver.Info{
|
return &driver.Info{
|
||||||
Status: driver.Running,
|
Status: driver.Running,
|
||||||
}, nil
|
}, nil
|
||||||
@@ -257,16 +295,30 @@ func (d *Driver) Stop(ctx context.Context, force bool) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Rm(ctx context.Context, force bool) error {
|
func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
|
||||||
info, err := d.Info(ctx)
|
info, err := d.Info(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if info.Status != driver.Inactive {
|
if info.Status != driver.Inactive {
|
||||||
return d.DockerAPI.ContainerRemove(ctx, d.Name, dockertypes.ContainerRemoveOptions{
|
container, err := d.DockerAPI.ContainerInspect(ctx, d.Name)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := d.DockerAPI.ContainerRemove(ctx, d.Name, dockertypes.ContainerRemoveOptions{
|
||||||
RemoveVolumes: true,
|
RemoveVolumes: true,
|
||||||
Force: true,
|
Force: force,
|
||||||
})
|
}); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for _, v := range container.Mounts {
|
||||||
|
if v.Name == d.Name+volumeStateSuffix {
|
||||||
|
if rmVolume {
|
||||||
|
return d.DockerAPI.VolumeRemove(ctx, d.Name+volumeStateSuffix, false)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -279,9 +331,16 @@ func (d *Driver) Client(ctx context.Context) (*client.Client, error) {
|
|||||||
|
|
||||||
conn = demuxConn(conn)
|
conn = demuxConn(conn)
|
||||||
|
|
||||||
|
exp, err := detect.Exporter()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
td, _ := exp.(client.TracerDelegate)
|
||||||
|
|
||||||
return client.New(ctx, "", client.WithContextDialer(func(context.Context, string) (net.Conn, error) {
|
return client.New(ctx, "", client.WithContextDialer(func(context.Context, string) (net.Conn, error) {
|
||||||
return conn, nil
|
return conn, nil
|
||||||
}))
|
}), client.WithTracerDelegate(td))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Factory() driver.Factory {
|
func (d *Driver) Factory() driver.Factory {
|
||||||
@@ -317,29 +376,6 @@ func (d *demux) Read(dt []byte) (int, error) {
|
|||||||
return d.Reader.Read(dt)
|
return d.Reader.Read(dt)
|
||||||
}
|
}
|
||||||
|
|
||||||
func readFileToTar(fn string) (*bytes.Buffer, error) {
|
|
||||||
buf := bytes.NewBuffer(nil)
|
|
||||||
tw := tar.NewWriter(buf)
|
|
||||||
dt, err := ioutil.ReadFile(fn)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := tw.WriteHeader(&tar.Header{
|
|
||||||
Name: "/etc/buildkit/buildkitd.toml",
|
|
||||||
Size: int64(len(dt)),
|
|
||||||
Mode: 0644,
|
|
||||||
}); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if _, err := tw.Write(dt); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if err := tw.Close(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return buf, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type logWriter struct {
|
type logWriter struct {
|
||||||
logger progress.SubLogger
|
logger progress.SubLogger
|
||||||
stream int
|
stream int
|
||||||
@@ -349,3 +385,27 @@ func (l *logWriter) Write(dt []byte) (int, error) {
|
|||||||
l.logger.Log(l.stream, dt)
|
l.logger.Log(l.stream, dt)
|
||||||
return len(dt), nil
|
return len(dt), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func writeConfigFiles(m map[string][]byte) (_ string, err error) {
|
||||||
|
// Temp dir that will be copied to the container
|
||||||
|
tmpDir, err := os.MkdirTemp("", "buildkitd-config")
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err != nil {
|
||||||
|
os.RemoveAll(tmpDir)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
for f, dt := range m {
|
||||||
|
f = path.Join(confutil.DefaultBuildKitConfigDir, f)
|
||||||
|
p := filepath.Join(tmpDir, f)
|
||||||
|
if err := os.MkdirAll(filepath.Dir(p), 0700); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
if err := os.WriteFile(p, dt, 0600); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return tmpDir, nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -49,6 +49,8 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
|
|||||||
}
|
}
|
||||||
case k == "image":
|
case k == "image":
|
||||||
d.image = v
|
d.image = v
|
||||||
|
case k == "cgroup-parent":
|
||||||
|
d.cgroupParent = v
|
||||||
case strings.HasPrefix(k, "env."):
|
case strings.HasPrefix(k, "env."):
|
||||||
envName := strings.TrimPrefix(k, "env.")
|
envName := strings.TrimPrefix(k, "env.")
|
||||||
if envName == "" {
|
if envName == "" {
|
||||||
|
|||||||
@@ -33,13 +33,15 @@ func (d *Driver) Stop(ctx context.Context, force bool) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Rm(ctx context.Context, force bool) error {
|
func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Client(ctx context.Context) (*client.Client, error) {
|
func (d *Driver) Client(ctx context.Context) (*client.Client, error) {
|
||||||
return client.New(ctx, "", client.WithContextDialer(func(context.Context, string) (net.Conn, error) {
|
return client.New(ctx, "", client.WithContextDialer(func(context.Context, string) (net.Conn, error) {
|
||||||
return d.DockerAPI.DialHijack(ctx, "/grpc", "h2c", nil)
|
return d.DockerAPI.DialHijack(ctx, "/grpc", "h2c", nil)
|
||||||
|
}), client.WithSessionDialer(func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
|
||||||
|
return d.DockerAPI.DialHijack(ctx, "/session", proto, meta)
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -44,7 +44,7 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
|
|||||||
if cfg.DockerAPI == nil {
|
if cfg.DockerAPI == nil {
|
||||||
return nil, errors.Errorf("docker driver requires docker API access")
|
return nil, errors.Errorf("docker driver requires docker API access")
|
||||||
}
|
}
|
||||||
if cfg.ConfigFile != "" {
|
if len(cfg.Files) > 0 {
|
||||||
return nil, errors.Errorf("setting config file is not supported for docker driver, use dockerd configuration file")
|
return nil, errors.Errorf("setting config file is not supported for docker driver, use dockerd configuration file")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -54,14 +54,14 @@ type Driver interface {
|
|||||||
Bootstrap(context.Context, progress.Logger) error
|
Bootstrap(context.Context, progress.Logger) error
|
||||||
Info(context.Context) (*Info, error)
|
Info(context.Context) (*Info, error)
|
||||||
Stop(ctx context.Context, force bool) error
|
Stop(ctx context.Context, force bool) error
|
||||||
Rm(ctx context.Context, force bool) error
|
Rm(ctx context.Context, force bool, rmVolume bool) error
|
||||||
Client(ctx context.Context) (*client.Client, error)
|
Client(ctx context.Context) (*client.Client, error)
|
||||||
Features() map[Feature]bool
|
Features() map[Feature]bool
|
||||||
IsMobyDriver() bool
|
IsMobyDriver() bool
|
||||||
Config() InitConfig
|
Config() InitConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
func Boot(ctx context.Context, d Driver, pw progress.Writer) (*client.Client, error) {
|
func Boot(ctx, clientContext context.Context, d Driver, pw progress.Writer) (*client.Client, error) {
|
||||||
try := 0
|
try := 0
|
||||||
for {
|
for {
|
||||||
info, err := d.Info(ctx)
|
info, err := d.Info(ctx)
|
||||||
@@ -78,7 +78,7 @@ func Boot(ctx context.Context, d Driver, pw progress.Writer) (*client.Client, er
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
c, err := d.Client(context.TODO())
|
c, err := d.Client(clientContext)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Cause(err) == ErrNotRunning && try <= 2 {
|
if errors.Cause(err) == ErrNotRunning && try <= 2 {
|
||||||
continue
|
continue
|
||||||
|
|||||||
@@ -15,8 +15,11 @@ import (
|
|||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
"github.com/docker/buildx/util/progress"
|
"github.com/docker/buildx/util/progress"
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
|
"github.com/moby/buildkit/util/tracing/detect"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
appsv1 "k8s.io/api/apps/v1"
|
appsv1 "k8s.io/api/apps/v1"
|
||||||
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/client-go/kubernetes"
|
"k8s.io/client-go/kubernetes"
|
||||||
clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
|
clientappsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
|
||||||
@@ -38,15 +41,18 @@ type Driver struct {
|
|||||||
factory driver.Factory
|
factory driver.Factory
|
||||||
minReplicas int
|
minReplicas int
|
||||||
deployment *appsv1.Deployment
|
deployment *appsv1.Deployment
|
||||||
|
configMaps []*corev1.ConfigMap
|
||||||
clientset *kubernetes.Clientset
|
clientset *kubernetes.Clientset
|
||||||
deploymentClient clientappsv1.DeploymentInterface
|
deploymentClient clientappsv1.DeploymentInterface
|
||||||
podClient clientcorev1.PodInterface
|
podClient clientcorev1.PodInterface
|
||||||
|
configMapClient clientcorev1.ConfigMapInterface
|
||||||
podChooser podchooser.PodChooser
|
podChooser podchooser.PodChooser
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) IsMobyDriver() bool {
|
func (d *Driver) IsMobyDriver() bool {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Config() driver.InitConfig {
|
func (d *Driver) Config() driver.InitConfig {
|
||||||
return d.InitConfig
|
return d.InitConfig
|
||||||
}
|
}
|
||||||
@@ -55,7 +61,24 @@ func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
|
|||||||
return progress.Wrap("[internal] booting buildkit", l, func(sub progress.SubLogger) error {
|
return progress.Wrap("[internal] booting buildkit", l, func(sub progress.SubLogger) error {
|
||||||
_, err := d.deploymentClient.Get(ctx, d.deployment.Name, metav1.GetOptions{})
|
_, err := d.deploymentClient.Get(ctx, d.deployment.Name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// TODO: return err if err != ErrNotFound
|
if !apierrors.IsNotFound(err) {
|
||||||
|
return errors.Wrapf(err, "error for bootstrap %q", d.deployment.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, cfg := range d.configMaps {
|
||||||
|
// create ConfigMap first if exists
|
||||||
|
_, err = d.configMapClient.Create(ctx, cfg, metav1.CreateOptions{})
|
||||||
|
if err != nil {
|
||||||
|
if !apierrors.IsAlreadyExists(err) {
|
||||||
|
return errors.Wrapf(err, "error while calling configMapClient.Create for %q", cfg.Name)
|
||||||
|
}
|
||||||
|
_, err = d.configMapClient.Update(ctx, cfg, metav1.UpdateOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return errors.Wrapf(err, "error while calling configMapClient.Update for %q", cfg.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
_, err = d.deploymentClient.Create(ctx, d.deployment, metav1.CreateOptions{})
|
_, err = d.deploymentClient.Create(ctx, d.deployment, metav1.CreateOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return errors.Wrapf(err, "error while calling deploymentClient.Create for %q", d.deployment.Name)
|
return errors.Wrapf(err, "error while calling deploymentClient.Create for %q", d.deployment.Name)
|
||||||
@@ -142,9 +165,18 @@ func (d *Driver) Stop(ctx context.Context, force bool) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Rm(ctx context.Context, force bool) error {
|
func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
|
||||||
if err := d.deploymentClient.Delete(ctx, d.deployment.Name, metav1.DeleteOptions{}); err != nil {
|
if err := d.deploymentClient.Delete(ctx, d.deployment.Name, metav1.DeleteOptions{}); err != nil {
|
||||||
return errors.Wrapf(err, "error while calling deploymentClient.Delete for %q", d.deployment.Name)
|
if !apierrors.IsNotFound(err) {
|
||||||
|
return errors.Wrapf(err, "error while calling deploymentClient.Delete for %q", d.deployment.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, cfg := range d.configMaps {
|
||||||
|
if err := d.configMapClient.Delete(ctx, cfg.Name, metav1.DeleteOptions{}); err != nil {
|
||||||
|
if !apierrors.IsNotFound(err) {
|
||||||
|
return errors.Wrapf(err, "error while calling configMapClient.Delete for %q", cfg.Name)
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -169,9 +201,17 @@ func (d *Driver) Client(ctx context.Context) (*client.Client, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
exp, err := detect.Exporter()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
td, _ := exp.(client.TracerDelegate)
|
||||||
|
|
||||||
return client.New(ctx, "", client.WithContextDialer(func(context.Context, string) (net.Conn, error) {
|
return client.New(ctx, "", client.WithContextDialer(func(context.Context, string) (net.Conn, error) {
|
||||||
return conn, nil
|
return conn, nil
|
||||||
}))
|
}), client.WithTracerDelegate(td))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Driver) Factory() driver.Factory {
|
func (d *Driver) Factory() driver.Factory {
|
||||||
|
|||||||
@@ -9,7 +9,6 @@ import (
|
|||||||
"github.com/docker/buildx/driver/bkimage"
|
"github.com/docker/buildx/driver/bkimage"
|
||||||
"github.com/docker/buildx/driver/kubernetes/manifest"
|
"github.com/docker/buildx/driver/kubernetes/manifest"
|
||||||
"github.com/docker/buildx/driver/kubernetes/podchooser"
|
"github.com/docker/buildx/driver/kubernetes/podchooser"
|
||||||
"github.com/docker/buildx/util/platformutil"
|
|
||||||
dockerclient "github.com/docker/docker/client"
|
dockerclient "github.com/docker/docker/client"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"k8s.io/client-go/kubernetes"
|
"k8s.io/client-go/kubernetes"
|
||||||
@@ -60,24 +59,33 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
d := &Driver{
|
d := &Driver{
|
||||||
factory: f,
|
factory: f,
|
||||||
InitConfig: cfg,
|
InitConfig: cfg,
|
||||||
clientset: clientset,
|
clientset: clientset,
|
||||||
}
|
}
|
||||||
|
|
||||||
deploymentOpt := &manifest.DeploymentOpt{
|
deploymentOpt := &manifest.DeploymentOpt{
|
||||||
Name: deploymentName,
|
Name: deploymentName,
|
||||||
Image: bkimage.DefaultImage,
|
Image: bkimage.DefaultImage,
|
||||||
Replicas: 1,
|
Replicas: 1,
|
||||||
BuildkitFlags: cfg.BuildkitFlags,
|
BuildkitFlags: cfg.BuildkitFlags,
|
||||||
Rootless: false,
|
Rootless: false,
|
||||||
|
Platforms: cfg.Platforms,
|
||||||
|
ConfigFiles: cfg.Files,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
deploymentOpt.Qemu.Image = bkimage.QemuImage
|
||||||
|
|
||||||
loadbalance := LoadbalanceSticky
|
loadbalance := LoadbalanceSticky
|
||||||
imageOverride := ""
|
|
||||||
for k, v := range cfg.DriverOpts {
|
for k, v := range cfg.DriverOpts {
|
||||||
switch k {
|
switch k {
|
||||||
case "image":
|
case "image":
|
||||||
imageOverride = v
|
if v != "" {
|
||||||
|
deploymentOpt.Image = v
|
||||||
|
}
|
||||||
case "namespace":
|
case "namespace":
|
||||||
namespace = v
|
namespace = v
|
||||||
case "replicas":
|
case "replicas":
|
||||||
@@ -85,20 +93,20 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
case "requests.cpu":
|
||||||
|
deploymentOpt.RequestsCPU = v
|
||||||
|
case "requests.memory":
|
||||||
|
deploymentOpt.RequestsMemory = v
|
||||||
|
case "limits.cpu":
|
||||||
|
deploymentOpt.LimitsCPU = v
|
||||||
|
case "limits.memory":
|
||||||
|
deploymentOpt.LimitsMemory = v
|
||||||
case "rootless":
|
case "rootless":
|
||||||
deploymentOpt.Rootless, err = strconv.ParseBool(v)
|
deploymentOpt.Rootless, err = strconv.ParseBool(v)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
deploymentOpt.Image = bkimage.DefaultRootlessImage
|
deploymentOpt.Image = bkimage.DefaultRootlessImage
|
||||||
case "platform":
|
|
||||||
if v != "" {
|
|
||||||
platforms, err := platformutil.Parse(strings.Split(v, ","))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
deploymentOpt.Platforms = platforms
|
|
||||||
}
|
|
||||||
case "nodeselector":
|
case "nodeselector":
|
||||||
kvs := strings.Split(strings.Trim(v, `"`), ",")
|
kvs := strings.Split(strings.Trim(v, `"`), ",")
|
||||||
s := map[string]string{}
|
s := map[string]string{}
|
||||||
@@ -117,20 +125,31 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
|
|||||||
return nil, errors.Errorf("invalid loadbalance %q", v)
|
return nil, errors.Errorf("invalid loadbalance %q", v)
|
||||||
}
|
}
|
||||||
loadbalance = v
|
loadbalance = v
|
||||||
|
case "qemu.install":
|
||||||
|
deploymentOpt.Qemu.Install, err = strconv.ParseBool(v)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
case "qemu.image":
|
||||||
|
if v != "" {
|
||||||
|
deploymentOpt.Qemu.Image = v
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
return nil, errors.Errorf("invalid driver option %s for driver %s", k, DriverName)
|
return nil, errors.Errorf("invalid driver option %s for driver %s", k, DriverName)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if imageOverride != "" {
|
|
||||||
deploymentOpt.Image = imageOverride
|
d.deployment, d.configMaps, err = manifest.NewDeployment(deploymentOpt)
|
||||||
}
|
|
||||||
d.deployment, err = manifest.NewDeployment(deploymentOpt)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
d.minReplicas = deploymentOpt.Replicas
|
d.minReplicas = deploymentOpt.Replicas
|
||||||
|
|
||||||
d.deploymentClient = clientset.AppsV1().Deployments(namespace)
|
d.deploymentClient = clientset.AppsV1().Deployments(namespace)
|
||||||
d.podClient = clientset.CoreV1().Pods(namespace)
|
d.podClient = clientset.CoreV1().Pods(namespace)
|
||||||
|
d.configMapClient = clientset.CoreV1().ConfigMaps(namespace)
|
||||||
|
|
||||||
switch loadbalance {
|
switch loadbalance {
|
||||||
case LoadbalanceSticky:
|
case LoadbalanceSticky:
|
||||||
d.podChooser = &podchooser.StickyPodChooser{
|
d.podChooser = &podchooser.StickyPodChooser{
|
||||||
|
|||||||
@@ -1,24 +1,42 @@
|
|||||||
package manifest
|
package manifest
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
|
"path"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
v1 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
appsv1 "k8s.io/api/apps/v1"
|
appsv1 "k8s.io/api/apps/v1"
|
||||||
corev1 "k8s.io/api/core/v1"
|
corev1 "k8s.io/api/core/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
type DeploymentOpt struct {
|
type DeploymentOpt struct {
|
||||||
Namespace string
|
Namespace string
|
||||||
Name string
|
Name string
|
||||||
Image string
|
Image string
|
||||||
Replicas int
|
Replicas int
|
||||||
|
|
||||||
|
// Qemu
|
||||||
|
Qemu struct {
|
||||||
|
// when true, will install binfmt
|
||||||
|
Install bool
|
||||||
|
Image string
|
||||||
|
}
|
||||||
|
|
||||||
BuildkitFlags []string
|
BuildkitFlags []string
|
||||||
Rootless bool
|
// files mounted at /etc/buildkitd
|
||||||
NodeSelector map[string]string
|
ConfigFiles map[string][]byte
|
||||||
Platforms []v1.Platform
|
|
||||||
|
Rootless bool
|
||||||
|
NodeSelector map[string]string
|
||||||
|
RequestsCPU string
|
||||||
|
RequestsMemory string
|
||||||
|
LimitsCPU string
|
||||||
|
LimitsMemory string
|
||||||
|
Platforms []v1.Platform
|
||||||
}
|
}
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -26,7 +44,7 @@ const (
|
|||||||
AnnotationPlatform = "buildx.docker.com/platform"
|
AnnotationPlatform = "buildx.docker.com/platform"
|
||||||
)
|
)
|
||||||
|
|
||||||
func NewDeployment(opt *DeploymentOpt) (*appsv1.Deployment, error) {
|
func NewDeployment(opt *DeploymentOpt) (d *appsv1.Deployment, c []*corev1.ConfigMap, err error) {
|
||||||
labels := map[string]string{
|
labels := map[string]string{
|
||||||
"app": opt.Name,
|
"app": opt.Name,
|
||||||
}
|
}
|
||||||
@@ -39,7 +57,7 @@ func NewDeployment(opt *DeploymentOpt) (*appsv1.Deployment, error) {
|
|||||||
annotations[AnnotationPlatform] = strings.Join(platformutil.Format(opt.Platforms), ",")
|
annotations[AnnotationPlatform] = strings.Join(platformutil.Format(opt.Platforms), ",")
|
||||||
}
|
}
|
||||||
|
|
||||||
d := &appsv1.Deployment{
|
d = &appsv1.Deployment{
|
||||||
TypeMeta: metav1.TypeMeta{
|
TypeMeta: metav1.TypeMeta{
|
||||||
APIVersion: appsv1.SchemeGroupVersion.String(),
|
APIVersion: appsv1.SchemeGroupVersion.String(),
|
||||||
Kind: "Deployment",
|
Kind: "Deployment",
|
||||||
@@ -76,15 +94,64 @@ func NewDeployment(opt *DeploymentOpt) (*appsv1.Deployment, error) {
|
|||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
Resources: corev1.ResourceRequirements{
|
||||||
|
Requests: corev1.ResourceList{},
|
||||||
|
Limits: corev1.ResourceList{},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
for _, cfg := range splitConfigFiles(opt.ConfigFiles) {
|
||||||
|
cc := &corev1.ConfigMap{
|
||||||
|
TypeMeta: metav1.TypeMeta{
|
||||||
|
APIVersion: corev1.SchemeGroupVersion.String(),
|
||||||
|
Kind: "ConfigMap",
|
||||||
|
},
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Namespace: opt.Namespace,
|
||||||
|
Name: opt.Name + "-" + cfg.name,
|
||||||
|
Annotations: annotations,
|
||||||
|
},
|
||||||
|
Data: cfg.files,
|
||||||
|
}
|
||||||
|
|
||||||
|
d.Spec.Template.Spec.Containers[0].VolumeMounts = []corev1.VolumeMount{{
|
||||||
|
Name: cfg.name,
|
||||||
|
MountPath: path.Join("/etc/buildkit", cfg.path),
|
||||||
|
}}
|
||||||
|
|
||||||
|
d.Spec.Template.Spec.Volumes = []corev1.Volume{{
|
||||||
|
Name: "config",
|
||||||
|
VolumeSource: corev1.VolumeSource{
|
||||||
|
ConfigMap: &corev1.ConfigMapVolumeSource{
|
||||||
|
LocalObjectReference: corev1.LocalObjectReference{
|
||||||
|
Name: cc.Name,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}}
|
||||||
|
c = append(c, cc)
|
||||||
|
}
|
||||||
|
|
||||||
|
if opt.Qemu.Install {
|
||||||
|
d.Spec.Template.Spec.InitContainers = []corev1.Container{
|
||||||
|
{
|
||||||
|
Name: "qemu",
|
||||||
|
Image: opt.Qemu.Image,
|
||||||
|
Args: []string{"--install", "all"},
|
||||||
|
SecurityContext: &corev1.SecurityContext{
|
||||||
|
Privileged: &privileged,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if opt.Rootless {
|
if opt.Rootless {
|
||||||
if err := toRootless(d); err != nil {
|
if err := toRootless(d); err != nil {
|
||||||
return nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -92,7 +159,39 @@ func NewDeployment(opt *DeploymentOpt) (*appsv1.Deployment, error) {
|
|||||||
d.Spec.Template.Spec.NodeSelector = opt.NodeSelector
|
d.Spec.Template.Spec.NodeSelector = opt.NodeSelector
|
||||||
}
|
}
|
||||||
|
|
||||||
return d, nil
|
if opt.RequestsCPU != "" {
|
||||||
|
reqCPU, err := resource.ParseQuantity(opt.RequestsCPU)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
d.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceCPU] = reqCPU
|
||||||
|
}
|
||||||
|
|
||||||
|
if opt.RequestsMemory != "" {
|
||||||
|
reqMemory, err := resource.ParseQuantity(opt.RequestsMemory)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
d.Spec.Template.Spec.Containers[0].Resources.Requests[corev1.ResourceMemory] = reqMemory
|
||||||
|
}
|
||||||
|
|
||||||
|
if opt.LimitsCPU != "" {
|
||||||
|
limCPU, err := resource.ParseQuantity(opt.LimitsCPU)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
d.Spec.Template.Spec.Containers[0].Resources.Limits[corev1.ResourceCPU] = limCPU
|
||||||
|
}
|
||||||
|
|
||||||
|
if opt.LimitsMemory != "" {
|
||||||
|
limMemory, err := resource.ParseQuantity(opt.LimitsMemory)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
d.Spec.Template.Spec.Containers[0].Resources.Limits[corev1.ResourceMemory] = limMemory
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func toRootless(d *appsv1.Deployment) error {
|
func toRootless(d *appsv1.Deployment) error {
|
||||||
@@ -108,3 +207,35 @@ func toRootless(d *appsv1.Deployment) error {
|
|||||||
d.Spec.Template.ObjectMeta.Annotations["container.seccomp.security.alpha.kubernetes.io/"+containerName] = "unconfined"
|
d.Spec.Template.ObjectMeta.Annotations["container.seccomp.security.alpha.kubernetes.io/"+containerName] = "unconfined"
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type config struct {
|
||||||
|
name string
|
||||||
|
path string
|
||||||
|
files map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
func splitConfigFiles(m map[string][]byte) []config {
|
||||||
|
var c []config
|
||||||
|
idx := map[string]int{}
|
||||||
|
nameIdx := 0
|
||||||
|
for k, v := range m {
|
||||||
|
dir := path.Dir(k)
|
||||||
|
i, ok := idx[dir]
|
||||||
|
if !ok {
|
||||||
|
idx[dir] = len(c)
|
||||||
|
i = len(c)
|
||||||
|
name := "config"
|
||||||
|
if dir != "." {
|
||||||
|
nameIdx++
|
||||||
|
name = fmt.Sprintf("%s-%d", name, nameIdx)
|
||||||
|
}
|
||||||
|
c = append(c, config{
|
||||||
|
path: dir,
|
||||||
|
name: name,
|
||||||
|
files: map[string]string{},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
c[i].files[path.Base(k)] = string(v)
|
||||||
|
}
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|||||||
@@ -11,6 +11,7 @@ import (
|
|||||||
|
|
||||||
dockerclient "github.com/docker/docker/client"
|
dockerclient "github.com/docker/docker/client"
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
|
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -52,9 +53,10 @@ type InitConfig struct {
|
|||||||
DockerAPI dockerclient.APIClient
|
DockerAPI dockerclient.APIClient
|
||||||
KubeClientConfig KubeClientConfig
|
KubeClientConfig KubeClientConfig
|
||||||
BuildkitFlags []string
|
BuildkitFlags []string
|
||||||
ConfigFile string
|
Files map[string][]byte
|
||||||
DriverOpts map[string]string
|
DriverOpts map[string]string
|
||||||
Auth Auth
|
Auth Auth
|
||||||
|
Platforms []specs.Platform
|
||||||
// ContextPathHash can be used for determining pods in the driver instance
|
// ContextPathHash can be used for determining pods in the driver instance
|
||||||
ContextPathHash string
|
ContextPathHash string
|
||||||
}
|
}
|
||||||
@@ -101,16 +103,17 @@ func GetFactory(name string, instanceRequired bool) Factory {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetDriver(ctx context.Context, name string, f Factory, api dockerclient.APIClient, auth Auth, kcc KubeClientConfig, flags []string, config string, do map[string]string, contextPathHash string) (Driver, error) {
|
func GetDriver(ctx context.Context, name string, f Factory, api dockerclient.APIClient, auth Auth, kcc KubeClientConfig, flags []string, files map[string][]byte, do map[string]string, platforms []specs.Platform, contextPathHash string) (Driver, error) {
|
||||||
ic := InitConfig{
|
ic := InitConfig{
|
||||||
DockerAPI: api,
|
DockerAPI: api,
|
||||||
KubeClientConfig: kcc,
|
KubeClientConfig: kcc,
|
||||||
Name: name,
|
Name: name,
|
||||||
BuildkitFlags: flags,
|
BuildkitFlags: flags,
|
||||||
ConfigFile: config,
|
|
||||||
DriverOpts: do,
|
DriverOpts: do,
|
||||||
Auth: auth,
|
Auth: auth,
|
||||||
|
Platforms: platforms,
|
||||||
ContextPathHash: contextPathHash,
|
ContextPathHash: contextPathHash,
|
||||||
|
Files: files,
|
||||||
}
|
}
|
||||||
if f == nil {
|
if f == nil {
|
||||||
var err error
|
var err error
|
||||||
@@ -126,8 +129,15 @@ func GetDriver(ctx context.Context, name string, f Factory, api dockerclient.API
|
|||||||
return &cachedDriver{Driver: d}, nil
|
return &cachedDriver{Driver: d}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func GetFactories() map[string]Factory {
|
func GetFactories() []Factory {
|
||||||
return drivers
|
ds := make([]Factory, 0, len(drivers))
|
||||||
|
for _, d := range drivers {
|
||||||
|
ds = append(ds, d)
|
||||||
|
}
|
||||||
|
sort.Slice(ds, func(i, j int) bool {
|
||||||
|
return ds[i].Name() < ds[j].Name()
|
||||||
|
})
|
||||||
|
return ds
|
||||||
}
|
}
|
||||||
|
|
||||||
type cachedDriver struct {
|
type cachedDriver struct {
|
||||||
|
|||||||
62
go.mod
62
go.mod
@@ -1,68 +1,64 @@
|
|||||||
module github.com/docker/buildx
|
module github.com/docker/buildx
|
||||||
|
|
||||||
go 1.13
|
go 1.16
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect
|
github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect
|
||||||
github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 // indirect
|
|
||||||
github.com/bugsnag/bugsnag-go v1.4.1 // indirect
|
github.com/bugsnag/bugsnag-go v1.4.1 // indirect
|
||||||
github.com/bugsnag/panicwrap v1.2.0 // indirect
|
github.com/bugsnag/panicwrap v1.2.0 // indirect
|
||||||
github.com/cenkalti/backoff v2.1.1+incompatible // indirect
|
github.com/cenkalti/backoff v2.1.1+incompatible // indirect
|
||||||
github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e // indirect
|
github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e // indirect
|
||||||
github.com/containerd/console v1.0.1
|
github.com/compose-spec/compose-go v0.0.0-20210729195839-de56f4f0cb3c
|
||||||
github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc
|
github.com/containerd/console v1.0.3
|
||||||
github.com/denisenkom/go-mssqldb v0.0.0-20190315220205-a8ed825ac853 // indirect
|
github.com/containerd/containerd v1.5.5
|
||||||
github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible
|
github.com/docker/cli v20.10.8+incompatible
|
||||||
|
github.com/docker/cli-docs-tool v0.2.1
|
||||||
github.com/docker/compose-on-kubernetes v0.4.19-0.20190128150448-356b2919c496 // indirect
|
github.com/docker/compose-on-kubernetes v0.4.19-0.20190128150448-356b2919c496 // indirect
|
||||||
github.com/docker/distribution v2.7.1+incompatible
|
github.com/docker/distribution v2.7.1+incompatible
|
||||||
github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible
|
github.com/docker/docker v20.10.7+incompatible
|
||||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
|
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
|
||||||
github.com/docker/go-metrics v0.0.1 // indirect
|
github.com/docker/go-units v0.4.0
|
||||||
github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4 // indirect
|
github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4 // indirect
|
||||||
github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect
|
|
||||||
github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 // indirect
|
github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 // indirect
|
||||||
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5 // indirect
|
|
||||||
github.com/fvbommel/sortorder v1.0.1 // indirect
|
github.com/fvbommel/sortorder v1.0.1 // indirect
|
||||||
github.com/gofrs/flock v0.7.3
|
github.com/gofrs/flock v0.7.3
|
||||||
github.com/gofrs/uuid v3.2.0+incompatible // indirect
|
github.com/gofrs/uuid v3.3.0+incompatible // indirect
|
||||||
github.com/google/certificate-transparency-go v1.0.21 // indirect
|
github.com/google/certificate-transparency-go v1.0.21 // indirect
|
||||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
|
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
|
||||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
|
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
|
||||||
github.com/hashicorp/hcl/v2 v2.6.0
|
github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840
|
||||||
|
github.com/hashicorp/hcl/v2 v2.8.2
|
||||||
github.com/jinzhu/gorm v1.9.2 // indirect
|
github.com/jinzhu/gorm v1.9.2 // indirect
|
||||||
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
|
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
|
||||||
github.com/jinzhu/now v1.0.0 // indirect
|
|
||||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
|
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
|
||||||
github.com/mattn/go-sqlite3 v1.10.0 // indirect
|
github.com/moby/buildkit v0.9.1-0.20211019185819-8778943ac3da
|
||||||
github.com/miekg/pkcs11 v0.0.0-20190322140431-074fd7a1ed19 // indirect
|
|
||||||
github.com/moby/buildkit v0.8.1-0.20201205083753-0af7b1b9c693
|
|
||||||
github.com/opencontainers/go-digest v1.0.0
|
github.com/opencontainers/go-digest v1.0.0
|
||||||
github.com/opencontainers/image-spec v1.0.1
|
github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283
|
||||||
|
github.com/pelletier/go-toml v1.9.4
|
||||||
github.com/pkg/errors v0.9.1
|
github.com/pkg/errors v0.9.1
|
||||||
github.com/prometheus/client_golang v1.7.1 // indirect
|
|
||||||
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
|
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
|
||||||
github.com/sirupsen/logrus v1.7.0
|
github.com/sirupsen/logrus v1.8.1
|
||||||
github.com/spf13/cobra v1.0.0
|
github.com/spf13/cobra v1.2.1
|
||||||
github.com/spf13/pflag v1.0.5
|
github.com/spf13/pflag v1.0.5
|
||||||
github.com/stretchr/testify v1.5.1
|
github.com/stretchr/testify v1.7.0
|
||||||
github.com/theupdateframework/notary v0.6.1 // indirect
|
github.com/theupdateframework/notary v0.6.1 // indirect
|
||||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
|
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
|
||||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
github.com/zclconf/go-cty v1.7.1
|
||||||
github.com/zclconf/go-cty v1.4.0
|
go.opentelemetry.io/otel v1.0.0-RC1
|
||||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
|
go.opentelemetry.io/otel/trace v1.0.0-RC1
|
||||||
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||||
gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect
|
gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect
|
||||||
gopkg.in/fatih/pool.v2 v2.0.0 // indirect
|
gopkg.in/fatih/pool.v2 v2.0.0 // indirect
|
||||||
gopkg.in/gorethink/gorethink.v3 v3.0.5 // indirect
|
gopkg.in/gorethink/gorethink.v3 v3.0.5 // indirect
|
||||||
k8s.io/api v0.19.0
|
k8s.io/api v0.22.1
|
||||||
k8s.io/apimachinery v0.19.0
|
k8s.io/apimachinery v0.22.1
|
||||||
k8s.io/client-go v0.19.0
|
k8s.io/client-go v0.22.1
|
||||||
)
|
)
|
||||||
|
|
||||||
replace (
|
replace (
|
||||||
// protobuf: corresponds to containerd (through buildkit)
|
github.com/docker/cli => github.com/docker/cli v20.10.3-0.20210702143511-f782d1355eff+incompatible
|
||||||
github.com/golang/protobuf => github.com/golang/protobuf v1.3.5
|
github.com/docker/docker => github.com/docker/docker v20.10.3-0.20210817025855-ba2adeebdb8d+incompatible
|
||||||
github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305
|
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.0.0-20210714055410-d010b05b4939
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace => github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/net/http/httptrace/otelhttptrace v0.0.0-20210714055410-d010b05b4939
|
||||||
// genproto: corresponds to containerd (through buildkit)
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp => github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/net/http/otelhttp v0.0.0-20210714055410-d010b05b4939
|
||||||
google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
|
|
||||||
)
|
)
|
||||||
|
|||||||
@@ -1,13 +0,0 @@
|
|||||||
{
|
|
||||||
"Vendor": true,
|
|
||||||
"Deadline": "8m",
|
|
||||||
"Exclude": [".*.pb.go"],
|
|
||||||
"DisableAll": true,
|
|
||||||
"Enable": [
|
|
||||||
"gofmt",
|
|
||||||
"goimports",
|
|
||||||
"ineffassign",
|
|
||||||
"vet",
|
|
||||||
"deadcode"
|
|
||||||
]
|
|
||||||
}
|
|
||||||
@@ -1,16 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
. $(dirname $0)/util
|
|
||||||
set -eu
|
|
||||||
|
|
||||||
: ${TARGETPLATFORM=$CLI_PLATFORM}
|
|
||||||
|
|
||||||
platformFlag=""
|
|
||||||
if [ -n "$TARGETPLATFORM" ]; then
|
|
||||||
platformFlag="--platform $TARGETPLATFORM"
|
|
||||||
fi
|
|
||||||
|
|
||||||
buildxCmd build $platformFlag \
|
|
||||||
--target "binaries" \
|
|
||||||
--output "type=local,dest=./bin/" \
|
|
||||||
.
|
|
||||||
@@ -1,38 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
TYP=$1
|
|
||||||
|
|
||||||
. $(dirname $0)/util
|
|
||||||
set -e
|
|
||||||
|
|
||||||
usage() {
|
|
||||||
echo "usage: ./hack/build_ci_first_pass <binaries>"
|
|
||||||
exit 1
|
|
||||||
}
|
|
||||||
|
|
||||||
if [ -z "$TYP" ]; then
|
|
||||||
usage
|
|
||||||
fi
|
|
||||||
|
|
||||||
importCacheFlags=""
|
|
||||||
exportCacheFlags=""
|
|
||||||
if [ "$GITHUB_ACTIONS" = "true" ]; then
|
|
||||||
if [ -n "$cacheRefFrom" ]; then
|
|
||||||
importCacheFlags="--cache-from=type=local,src=$cacheRefFrom"
|
|
||||||
fi
|
|
||||||
if [ -n "$cacheRefTo" ]; then
|
|
||||||
exportCacheFlags="--cache-to=type=local,dest=$cacheRefTo"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
case $TYP in
|
|
||||||
"binaries")
|
|
||||||
buildxCmd build $importCacheFlags $exportCacheFlags \
|
|
||||||
--target "binaries" \
|
|
||||||
$currentcontext
|
|
||||||
;;
|
|
||||||
*)
|
|
||||||
echo >&2 "Unknown type $TYP"
|
|
||||||
exit 1
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
24
hack/cross
24
hack/cross
@@ -1,24 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
. $(dirname $0)/util
|
|
||||||
set -e
|
|
||||||
|
|
||||||
: ${TARGETPLATFORM=linux/amd64,linux/arm/v7,linux/arm64,darwin/amd64,windows/amd64,linux/ppc64le,linux/s390x}
|
|
||||||
: ${EXPORT_LOCAL=}
|
|
||||||
|
|
||||||
importCacheFlags=""
|
|
||||||
if [ "$GITHUB_ACTIONS" = "true" ]; then
|
|
||||||
if [ -n "$cacheRefFrom" ]; then
|
|
||||||
importCacheFlags="--cache-from=type=local,src=$cacheRefFrom"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
exportFlag=""
|
|
||||||
if [ -n "$EXPORT_LOCAL" ]; then
|
|
||||||
exportFlag="--output=type=local,dest=$EXPORT_LOCAL"
|
|
||||||
fi
|
|
||||||
|
|
||||||
buildxCmd build $importCacheFlags $exportFlag \
|
|
||||||
--target "binaries" \
|
|
||||||
--platform "$TARGETPLATFORM" \
|
|
||||||
$currentcontext
|
|
||||||
@@ -13,7 +13,3 @@ else
|
|||||||
( $dockerdCmd &>/var/log/dockerd.log & )
|
( $dockerdCmd &>/var/log/dockerd.log & )
|
||||||
exec ash
|
exec ash
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
33
hack/dockerfiles/authors.Dockerfile
Normal file
33
hack/dockerfiles/authors.Dockerfile
Normal file
@@ -0,0 +1,33 @@
|
|||||||
|
# syntax=docker/dockerfile:1.3-labs
|
||||||
|
|
||||||
|
FROM alpine:3.14 AS gen
|
||||||
|
RUN apk add --no-cache git
|
||||||
|
WORKDIR /src
|
||||||
|
RUN --mount=type=bind,target=. <<EOT
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
set -e
|
||||||
|
mkdir /out
|
||||||
|
# see also ".mailmap" for how email addresses and names are deduplicated
|
||||||
|
{
|
||||||
|
echo "# This file lists all individuals having contributed content to the repository."
|
||||||
|
echo "# For how it is generated, see hack/dockerfiles/authors.Dockerfile."
|
||||||
|
echo
|
||||||
|
git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
|
||||||
|
} > /out/AUTHORS
|
||||||
|
cat /out/AUTHORS
|
||||||
|
EOT
|
||||||
|
|
||||||
|
FROM scratch AS update
|
||||||
|
COPY --from=gen /out /
|
||||||
|
|
||||||
|
FROM gen AS validate
|
||||||
|
RUN --mount=type=bind,target=.,rw <<EOT
|
||||||
|
set -e
|
||||||
|
git add -A
|
||||||
|
cp -rf /out/* .
|
||||||
|
if [ -n "$(git status --porcelain -- AUTHORS)" ]; then
|
||||||
|
echo >&2 'ERROR: Authors result differs. Please update with "make authors"'
|
||||||
|
git status --porcelain -- AUTHORS
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
EOT
|
||||||
42
hack/dockerfiles/docs.Dockerfile
Normal file
42
hack/dockerfiles/docs.Dockerfile
Normal file
@@ -0,0 +1,42 @@
|
|||||||
|
# syntax=docker/dockerfile:1.3-labs
|
||||||
|
|
||||||
|
ARG GO_VERSION=1.17
|
||||||
|
ARG FORMATS=md,yaml
|
||||||
|
|
||||||
|
FROM golang:${GO_VERSION}-alpine AS docsgen
|
||||||
|
WORKDIR /src
|
||||||
|
RUN --mount=target=. \
|
||||||
|
--mount=target=/root/.cache,type=cache \
|
||||||
|
go build -mod=vendor -o /out/docsgen ./docs/generate.go
|
||||||
|
|
||||||
|
FROM alpine AS gen
|
||||||
|
RUN apk add --no-cache rsync git
|
||||||
|
WORKDIR /src
|
||||||
|
COPY --from=docsgen /out/docsgen /usr/bin
|
||||||
|
ARG FORMATS
|
||||||
|
RUN --mount=target=/context \
|
||||||
|
--mount=target=.,type=tmpfs <<EOT
|
||||||
|
set -e
|
||||||
|
rsync -a /context/. .
|
||||||
|
docsgen --formats "$FORMATS" --source "docs/reference"
|
||||||
|
mkdir /out
|
||||||
|
cp -r docs/reference /out
|
||||||
|
EOT
|
||||||
|
|
||||||
|
FROM scratch AS update
|
||||||
|
COPY --from=gen /out /out
|
||||||
|
|
||||||
|
FROM gen AS validate
|
||||||
|
RUN --mount=target=/context \
|
||||||
|
--mount=target=.,type=tmpfs <<EOT
|
||||||
|
set -e
|
||||||
|
rsync -a /context/. .
|
||||||
|
git add -A
|
||||||
|
rm -rf docs/reference/*
|
||||||
|
cp -rf /out/* ./docs/
|
||||||
|
if [ -n "$(git status --porcelain -- docs/reference)" ]; then
|
||||||
|
echo >&2 'ERROR: Docs result differs. Please update with "make docs"'
|
||||||
|
git status --porcelain -- docs/reference
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
EOT
|
||||||
@@ -1,12 +1,12 @@
|
|||||||
# syntax=docker/dockerfile:1.0-experimental
|
# syntax=docker/dockerfile:1.3
|
||||||
|
|
||||||
FROM golang:1.13-alpine
|
ARG GO_VERSION=1.17
|
||||||
RUN apk add --no-cache git yamllint
|
|
||||||
RUN go get -u gopkg.in/alecthomas/gometalinter.v1 \
|
FROM golang:${GO_VERSION}-alpine
|
||||||
&& mv /go/bin/gometalinter.v1 /go/bin/gometalinter \
|
RUN apk add --no-cache gcc musl-dev yamllint
|
||||||
&& gometalinter --install
|
RUN wget -O- -nv https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.36.0
|
||||||
WORKDIR /go/src/github.com/docker/buildx
|
WORKDIR /go/src/github.com/docker/buildx
|
||||||
RUN --mount=target=/go/src/github.com/docker/buildx \
|
RUN --mount=target=/go/src/github.com/docker/buildx --mount=target=/root/.cache,type=cache \
|
||||||
gometalinter --config=gometalinter.json ./...
|
golangci-lint run
|
||||||
RUN --mount=target=/go/src/github.com/docker/buildx \
|
RUN --mount=target=/go/src/github.com/docker/buildx --mount=target=/root/.cache,type=cache \
|
||||||
yamllint -c .yamllint.yml --strict .
|
yamllint -c .yamllint.yml --strict .
|
||||||
|
|||||||
@@ -1,22 +1,44 @@
|
|||||||
# syntax = docker/dockerfile:1.0-experimental
|
# syntax=docker/dockerfile:1.3-labs
|
||||||
FROM golang:1.13-alpine AS vendored
|
|
||||||
RUN apk add --no-cache git rsync
|
ARG GO_VERSION=1.17
|
||||||
|
|
||||||
|
FROM golang:${GO_VERSION}-alpine AS base
|
||||||
|
RUN apk add --no-cache git rsync
|
||||||
WORKDIR /src
|
WORKDIR /src
|
||||||
|
|
||||||
|
FROM base AS vendored
|
||||||
RUN --mount=target=/context \
|
RUN --mount=target=/context \
|
||||||
--mount=target=.,type=tmpfs,readwrite \
|
--mount=target=.,type=tmpfs \
|
||||||
--mount=target=/go/pkg/mod,type=cache \
|
--mount=target=/go/pkg/mod,type=cache <<EOT
|
||||||
rsync -a /context/. . && \
|
set -e
|
||||||
go mod tidy && go mod vendor && \
|
rsync -a /context/. .
|
||||||
mkdir /out && cp -r go.mod go.sum vendor /out
|
go mod tidy
|
||||||
|
go mod vendor
|
||||||
|
mkdir /out
|
||||||
|
cp -r go.mod go.sum vendor /out
|
||||||
|
EOT
|
||||||
|
|
||||||
FROM scratch AS update
|
FROM scratch AS update
|
||||||
COPY --from=vendored /out /out
|
COPY --from=vendored /out /out
|
||||||
|
|
||||||
FROM vendored AS validate
|
FROM vendored AS validate
|
||||||
RUN --mount=target=/context \
|
RUN --mount=target=/context \
|
||||||
--mount=target=.,type=tmpfs,readwrite \
|
--mount=target=.,type=tmpfs <<EOT
|
||||||
rsync -a /context/. . && \
|
set -e
|
||||||
git add -A && \
|
rsync -a /context/. .
|
||||||
rm -rf vendor && \
|
git add -A
|
||||||
cp -rf /out/* . && \
|
rm -rf vendor
|
||||||
./hack/validate-vendor check
|
cp -rf /out/* .
|
||||||
|
if [ -n "$(git status --porcelain -- go.mod go.sum vendor)" ]; then
|
||||||
|
echo >&2 'ERROR: Vendor result differs. Please vendor your package with "make vendor"'
|
||||||
|
git status --porcelain -- go.mod go.sum vendor
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
EOT
|
||||||
|
|
||||||
|
FROM psampaz/go-mod-outdated:v0.8.0 AS go-mod-outdated
|
||||||
|
FROM base AS outdated
|
||||||
|
RUN --mount=target=.,ro \
|
||||||
|
--mount=target=/go/pkg/mod,type=cache \
|
||||||
|
--mount=from=go-mod-outdated,source=/home/go-mod-outdated,target=/usr/bin/go-mod-outdated \
|
||||||
|
go list -mod=readonly -u -m -json all | go-mod-outdated -update -direct
|
||||||
|
|||||||
@@ -1,21 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
set -eu -o pipefail -x
|
|
||||||
|
|
||||||
if [ -x "$(command -v greadlink)" ]; then
|
|
||||||
# on macOS, GNU readlink is ava (greadlink) can be installed through brew install coreutils
|
|
||||||
cd "$(dirname "$(greadlink -f "$BASH_SOURCE")")/.."
|
|
||||||
else
|
|
||||||
cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.."
|
|
||||||
fi
|
|
||||||
|
|
||||||
# see also ".mailmap" for how email addresses and names are deduplicated
|
|
||||||
|
|
||||||
{
|
|
||||||
cat <<-'EOH'
|
|
||||||
# This file lists all individuals having contributed content to the repository.
|
|
||||||
# For how it is generated, see `scripts/generate-authors.sh`.
|
|
||||||
EOH
|
|
||||||
echo
|
|
||||||
git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf
|
|
||||||
} > AUTHORS
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
. $(dirname $0)/util
|
|
||||||
set -eu
|
|
||||||
|
|
||||||
buildxCmd build --file ./hack/dockerfiles/lint.Dockerfile .
|
|
||||||
37
hack/release
37
hack/release
@@ -1,31 +1,20 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
TAG=$1
|
|
||||||
OUT=$2
|
|
||||||
|
|
||||||
. $(dirname $0)/util
|
|
||||||
set -eu -o pipefail
|
set -eu -o pipefail
|
||||||
|
|
||||||
: ${PLATFORMS=linux/amd64}
|
: ${BUILDX_CMD=docker buildx}
|
||||||
|
: ${RELEASE_OUT=./release-out}
|
||||||
|
|
||||||
usage() {
|
# release
|
||||||
echo "usage: ./hack/release <tag> <out>"
|
(set -x ; ${BUILDX_CMD} bake --set "*.output=$RELEASE_OUT" release)
|
||||||
|
|
||||||
|
# wrap binaries
|
||||||
|
mv -f ./${RELEASE_OUT}/**/* ./${RELEASE_OUT}/
|
||||||
|
find ./${RELEASE_OUT} -type d -empty -delete
|
||||||
|
|
||||||
|
# checksums
|
||||||
|
if ! type shasum > /dev/null 2>&1; then
|
||||||
|
echo >&2 "ERROR: shasum is required"
|
||||||
exit 1
|
exit 1
|
||||||
}
|
|
||||||
|
|
||||||
if [ -z "$TAG" ] || [ -z "$OUT" ]; then
|
|
||||||
usage
|
|
||||||
fi
|
fi
|
||||||
|
find ./${RELEASE_OUT}/ -type f \( -iname "buildx-*" ! -iname "*darwin*" \) -print0 | sort -z | xargs -r0 shasum -a 256 -b | sed 's# .*/# #' > ./${RELEASE_OUT}/checksums.txt
|
||||||
importCacheFlags=""
|
|
||||||
if [[ -n "$cacheRefFrom" ]] && [[ "$cacheType" = "local" ]]; then
|
|
||||||
for ref in $cacheRefFrom; do
|
|
||||||
importCacheFlags="$importCacheFlags--cache-from=type=local,src=$ref "
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
|
|
||||||
buildxCmd build $importCacheFlags \
|
|
||||||
--target "release" \
|
|
||||||
--platform "$PLATFORMS" \
|
|
||||||
--output "type=local,dest=$OUT" \
|
|
||||||
$currentcontext
|
|
||||||
|
|||||||
11
hack/shell
11
hack/shell
@@ -2,17 +2,18 @@
|
|||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
|
: ${BUILDX_CMD=docker buildx}
|
||||||
: ${TMUX=}
|
: ${TMUX=}
|
||||||
|
|
||||||
function clean {
|
function clean {
|
||||||
docker rmi $(cat $iidfile)
|
docker rmi $iid
|
||||||
}
|
}
|
||||||
|
|
||||||
iidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX)
|
iid=buildx-shell
|
||||||
DOCKER_BUILDKIT=1 docker build --iidfile $iidfile --target demo-env .
|
(set -x ; ${BUILDX_CMD} build --output "type=docker,name=$iid" --target shell .)
|
||||||
trap clean EXIT
|
trap clean EXIT
|
||||||
SSH=
|
SSH=
|
||||||
if [ -n "$MOUNT_SSH_AUTH_SOCK" ]; then
|
if [ -n "$MOUNT_SSH_AUTH_SOCK" ]; then
|
||||||
SSH="-v $SSH_AUTH_SOCK:$SSH_AUTH_SOCK -e SSH_AUTH_SOCK"
|
SSH="-v $SSH_AUTH_SOCK:$SSH_AUTH_SOCK -e SSH_AUTH_SOCK"
|
||||||
fi
|
fi
|
||||||
docker run $SSH -it --privileged --rm -e TMUX_ENTRYPOINT=$TMUX $(cat $iidfile)
|
docker run $SSH -it --privileged --rm -e TMUX_ENTRYPOINT=$TMUX $iid
|
||||||
|
|||||||
47
hack/test
47
hack/test
@@ -1,47 +0,0 @@
|
|||||||
#!/usr/bin/env bash
|
|
||||||
|
|
||||||
. $(dirname $0)/util
|
|
||||||
set -eu -o pipefail
|
|
||||||
|
|
||||||
: ${BUILDX_NOCACHE=}
|
|
||||||
: ${TEST_COVERAGE=}
|
|
||||||
|
|
||||||
importCacheFlags=""
|
|
||||||
if [ -n "$cacheRefFrom" ]; then
|
|
||||||
if [ "$cacheType" = "local" ]; then
|
|
||||||
for ref in $cacheRefFrom; do
|
|
||||||
importCacheFlags="$importCacheFlags--cache-from=type=local,src=$ref "
|
|
||||||
done
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
iid="buildx-tests"
|
|
||||||
iidfile=$(mktemp -t docker-iidfile.XXXXXXXXXX)
|
|
||||||
|
|
||||||
coverageVol=""
|
|
||||||
coverageFlags=""
|
|
||||||
if [ "$TEST_COVERAGE" = "1" ]; then
|
|
||||||
covdir="$(pwd)/coverage"
|
|
||||||
mkdir -p "$covdir"
|
|
||||||
coverageVol="-v $covdir:/coverage"
|
|
||||||
coverageFlags="-coverprofile=/coverage/coverage.txt -covermode=atomic"
|
|
||||||
fi
|
|
||||||
|
|
||||||
buildxCmd build $importCacheFlags \
|
|
||||||
--target "integration-tests" \
|
|
||||||
--output "type=docker,name=$iid" \
|
|
||||||
$currentcontext
|
|
||||||
|
|
||||||
cacheVolume="buildx-cache"
|
|
||||||
if ! docker inspect "$cacheVolume" > /dev/null 2>&1; then
|
|
||||||
cacheVolume=$(docker create --name=buildx-cache -v /root/.cache -v /go/pkg/mod alpine)
|
|
||||||
fi
|
|
||||||
|
|
||||||
docker run --rm -v /tmp $coverageVol --volumes-from=$cacheVolume --privileged $iid go test $coverageFlags ${TESTFLAGS:--v} ${TESTPKGS:-./...}
|
|
||||||
|
|
||||||
if [ -n "$BUILDX_NOCACHE" ]; then
|
|
||||||
docker rm -v $cacheVolume
|
|
||||||
fi
|
|
||||||
|
|
||||||
rm "$iidfile"
|
|
||||||
docker rmi $iid
|
|
||||||
101
hack/test-driver
Executable file
101
hack/test-driver
Executable file
@@ -0,0 +1,101 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -eu -o pipefail
|
||||||
|
|
||||||
|
: ${BUILDX_CMD=docker buildx}
|
||||||
|
: ${BUILDKIT_IMAGE=moby/buildkit:buildx-stable-1}
|
||||||
|
: ${BUILDKIT_CFG=}
|
||||||
|
: ${DRIVER=docker-container}
|
||||||
|
: ${DRIVER_OPT=}
|
||||||
|
: ${MULTI_NODE=0}
|
||||||
|
: ${PLATFORMS=linux/amd64,linux/arm64}
|
||||||
|
|
||||||
|
function clean {
|
||||||
|
rm -rf "$context"
|
||||||
|
${BUILDX_CMD} rm "$builderName"
|
||||||
|
}
|
||||||
|
|
||||||
|
context=$(mktemp -d -t buildx-output.XXXXXXXXXX)
|
||||||
|
dockerfile=${context}/Dockerfile
|
||||||
|
trap clean EXIT
|
||||||
|
|
||||||
|
builderName=buildx-test-$(openssl rand -hex 16)
|
||||||
|
buildPlatformFlag=
|
||||||
|
if [ "$DRIVER" = "docker" ]; then
|
||||||
|
builderName=default
|
||||||
|
else
|
||||||
|
buildPlatformFlag=--platform="${PLATFORMS}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
driverOpt=image=${BUILDKIT_IMAGE}
|
||||||
|
if [ -n "$DRIVER_OPT" ]; then
|
||||||
|
driverOpt=$driverOpt,$DRIVER_OPT
|
||||||
|
fi
|
||||||
|
|
||||||
|
# create builder except for docker driver
|
||||||
|
if [ "$DRIVER" != "docker" ]; then
|
||||||
|
if [ "${MULTI_NODE}" = "1" ]; then
|
||||||
|
firstNode=1
|
||||||
|
for platform in ${PLATFORMS//,/ }; do
|
||||||
|
createFlags=""
|
||||||
|
if [ -f "$BUILDKIT_CFG" ]; then
|
||||||
|
createFlags="$createFlags --config=${BUILDKIT_CFG}"
|
||||||
|
fi
|
||||||
|
if [ "$firstNode" = "0" ]; then
|
||||||
|
createFlags="$createFlags --append"
|
||||||
|
fi
|
||||||
|
(
|
||||||
|
set -x
|
||||||
|
${BUILDX_CMD} create ${createFlags} \
|
||||||
|
--name="${builderName}" \
|
||||||
|
--node="${builderName}-${platform/\//-}" \
|
||||||
|
--driver="${DRIVER}" \
|
||||||
|
--driver-opt="${driverOpt}" \
|
||||||
|
--platform="${platform}"
|
||||||
|
)
|
||||||
|
firstNode=0
|
||||||
|
done
|
||||||
|
else
|
||||||
|
createFlags=""
|
||||||
|
if [ -f "$BUILDKIT_CFG" ]; then
|
||||||
|
createFlags="$createFlags --config=${BUILDKIT_CFG}"
|
||||||
|
fi
|
||||||
|
(
|
||||||
|
set -x
|
||||||
|
${BUILDX_CMD} create ${createFlags} \
|
||||||
|
--name="${builderName}" \
|
||||||
|
--driver="${DRIVER}" \
|
||||||
|
--driver-opt="${driverOpt}" \
|
||||||
|
--platform="${PLATFORMS}"
|
||||||
|
)
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
# multi-platform not supported by docker driver
|
||||||
|
buildPlatformFlag=
|
||||||
|
if [ "$DRIVER" != "docker" ]; then
|
||||||
|
buildPlatformFlag=--platform="${PLATFORMS}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
set -x
|
||||||
|
|
||||||
|
# inspect and bootstrap
|
||||||
|
${BUILDX_CMD} inspect --bootstrap --builder="${builderName}"
|
||||||
|
|
||||||
|
# create dockerfile
|
||||||
|
cat > "${dockerfile}" <<EOL
|
||||||
|
FROM busybox as build
|
||||||
|
ARG TARGETPLATFORM
|
||||||
|
ARG BUILDPLATFORM
|
||||||
|
RUN echo "I am running on \$BUILDPLATFORM, building for \$TARGETPLATFORM" > /log
|
||||||
|
FROM busybox
|
||||||
|
COPY --from=build /log /log
|
||||||
|
RUN cat /log
|
||||||
|
RUN uname -a
|
||||||
|
EOL
|
||||||
|
|
||||||
|
# build
|
||||||
|
${BUILDX_CMD} build ${buildPlatformFlag} \
|
||||||
|
--output="type=cacheonly" \
|
||||||
|
--builder="${builderName}" \
|
||||||
|
"${context}"
|
||||||
12
hack/update-docs
Executable file
12
hack/update-docs
Executable file
@@ -0,0 +1,12 @@
|
|||||||
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
|
set -eu -o pipefail
|
||||||
|
|
||||||
|
: ${BUILDX_CMD=docker buildx}
|
||||||
|
: ${FORMATS=md}
|
||||||
|
|
||||||
|
output=$(mktemp -d -t buildx-output.XXXXXXXXXX)
|
||||||
|
(set -x ; DOCS_FORMATS=$FORMATS ${BUILDX_CMD} bake --set "*.output=$output" update-docs)
|
||||||
|
rm -rf ./docs/reference/*
|
||||||
|
cp -R "$output"/out/* ./docs/
|
||||||
|
rm -rf $output
|
||||||
@@ -1,16 +1,11 @@
|
|||||||
#!/usr/bin/env bash
|
#!/usr/bin/env bash
|
||||||
|
|
||||||
. $(dirname $0)/util
|
set -eu -o pipefail
|
||||||
set -eu
|
|
||||||
|
: ${BUILDX_CMD=docker buildx}
|
||||||
|
|
||||||
output=$(mktemp -d -t buildx-output.XXXXXXXXXX)
|
output=$(mktemp -d -t buildx-output.XXXXXXXXXX)
|
||||||
|
(set -x ; ${BUILDX_CMD} bake --set "*.output=$output" update-vendor)
|
||||||
buildxCmd build \
|
|
||||||
--target "update" \
|
|
||||||
--output "type=local,dest=$output" \
|
|
||||||
--file "./hack/dockerfiles/vendor.Dockerfile" \
|
|
||||||
.
|
|
||||||
|
|
||||||
rm -rf ./vendor
|
rm -rf ./vendor
|
||||||
cp -R "$output"/out/* .
|
cp -R "$output"/out/* .
|
||||||
rm -rf $output
|
rm -rf $output
|
||||||
|
|||||||
66
hack/util
66
hack/util
@@ -1,66 +0,0 @@
|
|||||||
#!/usr/bin/env sh
|
|
||||||
|
|
||||||
: ${CI=}
|
|
||||||
: ${PREFER_BUILDCTL=}
|
|
||||||
: ${PREFER_LEGACY=}
|
|
||||||
: ${CLI_PLATFORM=}
|
|
||||||
: ${GITHUB_ACTIONS=}
|
|
||||||
: ${CACHEDIR_FROM=}
|
|
||||||
: ${CACHEDIR_TO=}
|
|
||||||
|
|
||||||
if [ "$PREFER_BUILDCTL" = "1" ]; then
|
|
||||||
echo >&2 "WARNING: PREFER_BUILDCTL is no longer supported. Ignoring."
|
|
||||||
fi
|
|
||||||
|
|
||||||
if [ "$PREFER_LEGACY" = "1" ]; then
|
|
||||||
echo >&2 "WARNING: PREFER_LEGACY is no longer supported. Ignoring."
|
|
||||||
fi
|
|
||||||
|
|
||||||
progressFlag=""
|
|
||||||
if [ "$CI" = "true" ]; then
|
|
||||||
progressFlag="--progress=plain"
|
|
||||||
fi
|
|
||||||
|
|
||||||
buildxCmd() {
|
|
||||||
if docker buildx version >/dev/null 2>&1; then
|
|
||||||
set -x
|
|
||||||
docker buildx "$@" $progressFlag
|
|
||||||
elif buildx version >/dev/null 2>&1; then
|
|
||||||
set -x
|
|
||||||
buildx "$@" $progressFlag
|
|
||||||
elif docker version >/dev/null 2>&1; then
|
|
||||||
set -x
|
|
||||||
DOCKER_BUILDKIT=1 docker "$@" $progressFlag
|
|
||||||
else
|
|
||||||
echo >&2 "ERROR: Please enable DOCKER_BUILDKIT or install standalone buildx"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
if [ -z "$CLI_PLATFORM" ]; then
|
|
||||||
if [ "$(uname -s)" = "Darwin" ]; then
|
|
||||||
arch="$(uname -m)"
|
|
||||||
if [ "$arch" = "x86_64" ]; then
|
|
||||||
arch="amd64"
|
|
||||||
fi
|
|
||||||
CLI_PLATFORM="darwin/$arch"
|
|
||||||
elif uname -s | grep MINGW > /dev/null 2>&1 ; then
|
|
||||||
CLI_PLATFORM="windows/amd64"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
|
|
||||||
cacheType=""
|
|
||||||
cacheRefFrom=""
|
|
||||||
cacheRefTo=""
|
|
||||||
currentref=""
|
|
||||||
if [ "$GITHUB_ACTIONS" = "true" ]; then
|
|
||||||
currentref="git://github.com/$GITHUB_REPOSITORY#$GITHUB_REF"
|
|
||||||
cacheType="local"
|
|
||||||
cacheRefFrom="$CACHEDIR_FROM"
|
|
||||||
cacheRefTo="$CACHEDIR_TO"
|
|
||||||
fi
|
|
||||||
|
|
||||||
currentcontext="."
|
|
||||||
if [ -n "$currentref" ]; then
|
|
||||||
currentcontext="--build-arg BUILDKIT_CONTEXT_KEEP_GIT_DIR=1 $currentref"
|
|
||||||
fi
|
|
||||||
@@ -1,29 +0,0 @@
|
|||||||
#!/usr/bin/env sh
|
|
||||||
set -eu
|
|
||||||
|
|
||||||
case ${1:-} in
|
|
||||||
'')
|
|
||||||
. $(dirname $0)/util
|
|
||||||
buildxCmd build \
|
|
||||||
--target validate \
|
|
||||||
--file ./hack/dockerfiles/vendor.Dockerfile \
|
|
||||||
.
|
|
||||||
;;
|
|
||||||
check)
|
|
||||||
status="$(git status --porcelain -- go.mod go.sum vendor 2>/dev/null)"
|
|
||||||
diffs=$(echo "$status" | grep -v '^[RAD] ' || true)
|
|
||||||
if [ "$diffs" ]; then
|
|
||||||
{
|
|
||||||
set +x
|
|
||||||
echo 'The result of "make vendor" differs'
|
|
||||||
echo
|
|
||||||
echo "$diffs"
|
|
||||||
echo
|
|
||||||
echo 'Please vendor your package with "make vendor"'
|
|
||||||
echo
|
|
||||||
} >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
echo 'Congratulations! All vendoring changes are done the right way.'
|
|
||||||
;;
|
|
||||||
esac
|
|
||||||
@@ -4,6 +4,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/containerd/containerd/platforms"
|
"github.com/containerd/containerd/platforms"
|
||||||
|
"github.com/docker/buildx/util/confutil"
|
||||||
"github.com/docker/buildx/util/platformutil"
|
"github.com/docker/buildx/util/platformutil"
|
||||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
@@ -21,8 +22,9 @@ type Node struct {
|
|||||||
Endpoint string
|
Endpoint string
|
||||||
Platforms []specs.Platform
|
Platforms []specs.Platform
|
||||||
Flags []string
|
Flags []string
|
||||||
ConfigFile string
|
|
||||||
DriverOpts map[string]string
|
DriverOpts map[string]string
|
||||||
|
|
||||||
|
Files map[string][]byte
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ng *NodeGroup) Leave(name string) error {
|
func (ng *NodeGroup) Leave(name string) error {
|
||||||
@@ -88,10 +90,18 @@ func (ng *NodeGroup) Update(name, endpoint string, platforms []string, endpoints
|
|||||||
Name: name,
|
Name: name,
|
||||||
Endpoint: endpoint,
|
Endpoint: endpoint,
|
||||||
Platforms: pp,
|
Platforms: pp,
|
||||||
ConfigFile: configFile,
|
|
||||||
Flags: flags,
|
Flags: flags,
|
||||||
DriverOpts: do,
|
DriverOpts: do,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if configFile != "" {
|
||||||
|
files, err := confutil.LoadConfigFiles(configFile)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
n.Files = files
|
||||||
|
}
|
||||||
|
|
||||||
ng.Nodes = append(ng.Nodes, n)
|
ng.Nodes = append(ng.Nodes, n)
|
||||||
|
|
||||||
if err := ng.validateDuplicates(endpoint, len(ng.Nodes)-1); err != nil {
|
if err := ng.validateDuplicates(endpoint, len(ng.Nodes)-1); err != nil {
|
||||||
|
|||||||
@@ -2,13 +2,13 @@ package store
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"regexp"
|
"regexp"
|
||||||
"sort"
|
"sort"
|
||||||
|
|
||||||
|
"github.com/docker/docker/pkg/ioutils"
|
||||||
"github.com/gofrs/flock"
|
"github.com/gofrs/flock"
|
||||||
"github.com/opencontainers/go-digest"
|
"github.com/opencontainers/go-digest"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
@@ -95,7 +95,7 @@ func (t *Txn) Save(ng *NodeGroup) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return atomicWriteFile(filepath.Join(t.s.root, "instances", name), dt, 0600)
|
return ioutils.AtomicWriteFile(filepath.Join(t.s.root, "instances", name), dt, 0600)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *Txn) Remove(name string) error {
|
func (t *Txn) Remove(name string) error {
|
||||||
@@ -116,14 +116,14 @@ func (t *Txn) SetCurrent(key, name string, global, def bool) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := atomicWriteFile(filepath.Join(t.s.root, "current"), dt, 0600); err != nil {
|
if err := ioutils.AtomicWriteFile(filepath.Join(t.s.root, "current"), dt, 0600); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
h := toHash(key)
|
h := toHash(key)
|
||||||
|
|
||||||
if def {
|
if def {
|
||||||
if err := atomicWriteFile(filepath.Join(t.s.root, "defaults", h), []byte(name), 0600); err != nil {
|
if err := ioutils.AtomicWriteFile(filepath.Join(t.s.root, "defaults", h), []byte(name), 0600); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@@ -137,7 +137,7 @@ func (t *Txn) reset(key string) error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := atomicWriteFile(filepath.Join(t.s.root, "current"), dt, 0600); err != nil {
|
if err := ioutils.AtomicWriteFile(filepath.Join(t.s.root, "current"), dt, 0600); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
@@ -202,35 +202,6 @@ type current struct {
|
|||||||
|
|
||||||
var namePattern = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9\.\-_]*$`)
|
var namePattern = regexp.MustCompile(`^[a-zA-Z][a-zA-Z0-9\.\-_]*$`)
|
||||||
|
|
||||||
func atomicWriteFile(filename string, data []byte, perm os.FileMode) error {
|
|
||||||
f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = os.Chmod(f.Name(), perm)
|
|
||||||
if err != nil {
|
|
||||||
f.Close()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
n, err := f.Write(data)
|
|
||||||
if err == nil && n < len(data) {
|
|
||||||
f.Close()
|
|
||||||
return io.ErrShortWrite
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
f.Close()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := f.Sync(); err != nil {
|
|
||||||
f.Close()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := f.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return os.Rename(f.Name(), filename)
|
|
||||||
}
|
|
||||||
|
|
||||||
func toHash(in string) string {
|
func toHash(in string) string {
|
||||||
return digest.FromBytes([]byte(in)).Hex()[:20]
|
return digest.FromBytes([]byte(in)).Hex()[:20]
|
||||||
}
|
}
|
||||||
|
|||||||
166
store/storeutil/storeutil.go
Normal file
166
store/storeutil/storeutil.go
Normal file
@@ -0,0 +1,166 @@
|
|||||||
|
package storeutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"os"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/docker/buildx/store"
|
||||||
|
"github.com/docker/buildx/util/confutil"
|
||||||
|
"github.com/docker/buildx/util/imagetools"
|
||||||
|
"github.com/docker/buildx/util/resolver"
|
||||||
|
"github.com/docker/cli/cli/command"
|
||||||
|
"github.com/docker/cli/cli/context/docker"
|
||||||
|
buildkitdconfig "github.com/moby/buildkit/cmd/buildkitd/config"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetStore returns current builder instance store
|
||||||
|
func GetStore(dockerCli command.Cli) (*store.Txn, func(), error) {
|
||||||
|
s, err := store.New(confutil.ConfigDir(dockerCli))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
return s.Txn()
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCurrentEndpoint returns the current default endpoint value
|
||||||
|
func GetCurrentEndpoint(dockerCli command.Cli) (string, error) {
|
||||||
|
name := dockerCli.CurrentContext()
|
||||||
|
if name != "default" {
|
||||||
|
return name, nil
|
||||||
|
}
|
||||||
|
de, err := GetDockerEndpoint(dockerCli, name)
|
||||||
|
if err != nil {
|
||||||
|
return "", errors.Errorf("docker endpoint for %q not found", name)
|
||||||
|
}
|
||||||
|
return de, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDockerEndpoint returns docker endpoint string for given context
|
||||||
|
func GetDockerEndpoint(dockerCli command.Cli, name string) (string, error) {
|
||||||
|
list, err := dockerCli.ContextStore().List()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
for _, l := range list {
|
||||||
|
if l.Name == name {
|
||||||
|
ep, ok := l.Endpoints["docker"]
|
||||||
|
if !ok {
|
||||||
|
return "", errors.Errorf("context %q does not have a Docker endpoint", name)
|
||||||
|
}
|
||||||
|
typed, ok := ep.(docker.EndpointMeta)
|
||||||
|
if !ok {
|
||||||
|
return "", errors.Errorf("endpoint %q is not of type EndpointMeta, %T", ep, ep)
|
||||||
|
}
|
||||||
|
return typed.Host, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetCurrentInstance finds the current builder instance
|
||||||
|
func GetCurrentInstance(txn *store.Txn, dockerCli command.Cli) (*store.NodeGroup, error) {
|
||||||
|
ep, err := GetCurrentEndpoint(dockerCli)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ng, err := txn.Current(ep)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if ng == nil {
|
||||||
|
ng, _ = GetNodeGroup(txn, dockerCli, dockerCli.CurrentContext())
|
||||||
|
}
|
||||||
|
|
||||||
|
return ng, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetNodeGroup returns nodegroup based on the name
|
||||||
|
func GetNodeGroup(txn *store.Txn, dockerCli command.Cli, name string) (*store.NodeGroup, error) {
|
||||||
|
ng, err := txn.NodeGroupByName(name)
|
||||||
|
if err != nil {
|
||||||
|
if !os.IsNotExist(errors.Cause(err)) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ng != nil {
|
||||||
|
return ng, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if name == "default" {
|
||||||
|
name = dockerCli.CurrentContext()
|
||||||
|
}
|
||||||
|
|
||||||
|
list, err := dockerCli.ContextStore().List()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for _, l := range list {
|
||||||
|
if l.Name == name {
|
||||||
|
return &store.NodeGroup{
|
||||||
|
Name: "default",
|
||||||
|
Nodes: []store.Node{
|
||||||
|
{
|
||||||
|
Name: "default",
|
||||||
|
Endpoint: name,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, errors.Errorf("no builder %q found", name)
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetImageConfig(dockerCli command.Cli, ng *store.NodeGroup) (opt imagetools.Opt, err error) {
|
||||||
|
opt.Auth = dockerCli.ConfigFile()
|
||||||
|
|
||||||
|
if ng == nil || len(ng.Nodes) == 0 {
|
||||||
|
return opt, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
files := ng.Nodes[0].Files
|
||||||
|
|
||||||
|
dt, ok := files["buildkitd.toml"]
|
||||||
|
if !ok {
|
||||||
|
return opt, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
config, err := buildkitdconfig.Load(bytes.NewReader(dt))
|
||||||
|
if err != nil {
|
||||||
|
return opt, err
|
||||||
|
}
|
||||||
|
|
||||||
|
regconfig := make(map[string]resolver.RegistryConfig)
|
||||||
|
|
||||||
|
for k, v := range config.Registries {
|
||||||
|
rc := resolver.RegistryConfig{
|
||||||
|
Mirrors: v.Mirrors,
|
||||||
|
PlainHTTP: v.PlainHTTP,
|
||||||
|
Insecure: v.Insecure,
|
||||||
|
}
|
||||||
|
for _, ca := range v.RootCAs {
|
||||||
|
dt, ok := files[strings.TrimPrefix(ca, confutil.DefaultBuildKitConfigDir+"/")]
|
||||||
|
if ok {
|
||||||
|
rc.RootCAs = append(rc.RootCAs, dt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, kp := range v.KeyPairs {
|
||||||
|
key, keyok := files[strings.TrimPrefix(kp.Key, confutil.DefaultBuildKitConfigDir+"/")]
|
||||||
|
cert, certok := files[strings.TrimPrefix(kp.Certificate, confutil.DefaultBuildKitConfigDir+"/")]
|
||||||
|
if keyok && certok {
|
||||||
|
rc.KeyPairs = append(rc.KeyPairs, resolver.TLSKeyPair{
|
||||||
|
Key: key,
|
||||||
|
Certificate: cert,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
regconfig[k] = rc
|
||||||
|
}
|
||||||
|
|
||||||
|
opt.RegistryConfig = regconfig
|
||||||
|
|
||||||
|
return opt, nil
|
||||||
|
}
|
||||||
@@ -1,7 +1,8 @@
|
|||||||
package build
|
package buildflags
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/csv"
|
"encoding/csv"
|
||||||
|
"os"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/moby/buildkit/client"
|
"github.com/moby/buildkit/client"
|
||||||
@@ -45,6 +46,9 @@ func ParseCacheEntry(in []string) ([]client.CacheOptionsEntry, error) {
|
|||||||
if im.Type == "" {
|
if im.Type == "" {
|
||||||
return nil, errors.Errorf("type required form> %q", in)
|
return nil, errors.Errorf("type required form> %q", in)
|
||||||
}
|
}
|
||||||
|
if !addGithubToken(&im) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
imports = append(imports, im)
|
imports = append(imports, im)
|
||||||
}
|
}
|
||||||
return imports, nil
|
return imports, nil
|
||||||
@@ -58,3 +62,20 @@ func isRefOnlyFormat(in []string) bool {
|
|||||||
}
|
}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func addGithubToken(ci *client.CacheOptionsEntry) bool {
|
||||||
|
if ci.Type != "gha" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if _, ok := ci.Attrs["token"]; !ok {
|
||||||
|
if v, ok := os.LookupEnv("ACTIONS_RUNTIME_TOKEN"); ok {
|
||||||
|
ci.Attrs["token"] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if _, ok := ci.Attrs["url"]; !ok {
|
||||||
|
if v, ok := os.LookupEnv("ACTIONS_CACHE_URL"); ok {
|
||||||
|
ci.Attrs["url"] = v
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ci.Attrs["token"] != "" && ci.Attrs["url"] != ""
|
||||||
|
}
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
package build
|
package buildflags
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/moby/buildkit/util/entitlements"
|
"github.com/moby/buildkit/util/entitlements"
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
package build
|
package buildflags
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/csv"
|
"encoding/csv"
|
||||||
@@ -100,7 +100,9 @@ func ParseOutputs(inp []string) ([]client.ExportEntry, error) {
|
|||||||
delete(out.Attrs, "dest")
|
delete(out.Attrs, "dest")
|
||||||
case "registry":
|
case "registry":
|
||||||
out.Type = client.ExporterImage
|
out.Type = client.ExporterImage
|
||||||
out.Attrs["push"] = "true"
|
if _, ok := out.Attrs["push"]; !ok {
|
||||||
|
out.Attrs["push"] = "true"
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
outs = append(outs, out)
|
outs = append(outs, out)
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user