run shfmt
git grep --name-only '^#!' | egrep -v '(vendor|\.go|Jenkinsfile)' | xargs shfmt -w -bn -ci -sr

Signed-off-by: Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
parent eb484fcb67
commit 3cf82748dd
50 changed files with 513 additions and 441 deletions
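For context, the shfmt flags used above are: -w (rewrite files in place), -bn (binary operators such as && and | may start the next line), -ci (indent case branches), and -sr (put a space after redirect operators). A rough before/after sketch of the kind of rewrite these flags produce, on a made-up snippet rather than code taken from this diff (indentation shown with spaces; shfmt itself defaults to tabs):

  # before
  check() {
      echo -n "- "; check_flag "$1"
      cat conf 2>/dev/null | grep "$1" |
          sort
  }

  # after shfmt -w -bn -ci -sr
  check() {
      echo -n "- "
      check_flag "$1"
      cat conf 2> /dev/null | grep "$1" \
          | sort
  }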
@@ -98,7 +98,8 @@ check_flag() {
check_flags() {
for flag in "$@"; do
- echo -n "- "; check_flag "$flag"
+ echo -n "- "
+ check_flag "$flag"
done
}

@@ -228,12 +229,15 @@ echo 'Optional Features:'
}
{
if is_set LEGACY_VSYSCALL_NATIVE; then
- echo -n "- "; wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
+ echo -n "- "
+ wrap_bad "CONFIG_LEGACY_VSYSCALL_NATIVE" 'enabled'
echo " $(wrap_color '(dangerous, provides an ASLR-bypassing target with usable ROP gadgets.)' bold black)"
elif is_set LEGACY_VSYSCALL_EMULATE; then
- echo -n "- "; wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
+ echo -n "- "
+ wrap_good "CONFIG_LEGACY_VSYSCALL_EMULATE" 'enabled'
elif is_set LEGACY_VSYSCALL_NONE; then
- echo -n "- "; wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
+ echo -n "- "
+ wrap_bad "CONFIG_LEGACY_VSYSCALL_NONE" 'enabled'
echo " $(wrap_color '(containers using eglibc <= 2.13 will not work. Switch to' bold black)"
echo " $(wrap_color ' "CONFIG_VSYSCALL_[NATIVE|EMULATE]" or use "vsyscall=[native|emulate]"' bold black)"
echo " $(wrap_color ' on kernel command line. Note that this will disable ASLR for the,' bold black)"

@@ -334,9 +338,12 @@ check_flags OVERLAY_FS | sed 's/^/ /'
EXITCODE=0

echo " - \"$(wrap_color 'zfs' blue)\":"
- echo -n " - "; check_device /dev/zfs
- echo -n " - "; check_command zfs
- echo -n " - "; check_command zpool
+ echo -n " - "
+ check_device /dev/zfs
+ echo -n " - "
+ check_command zfs
+ echo -n " - "
+ check_command zpool
[ "$EXITCODE" = 0 ] && STORAGE=0
EXITCODE=0

@@ -345,8 +352,7 @@ EXITCODE=$CODE
echo

- check_limit_over()
- {
+ check_limit_over() {
if [ "$(cat "$1")" -le "$2" ]; then
wrap_bad "- $1" "$(cat "$1")"
wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black
@@ -42,7 +42,8 @@ bundle_files(){
}

control_docker() {
- m=$1; op=$2
+ m=$1
+ op=$2
# NOTE: `docker-machine ssh $m sh -c "foo bar"` does not work
# (but `docker-machine ssh $m sh -c "foo\ bar"` works)
# Anyway we avoid using `sh -c` here for avoiding confusion

@@ -67,7 +68,9 @@ detect_prefix(){
}

install_to() {
- m=$1; shift; files=$@
+ m=$1
+ shift
+ files=$@
echo "$m: detecting docker"
prefix=$(detect_prefix $m)
echo "$m: detected docker on $prefix"

@@ -89,7 +92,8 @@ check_prereq(){

case "$1" in
"install")
- shift; machines=$@
+ shift
+ machines=$@
check_prereq
files=$(bundle_files)
echo "Files to be installed:"

@@ -101,7 +105,10 @@ case "$1" in
done
status=0
for pid in ${pids[@]}; do
- wait $pid || { status=$?; echo "background process $pid failed with exit status $status"; }
+ wait $pid || {
+ status=$?
+ echo "background process $pid failed with exit status $status"
+ }
done
exit $status
;;
@@ -49,10 +49,14 @@ authService='registry.docker.io'

# https://github.com/moby/moby/issues/33700
fetch_blob() {
- local token="$1"; shift
- local image="$1"; shift
- local digest="$1"; shift
- local targetFile="$1"; shift
+ local token="$1"
+ shift
+ local image="$1"
+ shift
+ local digest="$1"
+ shift
+ local targetFile="$1"
+ shift
local curlArgs=("$@")

local curlHeaders="$(

@@ -81,7 +85,8 @@ fetch_blob() {

# handle 'application/vnd.docker.distribution.manifest.v2+json' manifest
handle_single_manifest_v2() {
- local manifestJson="$1"; shift
+ local manifestJson="$1"
+ shift

local configDigest="$(echo "$manifestJson" | jq --raw-output '.config.digest')"
local imageId="${configDigest#*:}" # strip off "sha256:"
@@ -8,9 +8,9 @@
# $> ./generate_aliases <mailmap_file> > aliases
#

- cat $1 | \
- grep -v '^#' | \
- sed 's/^[^<]*<\([^>]*\)>/\1/' | \
- grep '<.*>' | sed -e 's/[<>]/ /g' | \
- awk '{if ($3 != "") { print $3" "$1 } else {print $2" "$1}}' | \
- sort | uniq
+ cat $1 \
+ | grep -v '^#' \
+ | sed 's/^[^<]*<\([^>]*\)>/\1/' \
+ | grep '<.*>' | sed -e 's/[<>]/ /g' \
+ | awk '{if ($3 != "") { print $3" "$1 } else {print $2" "$1}}' \
+ | sort | uniq
@@ -131,7 +131,7 @@ case "$1" in
restart)
check_init
fail_unless_root
- docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null`
+ docker_pid=$(cat "$DOCKER_SSD_PIDFILE" 2> /dev/null)
[ -n "$docker_pid" ] \
&& ps -p $docker_pid > /dev/null 2>&1 \
&& $0 stop
@@ -112,7 +112,6 @@ rh_status_q() {
rh_status > /dev/null 2>&1
}

-
check_for_cleanup() {
if [ -f ${pidfile} ]; then
/bin/ps -fp $(cat ${pidfile}) > /dev/null || rm ${pidfile}

@@ -148,6 +147,7 @@ case "$1" in
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
exit 2
+ ;;
esac

exit $?
@@ -19,13 +19,13 @@ tmp() {
}

apkv() {
- curl -sSL $MAINREPO/$ARCH/APKINDEX.tar.gz | tar -Oxz |
- grep --text '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2
+ curl -sSL $MAINREPO/$ARCH/APKINDEX.tar.gz | tar -Oxz \
+ | grep --text '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2
}

getapk() {
- curl -sSL $MAINREPO/$ARCH/apk-tools-static-$(apkv).apk |
- tar -xz -C $TMP sbin/apk.static
+ curl -sSL $MAINREPO/$ARCH/apk-tools-static-$(apkv).apk \
+ | tar -xz -C $TMP sbin/apk.static
}

mkbase() {
@@ -91,14 +91,12 @@ if [ -d /etc/yum/vars ]; then
cp -a /etc/yum/vars "$target"/etc/yum/
fi

- if [[ -n "$install_groups" ]];
- then
+ if [[ -n "$install_groups" ]]; then
yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \
--setopt=group_package_types=mandatory -y groupinstall "${install_groups[@]}"
fi

- if [[ -n "$install_packages" ]];
- then
+ if [[ -n "$install_packages" ]]; then
yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \
--setopt=group_package_types=mandatory -y install "${install_packages[@]}"
fi

@@ -129,8 +127,7 @@ rm -rf "$target"/etc/ld.so.cache "$target"/var/cache/ldconfig
mkdir -p --mode=0755 "$target"/var/cache/ldconfig

if [ -z "$version" ]; then
- for file in "$target"/etc/{redhat,system}-release
- do
+ for file in "$target"/etc/{redhat,system}-release; do
if [ -r "$file" ]; then
version="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' "$file")"
break
@@ -28,12 +28,27 @@ tag=
compression="auto"
while true; do
case "$1" in
- -d|--dir) dir="$2" ; shift 2 ;;
- -t|--tag) tag="$2" ; shift 2 ;;
- --compression) compression="$2" ; shift 2 ;;
- --no-compression) compression="none" ; shift 1 ;;
+ -d | --dir)
+ dir="$2"
+ shift 2
+ ;;
+ -t | --tag)
+ tag="$2"
+ shift 2
+ ;;
+ --compression)
+ compression="$2"
+ shift 2
+ ;;
+ --no-compression)
+ compression="none"
+ shift 1
+ ;;
-h | --help) usage ;;
- --) shift ; break ;;
+ --)
+ shift
+ break
+ ;;
esac
done

@@ -41,8 +56,7 @@ script="$1"
[ "$script" ] || usage
shift

- if [ "$compression" == 'auto' ] || [ -z "$compression" ]
- then
+ if [ "$compression" == 'auto' ] || [ -z "$compression" ]; then
compression='xz'
fi

@@ -68,7 +82,10 @@ if [ -z "$dir" ]; then
fi

rootfsDir="$dir/rootfs"
- ( set -x; mkdir -p "$rootfsDir" )
+ (
+ set -x
+ mkdir -p "$rootfsDir"
+ )

# pass all remaining arguments to $script
"$scriptDir/$script" "$rootfsDir" "$@"

@@ -101,20 +118,35 @@ EOF
# if our generated image has a decent shell, let's set a default command
for shell in /bin/bash /usr/bin/fish /usr/bin/zsh /bin/sh; do
if [ -x "$rootfsDir/$shell" ]; then
- ( set -x; echo 'CMD ["'"$shell"'"]' >> "$dir/Dockerfile" )
+ (
+ set -x
+ echo 'CMD ["'"$shell"'"]' >> "$dir/Dockerfile"
+ )
break
fi
done

- ( set -x; rm -rf "$rootfsDir" )
+ (
+ set -x
+ rm -rf "$rootfsDir"
+ )

if [ "$tag" ]; then
- ( set -x; docker build -t "$tag" "$dir" )
+ (
+ set -x
+ docker build -t "$tag" "$dir"
+ )
elif [ "$delDir" ]; then
# if we didn't specify a tag and we're going to delete our dir, let's just build an untagged image so that we did _something_
- ( set -x; docker build "$dir" )
+ (
+ set -x
+ docker build "$dir"
+ )
fi

if [ "$delDir" ]; then
- ( set -x; rm -rf "$dir" )
+ (
+ set -x
+ rm -rf "$dir"
+ )
fi
@@ -82,7 +82,10 @@ chmod +x "$rootfsDir/usr/sbin/policy-rc.d"
)

# shrink a little, since apt makes us cache-fat (wheezy: ~157.5MB vs ~120MB)
- ( set -x; rootfs_chroot apt-get clean )
+ (
+ set -x
+ rootfs_chroot apt-get clean
+ )

# this file is one APT creates to make sure we don't "autoremove" our currently
# in-use kernel, which doesn't really apply to debootstraps/Docker images that
@@ -20,9 +20,18 @@ installversion=
mirror=
while true; do
case "$1" in
- -v|--version) installversion="$2" ; shift 2 ;;
- -m|--mirror) mirror="$2" ; shift 2 ;;
- --) shift ; break ;;
+ -v | --version)
+ installversion="$2"
+ shift 2
+ ;;
+ -m | --mirror)
+ mirror="$2"
+ shift 2
+ ;;
+ --)
+ shift
+ break
+ ;;
esac
done
@@ -31,7 +31,10 @@ echo "Nuking $dir ..."
echo ' (if this is wrong, press Ctrl+C NOW!)'
echo

- ( set -x; sleep 10 )
+ (
+ set -x
+ sleep 10
+ )
echo

dir_in_dir() {

@@ -45,7 +48,10 @@ dir_in_dir() {
for mount in $(awk '{ print $5 }' /proc/self/mountinfo); do
mount="$(readlink -f "$mount" || true)"
if [ "$dir" != "$mount" ] && dir_in_dir "$mount" "$dir"; then
- ( set -x; umount -f "$mount" )
+ (
+ set -x
+ umount -f "$mount"
+ )
fi
done

@@ -55,10 +61,17 @@ if command -v btrfs > /dev/null 2>&1; then
# Source: http://stackoverflow.com/a/32865333
for subvol in $(find "$dir" -type d -inum 256 | sort -r); do
if [ "$dir" != "$subvol" ]; then
- ( set -x; btrfs subvolume delete "$subvol" )
+ (
+ set -x
+ btrfs subvolume delete "$subvol"
+ )
fi
done
fi

# finally, DESTROY ALL THINGS
- ( shopt -s dotglob; set -x; rm -rf "$dir"/* )
+ (
+ shopt -s dotglob
+ set -x
+ rm -rf "$dir"/*
+ )
@@ -20,7 +20,7 @@ function urlencode() {
local c="${1:i:1}"
case $c in
[a-zA-Z0-9.~_-]) printf "$c" ;;
- *) printf '%%%02X' "'$c"
+ *) printf '%%%02X' "'$c" ;;
esac
done
}

@@ -32,15 +32,15 @@ function template() {


\`docker version\`:
- `${DOCKER_COMMAND} -D version`
+ $(${DOCKER_COMMAND} -D version)


\`docker info\`:
- `${DOCKER_COMMAND} -D info`
+ $(${DOCKER_COMMAND} -D info)


\`uname -a\`:
- `uname -a`
+ $(uname -a)


Environment details (AWS, VirtualBox, physical, etc.):

@@ -76,7 +76,6 @@ function format_issue_url() {
echo "${DOCKER_ISSUE_URL}?title=${issue_name}&body=${issue_body}"
}

-
echo -ne "Do you use \`sudo\` to call docker? [y|N]: "
read -r -n 1 use_sudo
echo ""

@@ -102,4 +101,3 @@ if which xdg-open 2>/dev/null >/dev/null ; then
fi

echo "If you would like to manually open the url, you can open this link if your browser: ${issue_url}"
-
@@ -1,6 +1,5 @@
#!/bin/sh

-
# containerd is also pinned in vendor.conf. When updating the binary
# version you may also need to update the vendor version to pick up bug
# fixes or new APIs.
@@ -34,5 +34,3 @@ _install_proxy() {
git checkout -q "$LIBNETWORK_COMMIT"
go build ${BUILD_MODE} -ldflags="$PROXY_LDFLAGS" -o ${PREFIX}/docker-proxy github.com/docker/libnetwork/cmd/proxy
}
-
-
hack/make.sh

@@ -34,10 +34,10 @@ echo
DEFAULT_BUNDLES=(
binary-daemon
dynbinary
-
+ \
test-integration
test-docker-py
-
+ \
cross
)

@@ -95,10 +95,11 @@ fi
# functionality. We favour libdm_dlsym_deferred_remove over
# libdm_no_deferred_remove in dynamic cases because the binary could be shipped
# with a newer libdevmapper than the one it was built with.
- if \
+ if
command -v gcc &> /dev/null \
&& ! (echo -e '#include <libdevmapper.h>\nint main() { dm_task_deferred_remove(NULL); }' | gcc -xc - -o /dev/null $(pkg-config --libs devmapper) &> /dev/null) \
- ; then
+ ;
+ then
add_buildtag libdm dlsym_deferred_remove
fi

@@ -134,7 +135,8 @@ if [ "$(uname -s)" = 'FreeBSD' ]; then
fi

bundle() {
- local bundle="$1"; shift
+ local bundle="$1"
+ shift
echo "---> Making bundle: $(basename "$bundle") (in $DEST)"
source "$SCRIPTDIR/make/$bundle" "$@"
}
@@ -72,8 +72,8 @@ fi

# -buildmode=pie is not supported on Windows and Linux on mips.
case "$(go env GOOS)/$(go env GOARCH)" in
- windows/*|linux/mips*)
- ;;
+ windows/* | linux/mips*) ;;

*)
BUILDFLAGS+=("-buildmode=pie")
;;
@@ -2,7 +2,10 @@

if [ ! "$(go env GOOS)" = 'windows' ]; then
for pidFile in $(find "$DEST" -name docker.pid); do
- pid=$([ -n "$TESTDEBUG" ] && set -x; cat "$pidFile")
+ pid=$(
+ [ -n "$TESTDEBUG" ] && set -x
+ cat "$pidFile"
+ )
(
[ -n "$TESTDEBUG" ] && set -x
kill "$pid"
@@ -163,8 +163,8 @@ error_on_leaked_containerd_shims() {
return
fi

- leftovers=$(ps -ax -o pid,cmd |
- awk '$2 == "containerd-shim" && $4 ~ /.*\/bundles\/.*\/test-integration/ { print $1 }')
+ leftovers=$(ps -ax -o pid,cmd \
+ | awk '$2 == "containerd-shim" && $4 ~ /.*\/bundles\/.*\/test-integration/ { print $1 }')
if [ -n "$leftovers" ]; then
ps aux
# shellcheck disable=SC2086
@@ -21,7 +21,6 @@ if [ -n "$DOCKER_STORAGE_OPTS" ]; then
unset IFS
fi

-
listen_port=2375
if [ -n "$DOCKER_PORT" ]; then
IFS=':' read -r -a ports <<< "$DOCKER_PORT"
@@ -37,6 +37,7 @@ source hack/make/.integration-test-helpers
echo "WARN: Skipping test-docker-py: connecting to docker daemon using ${docker_host_scheme} (${DOCKER_HOST}) not supported"
bundle .integration-daemon-stop
return 0
+ ;;
esac

docker_py_image="docker-sdk-python3:${DOCKER_PY_COMMIT}"
@@ -3,11 +3,10 @@ set -e -o pipefail

source hack/validate/.validate

-
run_integration_flaky() {
new_tests=$(
- validate_diff --diff-filter=ACMR --unified=0 -- 'integration/*_test.go' |
- grep -E '^(\+func Test)(.*)(\*testing\.T\))' || true
+ validate_diff --diff-filter=ACMR --unified=0 -- 'integration/*_test.go' \
+ | grep -E '^(\+func Test)(.*)(\*testing\.T\))' || true
)

if [ -z "$new_tests" ]; then
@@ -13,8 +13,9 @@ export DOCKER_ENGINE_GOARCH=${DOCKER_ENGINE_GOARCH:-${ARCH}}
: ${TESTDEBUG:=}

integration_api_dirs=${TEST_INTEGRATION_DIR:-"$(
- find /tests/integration -type d |
- grep -vE '(^/tests/integration($|/internal)|/testdata)')"}
+ find /tests/integration -type d \
+ | grep -vE '(^/tests/integration($|/internal)|/testdata)'
+ )"}

run_test_integration() {
set_platform_timeout
@@ -5,8 +5,8 @@ export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "${SCRIPTDIR}/.validate"

new_tests=$(
- validate_diff --diff-filter=ACMR --unified=0 -- 'integration-cli/*_api_*.go' 'integration-cli/*_cli_*.go' |
- grep -E '^\+func (.*) Test' || true
+ validate_diff --diff-filter=ACMR --unified=0 -- 'integration-cli/*_api_*.go' 'integration-cli/*_cli_*.go' \
+ | grep -E '^\+func (.*) Test' || true
)

if [ -n "$new_tests" ]; then
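As a side note (a suggested check, not part of this commit): shfmt can also verify formatting without rewriting files, via -l (list files whose formatting differs) or -d (print the diff instead of applying it). The same file selection used in the commit message could be checked with:

  git grep --name-only '^#!' | egrep -v '(vendor|\.go|Jenkinsfile)' | xargs shfmt -l -bn -ci -sr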