Resolve other necessary changes
rjeberhard committed Apr 29, 2018
1 parent 8cd987e commit cc45cf5
Showing 3 changed files with 183 additions and 57 deletions.
156 changes: 129 additions & 27 deletions src/integration-tests/bash/cleanup.sh
@@ -27,15 +27,13 @@
#
# The test runs in 4 phases:
#
# Phase 1: Delete test kubernetes artifacts with labels.
#
# Phase 2: Wait 15 seconds to see if stage 1 succeeded, and
# if not, repeatedly search for all test related kubectl
# artifacts and try to delete them directly for up to 60 more
# seconds. This phase has no dependency on the
# previous test run's yaml files. It makes no
# attempt to delete artifacts in a particular order.
#
# Phase 3: Use a kubernetes job to delete the PV directories
@@ -47,13 +45,6 @@
# see LEASE_ID above.
#
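As a concrete sketch of the label-driven Phase 1 (the namespace and resource types here are examples only; the label is one of the two this script targets), a single namespaced delete looks like:

    kubectl -n default delete pod,service,configmap -l weblogic.domainUID

The functions added below generalize this to every labeled namespace and resource type, once for weblogic.domainUID and once for weblogic.operatorName.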

DOMAINS=(domain1 domain2 domain3 domain4 domain5)
DOMAIN_NAMESPACES=(default default test1 test2 default)
DCOUNT=${#DOMAINS[@]}

OPER_NAMESPACES=(weblogic-operator-1 weblogic-operator-2)
OCOUNT=${#OPER_NAMESPACES[@]}

SCRIPTPATH="$( cd "$(dirname "$0")" > /dev/null 2>&1 ; pwd -P )"
PROJECT_ROOT="$SCRIPTPATH/../../.."
RESULT_ROOT=${RESULT_ROOT:-/scratch/$USER/wl_k8s_test_results}
@@ -63,25 +54,133 @@ USER_PROJECTS_DIR="$RESULT_DIR/user-projects"
TMP_DIR="$RESULT_DIR/cleanup_tmp"
JOB_NAME="weblogic-command-job"
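Since RESULT_ROOT defaults from the environment (note the ${RESULT_ROOT:-...} expansion above) and TMP_DIR derives from it, the cleanup can be pointed at a different scratch area; a hypothetical invocation (path is only an example):

    RESULT_ROOT=/tmp/$USER/wl_k8s_test_results src/integration-tests/bash/cleanup.sh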

function fail {
echo @@ cleanup.sh: Error "$@"
exit 1
}

#
# Usage:
# getResWithLabel outfilename
#
function getResWithLabel {

# first, let's get all namespaced types with -l $LABEL_SELECTOR
kubectl get $NAMESPACED_TYPES \
-l "$LABEL_SELECTOR" \
-o=jsonpath='{range .items[*]}{.kind}{" "}{.metadata.name}{" -n "}{.metadata.namespace}{"\n"}{end}' \
--all-namespaces=true >> $1

# now, get all non-namespaced types with -l $LABEL_SELECTOR
kubectl get $NOT_NAMESPACED_TYPES \
-l "$LABEL_SELECTOR" \
-o=jsonpath='{range .items[*]}{.kind}{" "}{.metadata.name}{"\n"}{end}' \
--all-namespaces=true >> $1
}
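For reference, with LABEL_SELECTOR set to weblogic.domainUID the first jsonpath query above appends namespaced entries of the form "Kind name -n namespace", while the second lists non-namespaced entries as "Kind name"; the resource names below are hypothetical:

    Pod domain1-admin-server -n default
    Service domain1-admin-server -n default
    PersistentVolume domain1-weblogic-sample-pv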

#
# Usage:
# deleteWithOneLabel outputfile
#
function deleteWithOneLabel {
echo @@ Delete resources with label $LABEL_SELECTOR.
# clean the output file first
if [ -e $1 ]; then
rm $1
fi

echo @@ Deleting resources with label $LABEL_SELECTOR.
getResWithLabel $1
# delete namespaced types
cat $1 | awk '{ print $4 }' | grep -v "^$" | sort -u | while read line; do
kubectl -n $line delete $NAMESPACED_TYPES -l "$LABEL_SELECTOR"
done

# delete non-namespaced types
local no_namespace_count=`grep -c -v " -n " $1`
if [ ! "$no_namespace_count" = "0" ]; then
kubectl delete $NOT_NAMESPACED_TYPES -l "$LABEL_SELECTOR"
fi

echo "@@ Waiting for pods to stop running."
local total=0
local mstart=`date +%s`
local mnow=$mstart
local maxwaitsecs=60
while [ $((mnow - mstart)) -lt $maxwaitsecs ]; do
pods=($(kubectl get pods --all-namespaces -l $LABEL_SELECTOR -o jsonpath='{range .items[*]}{.metadata.name} {end}'))
total=${#pods[*]}
if [ $total -eq 0 ] ; then
break
else
echo "@@ There are $total running pods with label $LABEL_SELECTOR."
fi
sleep 3
mnow=`date +%s`
done

if [ $total -gt 0 ]; then
echo "Warning: after waiting $maxwaitsecs seconds, there are still $total running pods with label $LABEL_SELECTOR."
fi
}
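Because getResWithLabel writes namespaced entries as "Kind name -n namespace", the awk '{ print $4 }' in the loop above selects the namespace column (it is empty for non-namespaced entries, which grep -v "^$" then drops). A quick check of that assumption:

    echo "Pod domain1-admin-server -n default" | awk '{ print $4 }'   # prints: default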

#
# Usage:
# deleteNamespaces outputfile
#
function deleteNamespaces {
cat $1 | awk '{ print $4 }' | grep -v "^$" | sort -u | while read line; do
if [ "$line" != "default" ]; then
kubectl delete namespace $line --ignore-not-found
fi
done

}
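In effect, for an output file that lists resources in namespaces such as test1 and test2 (hypothetical names), this loop issues commands like the one below while deliberately leaving the default namespace in place:

    kubectl delete namespace test1 --ignore-not-found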

function deleteWithLabels {
NAMESPACED_TYPES="pod,job,deploy,rs,service,pvc,ingress,cm,serviceaccount,role,rolebinding,secret"

DOMAIN_CRD="domains.weblogic.oracle"
if [ `kubectl get crd $DOMAIN_CRD --ignore-not-found | grep $DOMAIN_CRD | wc -l` = 1 ]; then
NAMESPACED_TYPES="$DOMAIN_CRD,$NAMESPACED_TYPES"
fi

NOT_NAMESPACED_TYPES="pv,crd,clusterroles,clusterrolebindings"

tempfile="/tmp/$(basename $0).tmp.$$" # == /tmp/[script-file-name].tmp.[pid]

echo @@ Deleting domain resources.
LABEL_SELECTOR="weblogic.domainUID"
deleteWithOneLabel "$tempfile-0"

echo @@ Deleting wls operator resources.
LABEL_SELECTOR="weblogic.operatorName"
deleteWithOneLabel "$tempfile-1"

deleteNamespaces "$tempfile-0"
deleteNamespaces "$tempfile-1"
}
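As an aside (not part of the script), the resources these two passes will target can be previewed with the same label selectors before running the cleanup:

    kubectl get pods --all-namespaces -l weblogic.domainUID --show-labels
    kubectl get pods --all-namespaces -l weblogic.operatorName --show-labels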

# function genericDelete
#
# This function is a 'generic kubernetes delete' that takes three arguments:
#
# arg1: Comma-separated list of kubernetes namespaced types to search/delete.
# example: "all,cm,pvc,ns,roles,rolebindings,secrets"
#
# arg2: Comma-separated list of kubernetes non-namespaced types to search/delete.
# example: "crd,pv,clusterroles,clusterrolebindings"
#
# arg3: '|' (pipe) separated list of keywords.
# Artifacts with a label or name that contains one
# or more of the keywords are delete candidates.
# example: "logstash|kibana|elastisearch|weblogic|elk|domain"
#
# It runs in two stages:
# In the first, wait to see if artifacts delete on their own.
# In the second, try to delete any leftovers.
#
#
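A rough illustration of the arg3 keyword filter (an editor's sketch only; the function also matches on labels, not just names). Any namespace whose name matches one of the keywords becomes a delete candidate:

    kubectl get ns -o name | egrep "logstash|kibana|elastisearch|weblogic|elk|domain|traefik"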
function genericDelete {

for iteration in first second; do
@@ -136,23 +235,23 @@ function genericDelete {

echo "@@ Waiting for $artcount_total artifacts to delete. Wait time $((mnow - mstart)) seconds (max=$maxwaitsecs). Waiting for:"

cat $resfile_yes | awk '{ print "n=" $1 " " $2 }'
cat $resfile_no | awk '{ print $1 }'

else
# in the second thirty seconds we try to delete remaining artifacts

echo "@@ Trying to delete ${artcount_total} leftover artifacts, including ${artcount_yes} namespaced artifacts and ${artcount_no} non-namespaced artifacts, wait time $((mnow - mstart)) seconds (max=$maxwaitsecs)."

if [ ${artcount_yes} -gt 0 ]; then
cat "$resfile_yes" | while read line; do
local args="`echo \"$line\" | awk '{ print "-n " $1 " delete " $2 " --ignore-not-found" }'`"
echo "kubectl $args"
kubectl $args
done
fi

if [ ${artcount_no} -gt 0 ]; then
cat "$resfile_no" | while read line; do
echo "kubectl delete $line --ignore-not-found"
kubectl delete $line --ignore-not-found
@@ -308,24 +407,27 @@ function fail {
}

echo @@ Starting cleanup.
script="${BASH_SOURCE[0]}"
scriptDir="$( cd "$(dirname "${script}")" > /dev/null 2>&1 ; pwd -P)"

echo "@@ RESULT_ROOT=$RESULT_ROOT TMP_DIR=$TMP_DIR RESULT_DIR=$RESULT_DIR PROJECT_ROOT=$PROJECT_ROOT"

mkdir -p $TMP_DIR || fail No permission to create directory $TMP_DIR

# first, try to delete with labels since the convention is that all created resources need to
# have the proper label(s)
echo @@ Starting deleteWithLabels
deleteWithLabels

# second, try a generic delete in case there are some leftover resources, this runs in two phases:
# phase 1: wait to see if artifacts disappear naturally due to the above deleteWithLabels
# phase 2: kubectl delete leftover artifacts
# arguments
# arg1 - namespaced kubernetes artifacts
# arg2 - non-namespaced artifacts
# arg3 - keywords in deletable artifacts

echo @@ Starting genericDelete
genericDelete "all,cm,pvc,roles,rolebindings,serviceaccount,secrets" "crd,pv,ns,clusterroles,clusterrolebindings" "logstash|kibana|elastisearch|weblogic|elk|domain|traefik"
SUCCESS="$?"
