diff --git a/Dockerfile b/Dockerfile
index fa80df2bf82..7574b0d274f 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -23,8 +23,8 @@ ENV LANG en_US.UTF-8
ENV JAVA_HOME /usr/local/java
ENV PATH /operator:$JAVA_HOME/bin:$PATH
-ENV JAVA_VERSION 14.0.2
-ENV JAVA_URL https://download.java.net/java/GA/jdk14.0.2/205943a0976c4ed48cb16f1043c5c647/12/GPL/openjdk-14.0.2_linux-x64_bin.tar.gz
+ENV JAVA_VERSION 15
+ENV JAVA_URL https://download.java.net/java/GA/jdk15/779bf45e88a44cbd9ea6621d33e33db1/36/GPL/openjdk-15_linux-x64_bin.tar.gz
# Install Java and make the operator run with a non-root user id (1000 is the `oracle` user)
RUN set -eux; \
@@ -43,17 +43,17 @@ RUN set -eux; \
alternatives --install "/usr/bin/$base" "$base" "$bin" 20000; \
done; \
java -Xshare:dump; \
- groupadd -g 1000 oracle; \
- useradd -d /operator -M -s /bin/bash -g 1000 -u 1000 oracle; \
- mkdir -p /operator/lib; \
- mkdir /logs; \
- chown -R 1000:1000 /operator /logs
+ useradd -d /operator -M -s /bin/bash -g root -u 1000 oracle; \
+ mkdir -m 775 /operator; \
+ mkdir -m 775 /logs; \
+ mkdir /operator/lib; \
+ chown -R oracle:root /operator /logs
-USER 1000
+USER oracle
-COPY src/scripts/* /operator/
-COPY operator/target/weblogic-kubernetes-operator.jar /operator/weblogic-kubernetes-operator.jar
-COPY operator/target/lib/*.jar /operator/lib/
+COPY --chown=oracle:root src/scripts/* /operator/
+COPY --chown=oracle:root operator/target/weblogic-kubernetes-operator.jar /operator/weblogic-kubernetes-operator.jar
+COPY --chown=oracle:root operator/target/lib/*.jar /operator/lib/
HEALTHCHECK --interval=1m --timeout=10s \
CMD /operator/livenessProbe.sh
diff --git a/README.md b/README.md
index 7e195c95969..ed61a00fe42 100644
--- a/README.md
+++ b/README.md
@@ -17,8 +17,8 @@ Oracle is finding ways for organizations using WebLogic Server to run important
The fastest way to experience the operator is to follow the [Quick Start guide](https://oracle.github.io/weblogic-kubernetes-operator/quickstart/), or you can peruse our [documentation](https://oracle.github.io/weblogic-kubernetes-operator), read our [blogs](https://blogs.oracle.com/weblogicserver/updated-weblogic-kubernetes-support-with-operator-20), or try out the [samples](https://oracle.github.io/weblogic-kubernetes-operator/samples/).
***
-The [current release of the operator](https://github.com/oracle/weblogic-kubernetes-operator/releases) is 3.0.3.
-This release was published on November 9, 2020.
+The [current release of the operator](https://github.com/oracle/weblogic-kubernetes-operator/releases) is 3.1.0.
+This release was published on November 13, 2020.
***
# Documentation
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 00000000000..3f77a028eaf
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,18 @@
+# Reporting Security Vulnerabilities
+
+Oracle values the independent security research community and believes that responsible disclosure of security vulnerabilities helps us ensure the security and privacy of all our users.
+
+Please do NOT raise a GitHub Issue to report a security vulnerability. If you believe you have found a security vulnerability, please submit a report to secalert_us@oracle.com preferably with a proof of concept. We provide additional information on [how to report security vulnerabilities to Oracle](https://www.oracle.com/corporate/security-practices/assurance/vulnerability/reporting.html) which includes public encryption keys for secure email.
+
+We ask that you do not use other channels or contact project contributors directly.
+
+Non-vulnerability related security issues such as great new ideas for security features are welcome on GitHub Issues.
+
+## Security Updates, Alerts and Bulletins
+
+Security updates will be released on a regular cadence. Many of our projects will typically release security fixes in conjunction with the [Oracle Critical Patch Update](https://www.oracle.com/security-alerts/) program. Security updates are released on the Tuesday closest to the 17th day of January, April, July and October. A pre-release announcement will be published on the Thursday preceding each release. Additional information, including past advisories, is available on our [Security Alerts](https://www.oracle.com/security-alerts/) page.
+
+## Security-Related Information
+
+We will provide security related information such as a threat model, considerations for secure use, or any known security issues in our documentation. Please note that labs and sample code are intended to demonstrate a concept and may not be sufficiently hardened for production use.
+
diff --git a/buildDockerImage.sh b/buildDockerImage.sh
index 7990d02fff2..d3c79144c8e 100755
--- a/buildDockerImage.sh
+++ b/buildDockerImage.sh
@@ -33,7 +33,7 @@ while getopts "t:" optname; do
esac
done
-IMAGE_NAME=${name:-oracle/weblogic-kubernetes-operator:3.0.3}
+IMAGE_NAME=${name:-oracle/weblogic-kubernetes-operator:3.1.0}
SCRIPTPATH="$( cd "$(dirname "$0")" > /dev/null 2>&1 ; pwd -P )"
# Proxy settings
diff --git a/buildtime-reports/pom.xml b/buildtime-reports/pom.xml
index b282c697a3d..8aa2ba321ce 100644
--- a/buildtime-reports/pom.xml
+++ b/buildtime-reports/pom.xml
@@ -8,7 +8,7 @@
operator-parentoracle.kubernetes
- 3.0.3
+ 3.1.0buildtime-reports
@@ -60,14 +60,9 @@
${project.groupId}
- operator-integration-tests
- ${project.version}
-
-
- ${project.groupId}
- new-integration-tests
+ integration-tests${project.version}
-
\ No newline at end of file
+
diff --git a/docs-source/content/_index.md b/docs-source/content/_index.md
index f3294fddb46..59a578ce3d3 100644
--- a/docs-source/content/_index.md
+++ b/docs-source/content/_index.md
@@ -23,8 +23,8 @@ using the operator to deploy and run a WebLogic domain container-packaged web ap
***
#### Current production release
-The [current release of the operator](https://github.com/oracle/weblogic-kubernetes-operator/releases) is 3.0.3.
-This release was published on November 9, 2020. See the operator prerequisites and supported environments [here]({{< relref "/userguide/introduction/introduction#operator-prerequisites" >}}).
+The [current release of the operator](https://github.com/oracle/weblogic-kubernetes-operator/releases) is 3.1.0.
+This release was published on November 13, 2020. See the operator prerequisites and supported environments [here]({{< relref "/userguide/introduction/introduction#operator-prerequisites" >}}).
***
diff --git a/docs-source/content/faq/namespace-management.md b/docs-source/content/faq/namespace-management.md
index f499eb50bc5..6f729885a0a 100644
--- a/docs-source/content/faq/namespace-management.md
+++ b/docs-source/content/faq/namespace-management.md
@@ -6,11 +6,11 @@ weight: 1
description: "Considerations for managing namespaces while the operator is running."
---
-Each operator deployment manages a number of Kubernetes Namespaces. For more information, see [Operator Helm configuration values]({{< relref "/userguide/managing-operators/using-the-operator/using-helm#operator-helm-configuration-values" >}}). A number of Kubernetes resources
-must be present in a namespace before any WebLogic domain custom resources can be successfully
-deployed into it.
+Each operator deployment manages a number of Kubernetes namespaces. For more information, see [Operator Helm configuration values]({{< relref "/userguide/managing-operators/using-the-operator/using-helm#operator-helm-configuration-values" >}}). A number of Kubernetes resources
+must be present in a namespace before any WebLogic Server instances can be successfully
+started.
Those Kubernetes resources are created either as part of the installation
-of the operator's Helm chart, or created by the operator at runtime.
+of a release of the operator's Helm chart, or created by the operator.
This FAQ describes some considerations to be aware of when you manage the namespaces while the operator is running. For example:
@@ -22,12 +22,15 @@ This FAQ describes some considerations to be aware of when you manage the namesp
For others, see [Common Mistakes and Solutions]({{< relref "/userguide/managing-operators/using-the-operator/using-helm#common-mistakes-and-solutions" >}}).
{{% notice note %}}
-There can be multiple operators in a Kubernetes cluster, and in that case, you must ensure that their respective lists of `domainNamespaces` do not overlap.
+There can be multiple operators in a Kubernetes cluster, and in that case, you must ensure that the namespaces managed by these operators do not overlap.
{{% /notice %}}
#### Check the namespaces that the operator manages
-You can find the list of the namespaces that the operator manages using the `helm get values` command.
-For example, the following command shows all the values of the operator release `weblogic-operator`; the `domainNamespaces` list contains `default` and `ns1`.
+Prior to version 3.1.0, the operator supported specifying the namespaces that it would manage only through a list.
+Now, the operator supports a list of namespaces, a label selector, or a regular expression matching namespace names.
+
+For operators that specify namespaces by a list, you can find the list of the namespaces using the `helm get values` command.
+For example, the following command shows all the values of the operator release `weblogic-operator`; the `domainNamespaces` list contains `default` and `ns1`:
```
$ helm get values weblogic-operator
@@ -40,7 +43,7 @@ elkIntegrationEnabled: false
externalDebugHttpPort: 30999
externalRestEnabled: false
externalRestHttpsPort: 31001
-image: oracle/weblogic-kubernetes-operator:3.0.3
+image: oracle/weblogic-kubernetes-operator:3.1.0
imagePullPolicy: IfNotPresent
internalDebugHttpPort: 30999
istioEnabled: false
@@ -49,7 +53,19 @@ logStashImage: logstash:6.6.0
remoteDebugNodePortEnabled: false
serviceAccount: default
suspendOnDebugStartup: false
+```
+
+For operators that select namespaces with a selector, simply list namespaces using that selector:
+
+```
+$ kubectl get ns --selector="weblogic-operator=enabled"
+```
+
+For operators that select namespaces with a regular expression matching the name, you can use a combination of `kubectl`
+and any command-line tool that can process the regular expression, such as `grep`:
+```
+$ kubectl get ns -o go-template='{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}' | grep "^weblogic"
```
If you don't know the release name of the operator, you can use `helm list` to list all the releases for a specified namespace or all namespaces:
@@ -59,30 +75,25 @@ $ helm list --namespace
$ helm list --all-namespaces
```
-#### Add a Kubernetes Namespace to the operator
-If you want an operator deployment to manage a namespace, you need to add the namespace to the operator's `domainNamespaces` list. Note that the namespace has to already exist, for example, using the `kubectl create` command.
+#### Add a Kubernetes namespace to the operator
+When the operator is configured to manage a list of namespaces and you want the operator to manage an additional namespace,
+you need to add the namespace to the operator's `domainNamespaces` list. Note that this namespace has to already exist, for example,
+using the `kubectl create` command.
-Adding a namespace to the `domainNamespaces` list tells the operator deployment or runtime
-to initialize the necessary Kubernetes resources for the namespace so that the operator is ready to run and monitor WebLogic Server instances in that namespace.
+Adding a namespace to the `domainNamespaces` list tells the operator to initialize the necessary
+Kubernetes resources so that the operator is ready to manage WebLogic Server instances in that namespace.
-When the operator is running and managing the `default` namespace, the following example Helm command adds the namespace `ns1` to the `domainNamespaces` list, where `weblogic-operator` is the release name of the operator, and `kubernetes/charts/weblogic-operator` is the location of the operator's Helm charts.
+When the operator is managing the `default` namespace, the following example Helm command adds the namespace `ns1` to the `domainNamespaces` list, where `weblogic-operator` is the release name of the operator, and `kubernetes/charts/weblogic-operator` is the location of the operator's Helm charts:
```
$ helm upgrade \
+ weblogic-operator \
+ kubernetes/charts/weblogic-operator \
--reuse-values \
--set "domainNamespaces={default,ns1}" \
- --wait \
- --force \
- weblogic-operator \
- kubernetes/charts/weblogic-operator
+ --wait
```
-{{% notice note %}}
-Changes to the `domainNamespaces` list might not be picked up by the operator right away because the operator
-monitors the changes to the setting periodically. The operator becomes ready to manage Domains in
-a namespace only after the required `configmap` (namely `weblogic-scripts-cm`) is initialized in the namespace.
-{{% /notice %}}
-
You can verify that the operator has initialized a namespace by confirming the existence of the required `configmap` resource.
```
@@ -99,14 +110,28 @@ NAME DATA AGE
weblogic-scripts-cm 14 12m
```
-#### Delete a Kubernetes Namespace from the operator
-When you no longer want a namespace to be managed by the operator, you need to remove it from
-the operator's `domainNamespaces` list, so that the corresponding Kubernetes resources that are
+For operators configured to select managed namespaces through the use of a label selector or regular expression,
+you simply need to create a namespace with the appropriate labels or with a name that matches the expression, respectively.
+
+If you did not choose to enable the value, `enableClusterRoleBinding`, then the operator will not have the necessary
+permissions to manage the new namespace. You can grant these permissions by performing a `helm upgrade` with the values
+used when installing the Helm release:
+
+```
+$ helm upgrade \
+ weblogic-operator \
+ kubernetes/charts/weblogic-operator \
+ --reuse-values
+```
+
+#### Delete a Kubernetes namespace from the operator
+When the operator is configured to manage a list of namespaces and you no longer want a namespace to be managed by the operator, you need to remove it from
+the operator's `domainNamespaces` list, so that the resources that are
associated with the namespace can be cleaned up.
While the operator is running and managing the `default` and `ns1` namespaces, the following example Helm
command removes the namespace `ns1` from the `domainNamespaces` list, where `weblogic-operator` is the release
-name of the operator, and `kubernetes/charts/weblogic-operator` is the location of the operator Helm charts.
+name of the operator, and `kubernetes/charts/weblogic-operator` is the location of the operator Helm charts:
```
$ helm upgrade \
@@ -116,24 +141,19 @@ $ helm upgrade \
--force \
weblogic-operator \
kubernetes/charts/weblogic-operator
-
```
-#### Recreate a previously deleted Kubernetes Namespace
+For operators configured to select managed namespaces through the use of a label selector or regular expression,
+you simply need to delete the namespace. For the label selector option, you can also adjust the labels on the namespace
+so that the namespace no longer matches the selector.
+
+#### Recreate a previously deleted Kubernetes namespace
-If you need to delete a namespace (and the resources in it) and then recreate it,
+When the operator is configured to manage a list of namespaces and if you need to delete a namespace (and the resources in it) and then recreate it,
remember to remove the namespace from the operator's `domainNamespaces` list
after you delete the namespace, and add it back to the `domainNamespaces` list after you recreate the namespace
using the `helm upgrade` commands that were illustrated previously.
-{{% notice note %}}
-Make sure that you wait a sufficient period of time between deleting and recreating the
-namespace because it takes time for the resources in a namespace to go away after the namespace is deleted.
-In addition, as mentioned above, changes to the `domainNamespaces` setting is monitored by the operator
-periodically, and the operator becomes ready to manage Domains only after the required domain
-`configmap` (namely `weblogic-scripts-cm`) is initialized in the namespace.
-{{% /notice %}}
-
If a domain custom resource is created before the namespace is ready, you might see that the introspector job pod
fails to start, with a warning like the following, when you review the description of the introspector pod.
Note that `domain1` is the name of the domain in the following example output.
@@ -142,7 +162,7 @@ Note that `domain1` is the name of the domain in the following example output.
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
- Normal Scheduled 1m default-scheduler Successfully assigned domain1-introspect-domain-job-bz6rw to slc16ffk
+ Normal Scheduled 1m default-scheduler Successfully assigned domain1-introspector-bz6rw to slc16ffk
Normal SuccessfulMountVolume 1m kubelet, slc16ffk MountVolume.SetUp succeeded for volume "weblogic-credentials-volume"
diff --git a/docs-source/content/faq/oci-fss-pv.md b/docs-source/content/faq/oci-fss-pv.md
index 6d30dfba18b..cf05b961fde 100644
--- a/docs-source/content/faq/oci-fss-pv.md
+++ b/docs-source/content/faq/oci-fss-pv.md
@@ -53,7 +53,7 @@ Init Containers:
Command:
sh
-c
- chown -R 1000:1000 /shared
+ chown -R 1000:0 /shared
State: Terminated
Reason: Error
Exit Code: 1
@@ -85,7 +85,7 @@ spec:
initContainers:
- name: fix-pvc-owner
image: %WEBLOGIC_IMAGE%
- command: ["sh", "-c", "chown 1000:1000 %DOMAIN_ROOT_DIR%/. && find %DOMAIN_ROOT_DIR%/. -maxdepth 1 ! -name '.snapshot' ! -name '.' -print0 | xargs -r -0 chown -R 1000:1000"]
+ command: ["sh", "-c", "chown 1000:0 %DOMAIN_ROOT_DIR%/. && find %DOMAIN_ROOT_DIR%/. -maxdepth 1 ! -name '.snapshot' ! -name '.' -print0 | xargs -r -0 chown -R 1000:0"]
volumeMounts:
- name: weblogic-sample-domain-storage-volume
mountPath: %DOMAIN_ROOT_DIR%
diff --git a/docs-source/content/faq/oci-lb.md b/docs-source/content/faq/oci-lb.md
index c840888a86b..535df049b65 100644
--- a/docs-source/content/faq/oci-lb.md
+++ b/docs-source/content/faq/oci-lb.md
@@ -79,7 +79,7 @@ but initially the external IP is shown as ``.
$ kubectl -n bob get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
bobs-bookstore-admin-server ClusterIP None 8888/TCP,7001/TCP,30101/TCP 9d
-bobs-bookstore-admin-server-external NodePort 10.96.224.13 7001:32401/TCP 9d
+bobs-bookstore-admin-server-ext NodePort 10.96.224.13 7001:32401/TCP 9d
bobs-bookstore-cluster-cluster-1 ClusterIP 10.96.86.113 8888/TCP,8001/TCP,31111/TCP 9d
bobs-bookstore-managed-server1 ClusterIP None 8888/TCP,8001/TCP,31111/TCP 9d
bobs-bookstore-managed-server2 ClusterIP None 8888/TCP,8001/TCP,31111/TCP 9d
@@ -93,7 +93,7 @@ external IP address will be displayed:
$ kubectl -n bob get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
bobs-bookstore-admin-server ClusterIP None 8888/TCP,7001/TCP,30101/TCP 9d
-bobs-bookstore-admin-server-external NodePort 10.96.224.13 7001:32401/TCP 9d
+bobs-bookstore-admin-server-ext NodePort 10.96.224.13 7001:32401/TCP 9d
bobs-bookstore-cluster-cluster-1 ClusterIP 10.96.86.113 8888/TCP,8001/TCP,31111/TCP 9d
bobs-bookstore-managed-server1 ClusterIP None 8888/TCP,8001/TCP,31111/TCP 9d
bobs-bookstore-managed-server2 ClusterIP None 8888/TCP,8001/TCP,31111/TCP 9d
diff --git a/docs-source/content/quickstart/cleanup.md b/docs-source/content/quickstart/cleanup.md
index 97c0dd28049..1e9e0a21b24 100644
--- a/docs-source/content/quickstart/cleanup.md
+++ b/docs-source/content/quickstart/cleanup.md
@@ -28,26 +28,15 @@ weight: 7
```
#### Remove the domain namespace.
-1. Configure the Traefik load balancer to stop managing the ingresses in the domain namespace:
+1. Configure the Traefik ingress controller to stop managing the ingresses in the domain namespace:
```bash
- $ helm upgrade traefik-operator stable/traefik \
+ $ helm upgrade traefik-operator traefik/traefik \
--namespace traefik \
--reuse-values \
- --set "kubernetes.namespaces={traefik}" \
- --wait
+ --set "kubernetes.namespaces={traefik}"
```
-1. Configure the operator to stop managing the domain:
-
- ```bash
- $ helm upgrade sample-weblogic-operator \
- kubernetes/charts/weblogic-operator \
- --namespace sample-weblogic-operator-ns \
- --reuse-values \
- --set "domainNamespaces={}" \
- --wait \
- ```
1. Delete the domain namespace:
```bash
@@ -69,9 +58,9 @@ weight: 7
$ kubectl delete namespace sample-weblogic-operator-ns
```
-#### Remove the load balancer.
+#### Remove the ingress controller.
-1. Remove the Traefik load balancer:
+1. Remove the Traefik ingress controller:
```bash
$ helm uninstall traefik-operator -n traefik
diff --git a/docs-source/content/quickstart/create-domain.md b/docs-source/content/quickstart/create-domain.md
index ed5226c2bd0..488dbcaad9c 100644
--- a/docs-source/content/quickstart/create-domain.md
+++ b/docs-source/content/quickstart/create-domain.md
@@ -11,7 +11,7 @@ weight: 6
* Select a user name and password, following the required rules for password creation (at least 8 alphanumeric characters with at least one number or special character).
* Pick or create a directory to which you can write output.
-1. Create a Kubernetes Secret for the WebLogic administrator credentials containing the `username` and `password` for the domain, using the [create-weblogic-credentials](http://github.com/oracle/weblogic-kubernetes-operator/blob/master/kubernetes/samples/scripts/create-weblogic-domain-credentials/create-weblogic-credentials.sh) script:
+1. Create a Kubernetes Secret for the WebLogic domain administrator credentials containing the `username` and `password` for the domain, using the [create-weblogic-credentials](http://github.com/oracle/weblogic-kubernetes-operator/blob/master/kubernetes/samples/scripts/create-weblogic-domain-credentials/create-weblogic-credentials.sh) script:
```bash
$ kubernetes/samples/scripts/create-weblogic-domain-credentials/create-weblogic-credentials.sh \
@@ -72,9 +72,9 @@ weight: 6
```
-1. To confirm that the load balancer noticed the new ingress and is successfully routing to the domain's server pods,
+1. To confirm that the ingress controller noticed the new ingress and is successfully routing to the domain's server pods,
you can send a request to the URL for the "WebLogic ReadyApp framework", as
- shown in the example below, which will return a HTTP 200 status code.
+ shown in the example below, which will return an HTTP 200 status code.
```
$ curl -v -H 'host: sample-domain1.org' http://localhost:30305/weblogic/ready
diff --git a/docs-source/content/quickstart/get-images.md b/docs-source/content/quickstart/get-images.md
index 558c0eecd97..5e180e7a9c4 100644
--- a/docs-source/content/quickstart/get-images.md
+++ b/docs-source/content/quickstart/get-images.md
@@ -7,10 +7,10 @@ weight: 3
#### Get these images and put them into your local registry.
-1. If you don't already have one, obtain a Docker store account, log in to the Docker store,
+1. If you don't already have one, obtain a Docker Store account, log in to the Docker Store,
and accept the license agreement for the [WebLogic Server image](https://hub.docker.com/_/oracle-weblogic-server-12c).
-1. Log in to the Docker store from your Docker client:
+1. Log in to the Docker Store from your Docker client:
```bash
$ docker login
@@ -19,16 +19,16 @@ and accept the license agreement for the [WebLogic Server image](https://hub.doc
1. Pull the operator image:
```bash
- $ docker pull oracle/weblogic-kubernetes-operator:3.0.3
+ $ docker pull oracle/weblogic-kubernetes-operator:3.1.0
```
-1. Pull the Traefik load balancer image:
+1. Pull the Traefik ingress controller image:
```bash
$ docker pull traefik:2.2.1
```
-1. Obtain the WebLogic image from the [Oracle Container Registry](https://container-registry.oracle.com).
+1. Obtain the WebLogic Server image from the [Oracle Container Registry](https://container-registry.oracle.com).
a. First time users, follow these [directions]({{< relref "/userguide/managing-domains/domain-in-image/base-images/_index.md#obtaining-standard-images-from-the-oracle-container-registry" >}}).
@@ -38,7 +38,7 @@ and accept the license agreement for the [WebLogic Server image](https://hub.doc
$ docker pull container-registry.oracle.com/middleware/weblogic:12.2.1.4
```
- {{% notice note %}} The WebLogic Docker image, `weblogic:12.2.1.3`, has all the necessary patches applied. The WebLogic Docker image, `weblogic:12.2.1.4`, does not require any additional patches.
+ {{% notice note %}} The WebLogic Server Docker image, `weblogic:12.2.1.3`, has all the necessary patches applied. The WebLogic Server Docker image, `weblogic:12.2.1.4`, does not require any additional patches.
{{% /notice %}}
diff --git a/docs-source/content/quickstart/install.md b/docs-source/content/quickstart/install.md
index 4ac3174455f..e779329f9a5 100644
--- a/docs-source/content/quickstart/install.md
+++ b/docs-source/content/quickstart/install.md
@@ -1,40 +1,22 @@
---
-title: "Install the operator and load balancer"
+title: "Install the operator and ingress controller"
date: 2019-02-22T15:44:42-05:00
draft: false
weight: 4
---
-#### Grant the Helm service account the `cluster-admin` role.
-
-```bash
-$ cat <}}).
4. Verify that the operator's pod is running, by listing the pods in the operator's namespace. You should see one
for the operator.
diff --git a/docs-source/content/quickstart/introduction.md b/docs-source/content/quickstart/introduction.md
index 555bd5c2a45..4e4fc4b77d7 100644
--- a/docs-source/content/quickstart/introduction.md
+++ b/docs-source/content/quickstart/introduction.md
@@ -5,12 +5,12 @@ draft: false
weight: 1
---
-Use this Quick Start guide to create a WebLogic deployment in a Kubernetes cluster with the Oracle WebLogic Server Kubernetes Operator. Please note that this walk-through is for demonstration purposes only, not for use in production.
-These instructions assume that you are already familiar with Kubernetes. If you need more detailed instructions, please
+Use this Quick Start guide to create a WebLogic Server deployment in a Kubernetes cluster with the Oracle WebLogic Server Kubernetes Operator. Please note that this walk-through is for demonstration purposes only, not for use in production.
+These instructions assume that you are already familiar with Kubernetes. If you need more detailed instructions, please
refer to the [User guide]({{< relref "/userguide/_index.md" >}}).
{{% notice note %}}
-All Kubernetes distributions and managed services have small differences. In particular,
+All Kubernetes distributions and managed services have small differences. In particular,
the way that persistent storage and load balancers are managed varies significantly.
You may need to adjust the instructions in this guide to suit your particular flavor of Kubernetes.
{{% /notice %}}
@@ -19,7 +19,7 @@ You may need to adjust the instructions in this guide to suit your particular fl
{{% notice warning %}}
If you have an older version of the operator installed on your cluster, for example, a 1.x version or one of the 2.0 release
-candidates, then you must remove it before installing this version. This includes the 2.0-rc1 version; it must be completely removed.
+candidates, then you must remove it before installing this version. This includes the 2.0-rc1 version; it must be completely removed.
You should remove the deployment (for example, `kubectl delete deploy weblogic-operator -n your-namespace`) and the custom
resource definition (for example, `kubectl delete crd domain`). If you do not remove
the custom resource definition you may see errors like this:
diff --git a/docs-source/content/quickstart/prepare.md b/docs-source/content/quickstart/prepare.md
index 7b1946e0291..af722c11c96 100644
--- a/docs-source/content/quickstart/prepare.md
+++ b/docs-source/content/quickstart/prepare.md
@@ -12,24 +12,13 @@ weight: 5
$ kubectl create namespace sample-domain1-ns
```
-1. Use `helm` to configure the operator to manage domains in this namespace:
-
- ```bash
- $ helm upgrade sample-weblogic-operator kubernetes/charts/weblogic-operator \
- --namespace sample-weblogic-operator-ns \
- --reuse-values \
- --set "domainNamespaces={sample-domain1-ns}" \
- --wait
- ```
-
1. Configure Traefik to manage ingresses created in this namespace:
```bash
- $ helm upgrade traefik-operator stable/traefik \
+ $ helm upgrade traefik-operator traefik/traefik \
--namespace traefik \
--reuse-values \
- --set "kubernetes.namespaces={traefik,sample-domain1-ns}" \
- --wait
+ --set "kubernetes.namespaces={traefik,sample-domain1-ns}"
```
{{% notice note %}}
diff --git a/docs-source/content/release-notes.md b/docs-source/content/release-notes.md
index 40c59fb2b1e..6f88b50b2b8 100644
--- a/docs-source/content/release-notes.md
+++ b/docs-source/content/release-notes.md
@@ -6,8 +6,9 @@ draft: false
### Releases
-| Date | Version | Introduces backward incompatibilities? | Changes |
+| Date | Version | Introduces backward incompatibilities? | Change |
| --- | --- | --- | --- |
+| November 13, 2020 | v3.1.0 | no | Enhanced options for specifying managed namespaces. Helm 3.1.3+ now required. |
| November 9, 2020 | v3.0.3 | no | This release contains a fix for pods that are stuck in the Terminating state after an unexpected shut down of a worker node. |
| September 15, 2020 | v3.0.2 | no | This release contains several fixes, including improvements to log rotation and a fix that avoids unnecessarily updating the domain status. |
| August 13, 2020 | v3.0.1 | no | Fixed an issue preventing the REST interface from working after a Helm upgrade. Helm 3.1.3+ now required. |
@@ -30,6 +31,30 @@ draft: false
### Change log
+#### Operator 3.1.0
+
+* All fixes included in 3.0.1, 3.0.2, and 3.0.3 are included in 3.1.0.
+* Sample [scripts to start and stop server instances]({{< relref "/userguide/managing-domains/domain-lifecycle/startup#domain-lifecycle-sample-scripts" >}}) ([#2002](https://github.com/oracle/weblogic-kubernetes-operator/pull/2002)).
+* Support running with [OpenShift restrictive SCC]({{< relref "/security/openshift#create-a-custom-security-context-constraint" >}}) ([#2007](https://github.com/oracle/weblogic-kubernetes-operator/pull/2007)).
+* Updated [default resource and Java options]({{< relref "/faq/resource-settings.md" >}}) ([#1775](https://github.com/oracle/weblogic-kubernetes-operator/pull/1775)).
+* Introspection failures are logged to the operator's log ([#1787](https://github.com/oracle/weblogic-kubernetes-operator/pull/1787)).
+* Mirror introspector log to a rotating file in the log home ([#1827](https://github.com/oracle/weblogic-kubernetes-operator/pull/1827)).
+* Reflect introspector status to domain status ([#1832](https://github.com/oracle/weblogic-kubernetes-operator/pull/1832)).
+* Ensure operator detects pod state changes even when watch events are not delivered ([#1811](https://github.com/oracle/weblogic-kubernetes-operator/pull/1811)).
+* Support configurable WDT model home ([#1828](https://github.com/oracle/weblogic-kubernetes-operator/pull/1828)).
+* [Namespace management enhancements]({{< relref "/faq/namespace-management.md" >}}) ([#1860](https://github.com/oracle/weblogic-kubernetes-operator/pull/1860)).
+* Limit concurrent pod shut down while scaling down a cluster ([#1892](https://github.com/oracle/weblogic-kubernetes-operator/pull/1892)).
+* List continuation and watch bookmark support ([#1881](https://github.com/oracle/weblogic-kubernetes-operator/pull/1881)).
+* Fix scaling script when used with dedicated namespace mode ([#1921](https://github.com/oracle/weblogic-kubernetes-operator/pull/1921)).
+* Fix token substitution for mount paths ([#1911](https://github.com/oracle/weblogic-kubernetes-operator/pull/1911)).
+* Validate existence of service accounts during Helm chart processing ([#1939](https://github.com/oracle/weblogic-kubernetes-operator/pull/1939)).
+* Use Kubernetes Java Client 10.0.0 ([#1937](https://github.com/oracle/weblogic-kubernetes-operator/pull/1937)).
+* Better validation and guidance when using longer domainUID values ([#1979](https://github.com/oracle/weblogic-kubernetes-operator/pull/1979)).
+* Update pods with label for introspection version ([#2012](https://github.com/oracle/weblogic-kubernetes-operator/pull/2012)).
+* Fix validation error during introspection for certain static clusters ([#2014](https://github.com/oracle/weblogic-kubernetes-operator/pull/2014)).
+* Correct issue in wl-pod-wait.sh sample script ([#2018](https://github.com/oracle/weblogic-kubernetes-operator/pull/2018)).
+* Correct processing of ALWAYS serverStartPolicy ([#2020](https://github.com/oracle/weblogic-kubernetes-operator/pull/2020)).
+
#### Operator 3.0.3
* The operator now responds to WebLogic Server instance pods that are stuck in the Terminating state when those pods are evicted from a node that has unexpectedly shut down and where Kubernetes has not removed the pod.
diff --git a/docs-source/content/samples/simple/azure-kubernetes-service/_index.md b/docs-source/content/samples/simple/azure-kubernetes-service/_index.md
index 9437402c08c..a98dc4d49a4 100644
--- a/docs-source/content/samples/simple/azure-kubernetes-service/_index.md
+++ b/docs-source/content/samples/simple/azure-kubernetes-service/_index.md
@@ -109,9 +109,9 @@ You will need an Oracle account. The following steps will direct you to accept t
1. Obtain the WebLogic Server image from the [Oracle Container Registry](https://container-registry.oracle.com/).
a. First time users, [follow these directions](/weblogic-kubernetes-operator/userguide/managing-domains/domain-in-image/base-images/#obtaining-standard-images-from-the-oracle-container-registry).
-
+
b. Find and then pull the WebLogic 12.2.1.3 install image:
-
+
```bash
$ docker pull container-registry.oracle.com/middleware/weblogic:12.2.1.3
```
@@ -125,7 +125,7 @@ $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git
#cd weblogic-kubernetes-operator
$ git checkout v3.0.3
```
-
+
{{% notice info %}} The following sections of the sample instructions will guide you, step-by-step, through the process of setting up a WebLogic cluster on AKS - remaining as close as possible to a native Kubernetes experience. This lets you understand and customize each step. If you wish to have a more automated experience that abstracts some lower level details, you can skip to the [Automation](#automation) section.
{{% /notice %}}
@@ -389,13 +389,13 @@ Kubernetes Operators use [Helm](https://helm.sh/) to manage Kubernetes applicati
```bash
$ helm repo add weblogic-operator https://oracle.github.io/weblogic-kubernetes-operator/charts
$ helm repo update
-$ helm install weblogic-operator weblogic-operator/weblogic-operator --version "3.0.0"
+$ helm install weblogic-operator weblogic-operator/weblogic-operator --version "3.0.3"
```
The output will show something similar to the following:
```bash
-$ helm install weblogic-operator weblogic-operator/weblogic-operator --version "3.0.0"
+$ helm install weblogic-operator weblogic-operator/weblogic-operator --version "3.0.3"
NAME: weblogic-operator
LAST DEPLOYED: Wed Jul 1 23:47:44 2020
NAMESPACE: default
@@ -485,7 +485,7 @@ Now that we have created the AKS cluster, installed the operator, and verified t
We need to set up the domain configuration for the WebLogic domain. This step uses the configuration generated previously.
Validate all the resources created above using the script `kubernetes/samples/scripts/create-weblogic-domain-on-azure-kubernetes-service/validate.sh`.
-
+
Use the following commands to check if the resources are ready:
```bash
@@ -534,13 +534,13 @@ Now that we have created the AKS cluster, installed the operator, and verified t
status on iteration 2 of 20
pod domain1-create-weblogic-sample-domain-job-4l767 status is Running
```
-
+
If you see error messages that include the status `ImagePullBackOff` along with output similar to the following, it is likely your credentials for the Oracle Container Registry have not been successfully conveyed to the AKS cluster.
-
+
```bash
Failed to pull image "container-registry.oracle.com/middleware/weblogic:12.2.1.3": rpc error: code = Unknown desc = Error response from daemon: Get https://container-registry-phx.oracle.com/v2/middleware/weblogic/manifests/12.2.1.3: unauthorized: authentication required
```
-
+
Ensure the arguments you passed to the script `create-docker-credentials-secret.sh` are correct with respect to your Oracle SSO credentials.
The following example output shows the WebLogic domain was created successfully.
@@ -692,7 +692,7 @@ Now that we have created the AKS cluster, installed the operator, and verified t
service/domain1-cluster-1-external-lb created
```
- After a short time, you will see the Administration Server and Managed Servers running.
+ After a short time, you will see the Administration Server and Managed Servers running.
Use the following command to check server pod status:
@@ -700,14 +700,14 @@ Now that we have created the AKS cluster, installed the operator, and verified t
$ kubectl get pods --watch
```
- It may take you up to 20 minutes to deploy all pods, please wait and make sure everything is ready.
-
+  It may take you up to 20 minutes to deploy all pods; please wait and make sure everything is ready.
+
You can tail the logs of the Administration Server with this command:
-
+
```bash
kubectl logs -f domain1-admin-server
```
-
+
The final example of pod output is as following:
```bash
@@ -719,7 +719,7 @@ Now that we have created the AKS cluster, installed the operator, and verified t
domain1-managed-server2 1/1 Running 0 3m56s
weblogic-operator-56654bcdb7-qww7f 1/1 Running 0 25m
```
-
+
{{% notice tip %}} If Kubernetes advertises the WebLogic pod as `Running` you can be assured the WebLogic Server actually is running because the operator ensures the Kubernetes health checks are actually polling the WebLogic health check mechanism.
{{% /notice %}}
@@ -729,13 +729,13 @@ Now that we have created the AKS cluster, installed the operator, and verified t
$ kubectl get svc --watch
```
- The final example of servcie output is as following:
+   The final example of service output is as follows:
```bash
$ kubectl get svc --watch
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
domain1-admin-server ClusterIP None 30012/TCP,7001/TCP 2d20h
- domain1-admin-server-external NodePort 10.0.182.50 7001:30701/TCP 2d20h
+ domain1-admin-server-ext NodePort 10.0.182.50 7001:30701/TCP 2d20h
domain1-admin-server-external-lb LoadBalancer 10.0.67.79 52.188.176.103 7001:32227/TCP 2d20h
domain1-cluster-1-lb LoadBalancer 10.0.112.43 104.45.176.215 8001:30874/TCP 2d17h
domain1-cluster-cluster-1 ClusterIP 10.0.162.19 8001/TCP 2d20h
@@ -933,7 +933,7 @@ The logs are stored in the Azure file share. Follow these steps to access the lo
$ kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
domain1-admin-server ClusterIP None 30012/TCP,7001/TCP 7m3s
- domain1-admin-server-external NodePort 10.0.78.211 7001:30701/TCP 7m3s
+ domain1-admin-server-ext NodePort 10.0.78.211 7001:30701/TCP 7m3s
domain1-admin-server-external-lb LoadBalancer 10.0.6.144 40.71.233.81 7001:32758/TCP 7m32s
domain1-cluster-1-lb LoadBalancer 10.0.29.231 52.142.39.152 8001:31022/TCP 7m30s
domain1-cluster-cluster-1 ClusterIP 10.0.80.134 8001/TCP 1s
@@ -1001,11 +1001,11 @@ The logs are stored in the Azure file share. Follow these steps to access the lo
The output from the `create-domain-on-aks.sh` script includes a statement about the Azure resources created by the script. To delete the cluster and free all related resources, simply delete the resource groups. The output will list the resource groups, such as.
```bash
-The following Azure Resouces have been created:
+The following Azure Resources have been created:
Resource groups: ejb8191resourcegroup1597641911, MC_ejb8191resourcegroup1597641911_ejb8191akscluster1597641911_eastus
```
-Given the above output, the following Azure CLI commands will delete the resource groups.
+Given the above output, the following Azure CLI commands will delete the resource groups.
```bash
az group delete --yes --no-wait --name ejb8191resourcegroup1597641911
diff --git a/docs-source/content/samples/simple/domains/domain-home-in-image/_index.md b/docs-source/content/samples/simple/domains/domain-home-in-image/_index.md
index cddbe5b09cb..f8fea4b7d48 100644
--- a/docs-source/content/samples/simple/domains/domain-home-in-image/_index.md
+++ b/docs-source/content/samples/simple/domains/domain-home-in-image/_index.md
@@ -150,7 +150,7 @@ The following parameters can be provided in the inputs file.
| `initialManagedServerReplicas` | Number of Managed Servers to initially start for the domain. | `2` |
| `javaOptions` | Java options for starting the Administration Server and Managed Servers. A Java option can have references to one or more of the following pre-defined variables to obtain WebLogic domain information: `$(DOMAIN_NAME)`, `$(DOMAIN_HOME)`, `$(ADMIN_NAME)`, `$(ADMIN_PORT)`, and `$(SERVER_NAME)`. If `sslEnabled` is set to `true` and the WebLogic demo certificate is used, add `-Dweblogic.security.SSL.ignoreHostnameVerification=true` to allow the managed servers to connect to the Administration Server while booting up. The WebLogic generated demo certificate in this environment typically contains a host name that is different from the runtime container's host name. | `-Dweblogic.StdoutDebugEnabled=false` |
| `logHomeOnPV` | Specifies whether the log home is stored on the persistent volume. If set to true, then you must specify the `logHome`, `persistentVolumeClaimName`, and `domainPVMountPath` parameters.| `false` |
-| `logHome` | The in-pod location for domain log, server logs, server out, Node Manager log, and server HTTP access log files. If not specified, the value is derived from the `domainUID` as `/shared/logs/`. This parameter is required if `logHomeOnPV` is true. Otherwise, it is ignored. | `/shared/logs/domain1` |
+| `logHome` | The in-pod location for domain log, server logs, server out, Node Manager log, introspector out, and server HTTP access log files. If not specified, the value is derived from the `domainUID` as `/shared/logs/`. This parameter is required if `logHomeOnPV` is true. Otherwise, it is ignored. | `/shared/logs/domain1` |
| `managedServerNameBase` | Base string used to generate Managed Server names. | `managed-server` |
| `managedServerPort` | Port number for each Managed Server. | `8001` |
| `managedServerSSLPort` | SSL port number for each Managed Server. | `8002` |
@@ -210,7 +210,7 @@ spec:
includeServerOutInPodLog: true
# Whether to enable log home
# logHomeEnabled: false
- # The in-pod location for domain log, server logs, server out, and Node Manager log files
+ # The in-pod location for domain log, server logs, server out, introspector out, and Node Manager log files
# logHome: /shared/logs/domain1
# serverStartPolicy legal values are "NEVER", "IF_NEEDED", or "ADMIN_ONLY"
# This determines which WebLogic Servers the operator will start up when it discovers this Domain
diff --git a/docs-source/content/samples/simple/domains/domain-home-on-pv/_index.md b/docs-source/content/samples/simple/domains/domain-home-on-pv/_index.md
index 3a0a8de39d2..d876a3f5a71 100644
--- a/docs-source/content/samples/simple/domains/domain-home-on-pv/_index.md
+++ b/docs-source/content/samples/simple/domains/domain-home-on-pv/_index.md
@@ -118,7 +118,7 @@ The following parameters can be provided in the inputs file.
| `includeServerOutInPodLog` | Boolean indicating whether to include the server `.out` in the pod's `stdout`. | `true` |
| `initialManagedServerReplicas` | Number of Managed Servers to start initially for the domain. | `2` |
| `javaOptions` | Java options for starting the Administration Server and Managed Servers. A Java option can have references to one or more of the following pre-defined variables to obtain WebLogic domain information: `$(DOMAIN_NAME)`, `$(DOMAIN_HOME)`, `$(ADMIN_NAME)`, `$(ADMIN_PORT)`, and `$(SERVER_NAME)`. | `-Dweblogic.StdoutDebugEnabled=false` |
-| `logHome` | The in-pod location for domain log, server logs, server out, Node Manager log, and server HTTP access log files. If not specified, the value is derived from the `domainUID` as `/shared/logs/`. | `/shared/logs/domain1` |
+| `logHome` | The in-pod location for domain log, server logs, server out, introspector out, Node Manager log, and server HTTP access log files. If not specified, the value is derived from the `domainUID` as `/shared/logs/`. | `/shared/logs/domain1` |
| `managedServerNameBase` | Base string used to generate Managed Server names. | `managed-server` |
| `managedServerPort` | Port number for each Managed Server. | `8001` |
| `namespace` | Kubernetes Namespace in which to create the domain. | `default` |
@@ -177,7 +177,7 @@ spec:
includeServerOutInPodLog: true
# Whether to enable log home
logHomeEnabled: true
- # The in-pod name location for domain log, server logs, server out, and Node Manager log files
+ # The in-pod name location for domain log, server logs, server out, introspector out, and Node Manager log files
logHome: /shared/logs/domain1
# serverStartPolicy legal values are "NEVER", "IF_NEEDED", or "ADMIN_ONLY"
# This determines which WebLogic Servers the operator will start up when it discovers this Domain
@@ -397,7 +397,7 @@ Here is an example of the output of this command:
$ kubectl get services
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
domain1-admin-server ClusterIP 10.96.206.134 7001/TCP 23m
-domain1-admin-server-external NodePort 10.107.164.241 30012:30012/TCP 22m
+domain1-admin-server-ext NodePort 10.107.164.241 30012:30012/TCP 22m
domain1-cluster-cluster-1 ClusterIP 10.109.133.168 8001/TCP 22m
domain1-managed-server1 ClusterIP None 8001/TCP 22m
domain1-managed-server2 ClusterIP None 8001/TCP 22m
diff --git a/docs-source/content/samples/simple/domains/fmw-domain/_index.md b/docs-source/content/samples/simple/domains/fmw-domain/_index.md
index aaabb4093c9..4b4b36b960d 100644
--- a/docs-source/content/samples/simple/domains/fmw-domain/_index.md
+++ b/docs-source/content/samples/simple/domains/fmw-domain/_index.md
@@ -122,7 +122,7 @@ The following parameters can be provided in the inputs file.
| `includeServerOutInPodLog` | Boolean indicating whether to include the server `.out` in the pod's `stdout`. | `true` |
| `initialManagedServerReplicas` | Number of Managed Servers to start initially for the domain. | `2` |
| `javaOptions` | Java options for starting the Administration Server and Managed Servers. A Java option can have references to one or more of the following pre-defined variables to obtain WebLogic domain information: `$(DOMAIN_NAME)`, `$(DOMAIN_HOME)`, `$(ADMIN_NAME)`, `$(ADMIN_PORT)`, and `$(SERVER_NAME)`. | `-Dweblogic.StdoutDebugEnabled=false` |
-| `logHome` | The in-pod location for the domain log, server logs, server out, Node Manager log, and server HTTP access log files. If not specified, the value is derived from the `domainUID` as `/shared/logs/`. | `/shared/logs/domain1` |
+| `logHome` | The in-pod location for the domain log, server logs, server out, Node Manager log, introspector out, and server HTTP access log files. If not specified, the value is derived from the `domainUID` as `/shared/logs/`. | `/shared/logs/domain1` |
| `managedServerNameBase` | Base string used to generate Managed Server names. | `managed-server` |
| `managedServerPort` | Port number for each Managed Server. | `8001` |
| `namespace` | Kubernetes Namespace in which to create the domain. | `default` |
@@ -194,7 +194,7 @@ spec:
includeServerOutInPodLog: true
# Whether to enable log home
logHomeEnabled: true
- # The in-pod location for domain log, server logs, server out, and Node Manager log files
+ # The in-pod location for domain log, server logs, server out, introspector out, and Node Manager log files
logHome: /shared/logs/fmw-domain
# serverStartPolicy legal values are "NEVER", "IF_NEEDED", or "ADMIN_ONLY"
# This determines which WebLogic Servers the Operator will start up when it discovers this Domain
@@ -450,7 +450,7 @@ Here is an example of the output of this command:
$ kubectl get services
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
fmw-domain-admin-server ClusterIP None 7001/TCP 15h
-fmw-domain-admin-server-external NodePort 10.101.26.42 7001:30731/TCP 15h
+fmw-domain-admin-server-ext NodePort 10.101.26.42 7001:30731/TCP 15h
fmw-domain-cluster-cluster-1 ClusterIP 10.107.55.188 8001/TCP 15h
fmw-domain-managed-server1 ClusterIP None 8001/TCP 15h
fmw-domain-managed-server2 ClusterIP None 8001/TCP 15h
diff --git a/docs-source/content/samples/simple/domains/lifecycle/_index.md b/docs-source/content/samples/simple/domains/lifecycle/_index.md
new file mode 100644
index 00000000000..0b9bb64855a
--- /dev/null
+++ b/docs-source/content/samples/simple/domains/lifecycle/_index.md
@@ -0,0 +1,14 @@
+---
+title: "Domain lifecycle operations"
+date: 2019-02-23T17:32:31-05:00
+weight: 8
+description: "Start and stop Managed Servers, clusters, and domains."
+---
+
+#### Domain lifecycle sample scripts
+
+Beginning in version 3.1.0, the operator provides sample scripts to start up or shut down a specific Managed Server or cluster in a deployed domain, or the entire deployed domain.
+
+**Note**: Prior to running these scripts, you must have previously created and deployed the domain.
+
+The scripts are located in the `kubernetes/samples/scripts/domain-lifecycle` directory. They are helpful when scripting the life cycle of a WebLogic Server domain. For more information, see the [README](https://github.com/oracle/weblogic-kubernetes-operator/tree/master/kubernetes/samples/scripts/domain-lifecycle/README.md).
diff --git a/docs-source/content/samples/simple/domains/model-in-image/cleanup.md b/docs-source/content/samples/simple/domains/model-in-image/cleanup.md
index bd3a74fc7c5..239b22163b8 100644
--- a/docs-source/content/samples/simple/domains/model-in-image/cleanup.md
+++ b/docs-source/content/samples/simple/domains/model-in-image/cleanup.md
@@ -21,7 +21,7 @@ To remove the resources you have created in these samples:
2. If you set up the Traefik ingress controller:
```
- $ helm delete --purge traefik-operator
+ $ helm uninstall traefik-operator -n traefik
$ kubectl delete namespace traefik
```
@@ -32,7 +32,7 @@ To remove the resources you have created in these samples:
4. Delete the operator and its namespace:
```
- $ helm delete --purge sample-weblogic-operator
+ $ helm uninstall sample-weblogic-operator -n sample-weblogic-operator-ns
$ kubectl delete namespace sample-weblogic-operator-ns
```
diff --git a/docs-source/content/samples/simple/domains/model-in-image/initial.md b/docs-source/content/samples/simple/domains/model-in-image/initial.md
index 22a05aa5945..c3a2d9f3daa 100644
--- a/docs-source/content/samples/simple/domains/model-in-image/initial.md
+++ b/docs-source/content/samples/simple/domains/model-in-image/initial.md
@@ -430,7 +430,7 @@ Copy the following to a file called `/tmp/mii-sample/mii-initial.yaml` or simila
# Whether to enable overriding your log file location, see also 'logHome'
#logHomeEnabled: false
- # The location for domain log, server logs, server out, and Node Manager log files
+ # The location for domain log, server logs, server out, introspector out, and Node Manager log files
# see also 'logHomeEnabled', 'volumes', and 'volumeMounts'.
#logHome: /shared/logs/sample-domain1
@@ -552,7 +552,7 @@ Copy the following to a file called `/tmp/mii-sample/mii-initial.yaml` or simila
# Whether to enable overriding your log file location, see also 'logHome'
#logHomeEnabled: false
- # The location for domain log, server logs, server out, and Node Manager log files
+ # The location for domain log, server logs, server out, introspector out, and Node Manager log files
# see also 'logHomeEnabled', 'volumes', and 'volumeMounts'.
#logHome: /shared/logs/sample-domain1
@@ -658,11 +658,11 @@ Copy the following to a file called `/tmp/mii-sample/mii-initial.yaml` or simila
```
$ kubectl get pods -n sample-domain1-ns --watch
NAME READY STATUS RESTARTS AGE
- sample-domain1-introspect-domain-job-lqqj9 0/1 Pending 0 0s
- sample-domain1-introspect-domain-job-lqqj9 0/1 ContainerCreating 0 0s
- sample-domain1-introspect-domain-job-lqqj9 1/1 Running 0 1s
- sample-domain1-introspect-domain-job-lqqj9 0/1 Completed 0 65s
- sample-domain1-introspect-domain-job-lqqj9 0/1 Terminating 0 65s
+ sample-domain1-introspector-lqqj9 0/1 Pending 0 0s
+ sample-domain1-introspector-lqqj9 0/1 ContainerCreating 0 0s
+ sample-domain1-introspector-lqqj9 1/1 Running 0 1s
+ sample-domain1-introspector-lqqj9 0/1 Completed 0 65s
+ sample-domain1-introspector-lqqj9 0/1 Terminating 0 65s
sample-domain1-admin-server 0/1 Pending 0 0s
sample-domain1-admin-server 0/1 ContainerCreating 0 0s
sample-domain1-admin-server 0/1 Running 0 1s
@@ -732,14 +732,14 @@ Alternatively, you can run `/tmp/mii-sample/utils/wl-pod-wait.sh -p 3`. This is
NAME VERSION IMAGE READY PHASE
-------------------------------------------- ------- ----- ----- ---------
- 'sample-domain1-introspect-domain-job-rkdkg' '' '' '' 'Pending'
+ 'sample-domain1-introspector-rkdkg' '' '' '' 'Pending'
@@ [2020-04-30T13:50:45][seconds=3] Info: '0' WebLogic Server pods currently match all criteria, expecting '3'.
@@ [2020-04-30T13:50:45][seconds=3] Info: Introspector and WebLogic Server pods with same namespace and domain-uid:
NAME VERSION IMAGE READY PHASE
-------------------------------------------- ------- ----- ----- ---------
- 'sample-domain1-introspect-domain-job-rkdkg' '' '' '' 'Running'
+ 'sample-domain1-introspector-rkdkg' '' '' '' 'Running'
@@ [2020-04-30T13:51:50][seconds=68] Info: '0' WebLogic Server pods currently match all criteria, expecting '3'.
diff --git a/docs-source/content/samples/simple/domains/model-in-image/prerequisites.md b/docs-source/content/samples/simple/domains/model-in-image/prerequisites.md
index 8319b7281bd..9155cfbd17e 100644
--- a/docs-source/content/samples/simple/domains/model-in-image/prerequisites.md
+++ b/docs-source/content/samples/simple/domains/model-in-image/prerequisites.md
@@ -53,8 +53,8 @@ weight: 1
- Option 1: To create the ingresses, use the following YAML file to create a file called `/tmp/mii-sample/ingresses/myingresses.yaml` and then call `kubectl apply -f /tmp/mii-sample/ingresses/myingresses.yaml`:
```yaml
- apiVersion: networking.k8s.io/v1beta1
- kind: Ingress
+ apiVersion: traefik.containo.us/v1alpha1
+ kind: IngressRoute
metadata:
name: traefik-ingress-sample-domain1-admin-server
namespace: sample-domain1-ns
@@ -63,17 +63,16 @@ weight: 1
annotations:
kubernetes.io/ingress.class: traefik
spec:
- rules:
- - host:
- http:
- paths:
- - path: /console
- backend:
- serviceName: sample-domain1-admin-server
- servicePort: 7001
+ routes:
+ - kind: Rule
+ match: PathPrefix(`/console`)
+ services:
+ - kind: Service
+ name: sample-domain1-admin-server
+ port: 7001
---
- apiVersion: networking.k8s.io/v1beta1
- kind: Ingress
+ apiVersion: traefik.containo.us/v1alpha1
+ kind: IngressRoute
metadata:
name: traefik-ingress-sample-domain1-cluster-cluster-1
namespace: sample-domain1-ns
@@ -82,17 +81,16 @@ weight: 1
annotations:
kubernetes.io/ingress.class: traefik
spec:
- rules:
- - host: sample-domain1-cluster-cluster-1.mii-sample.org
- http:
- paths:
- - path:
- backend:
- serviceName: sample-domain1-cluster-cluster-1
- servicePort: 8001
+ routes:
+ - kind: Rule
+ match: Host(`sample-domain1-cluster-cluster-1.mii-sample.org`)
+ services:
+ - kind: Service
+ name: sample-domain1-cluster-cluster-1
+ port: 8001
---
- apiVersion: networking.k8s.io/v1beta1
- kind: Ingress
+ apiVersion: traefik.containo.us/v1alpha1
+ kind: IngressRoute
metadata:
name: traefik-ingress-sample-domain2-cluster-cluster-1
namespace: sample-domain1-ns
@@ -101,14 +99,13 @@ weight: 1
annotations:
kubernetes.io/ingress.class: traefik
spec:
- rules:
- - host: sample-domain2-cluster-cluster-1.mii-sample.org
- http:
- paths:
- - path:
- backend:
- serviceName: sample-domain2-cluster-cluster-1
- servicePort: 8001
+ routes:
+ - kind: Rule
+ match: Host(`sample-domain2-cluster-cluster-1.mii-sample.org`)
+ services:
+ - kind: Service
+ name: sample-domain2-cluster-cluster-1
+ port: 8001
```
- Option 2: Run `kubectl apply -f` on each of the ingress YAML files that are already included in the sample source `/tmp/mii-sample/ingresses` directory:
@@ -147,10 +144,10 @@ weight: 1
```
$ cd /tmp/mii-sample/model-images
- $ curl -m 120 -fL https://github.com/oracle/weblogic-deploy-tooling/releases/download/weblogic-deploy-tooling-1.9.1/weblogic-deploy.zip \
+ $ curl -m 120 -fL https://github.com/oracle/weblogic-deploy-tooling/releases/download/release-1.9.7/weblogic-deploy.zip \
-o /tmp/mii-sample/model-images/weblogic-deploy.zip
- $ curl -m 120 -fL https://github.com/oracle/weblogic-image-tool/releases/download/release-1.9.1/imagetool.zip \
+ $ curl -m 120 -fL https://github.com/oracle/weblogic-image-tool/releases/download/release-1.9.5/imagetool.zip \
-o /tmp/mii-sample/model-images/imagetool.zip
```
diff --git a/docs-source/content/samples/simple/domains/model-in-image/update1.md b/docs-source/content/samples/simple/domains/model-in-image/update1.md
index d9a4da861ae..7bc77216b64 100644
--- a/docs-source/content/samples/simple/domains/model-in-image/update1.md
+++ b/docs-source/content/samples/simple/domains/model-in-image/update1.md
@@ -242,7 +242,7 @@ Here are the steps:
NAME VERSION IMAGE READY PHASE
-------------------------------------------- ------- ----------------------- ------ ---------
'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running'
- 'sample-domain1-introspect-domain-job-wlkpr' '' '' '' 'Pending'
+ 'sample-domain1-introspector-wlkpr' '' '' '' 'Pending'
'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running'
'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running'
@@ -252,7 +252,7 @@ Here are the steps:
NAME VERSION IMAGE READY PHASE
-------------------------------------------- ------- ----------------------- ------ ---------
'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running'
- 'sample-domain1-introspect-domain-job-wlkpr' '' '' '' 'Running'
+ 'sample-domain1-introspector-wlkpr' '' '' '' 'Running'
'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running'
'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running'
@@ -262,7 +262,7 @@ Here are the steps:
NAME VERSION IMAGE READY PHASE
-------------------------------------------- ------- ----------------------- ------ -----------
'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running'
- 'sample-domain1-introspect-domain-job-wlkpr' '' '' '' 'Succeeded'
+ 'sample-domain1-introspector-wlkpr' '' '' '' 'Succeeded'
'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running'
'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running'
diff --git a/docs-source/content/samples/simple/domains/model-in-image/update2.md b/docs-source/content/samples/simple/domains/model-in-image/update2.md
index 60803355918..baf4dcb77c5 100644
--- a/docs-source/content/samples/simple/domains/model-in-image/update2.md
+++ b/docs-source/content/samples/simple/domains/model-in-image/update2.md
@@ -309,13 +309,13 @@ Here are the steps for this use case:
sample-domain1-admin-server 1/1 Running 0 5d2h
sample-domain1-managed-server1 1/1 Running 1 5d2h
sample-domain1-managed-server2 1/1 Running 2 5d2h
- sample-domain2-introspect-domain-job-plssr 0/1 Pending 0 0s
- sample-domain2-introspect-domain-job-plssr 0/1 Pending 0 0s
- sample-domain2-introspect-domain-job-plssr 0/1 ContainerCreating 0 0s
- sample-domain2-introspect-domain-job-plssr 1/1 Running 0 2s
- sample-domain2-introspect-domain-job-plssr 0/1 Completed 0 69s
- sample-domain2-introspect-domain-job-plssr 0/1 Terminating 0 71s
- sample-domain2-introspect-domain-job-plssr 0/1 Terminating 0 71s
+ sample-domain2-introspector-plssr 0/1 Pending 0 0s
+ sample-domain2-introspector-plssr 0/1 Pending 0 0s
+ sample-domain2-introspector-plssr 0/1 ContainerCreating 0 0s
+ sample-domain2-introspector-plssr 1/1 Running 0 2s
+ sample-domain2-introspector-plssr 0/1 Completed 0 69s
+ sample-domain2-introspector-plssr 0/1 Terminating 0 71s
+ sample-domain2-introspector-plssr 0/1 Terminating 0 71s
sample-domain2-admin-server 0/1 Pending 0 0s
sample-domain2-admin-server 0/1 Pending 0 0s
sample-domain2-admin-server 0/1 ContainerCreating 0 0s
@@ -352,14 +352,14 @@ Here are the steps for this use case:
NAME VERSION IMAGE READY PHASE
-------------------------------------------- ------- ----- ----- ---------
- 'sample-domain2-introspect-domain-job-plssr' '' '' '' 'Running'
+ 'sample-domain2-introspector-plssr' '' '' '' 'Running'
@@ [2020-05-13T17:07:03][seconds=64] Info: '0' WebLogic Server pods currently match all criteria, expecting '3'.
@@ [2020-05-13T17:07:03][seconds=64] Info: Introspector and WebLogic Server pods with same namespace and domain-uid:
NAME VERSION IMAGE READY PHASE
-------------------------------------------- ------- ----- ----- -----------
- 'sample-domain2-introspect-domain-job-plssr' '' '' '' 'Succeeded'
+ 'sample-domain2-introspector-plssr' '' '' '' 'Succeeded'
@@ [2020-05-13T17:07:06][seconds=67] Info: '0' WebLogic Server pods currently match all criteria, expecting '3'.
@@ [2020-05-13T17:07:06][seconds=67] Info: Introspector and WebLogic Server pods with same namespace and domain-uid:
diff --git a/docs-source/content/samples/simple/domains/model-in-image/update3.md b/docs-source/content/samples/simple/domains/model-in-image/update3.md
index 83464f4931e..fb0a5a58e4d 100644
--- a/docs-source/content/samples/simple/domains/model-in-image/update3.md
+++ b/docs-source/content/samples/simple/domains/model-in-image/update3.md
@@ -235,7 +235,7 @@ Here are the steps for this use case:
NAME VERSION IMAGE READY PHASE
-------------------------------------------- ------- ----------------------- ------ ---------
'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running'
- 'sample-domain1-introspect-domain-job-g5kzn' '' '' '' 'Running'
+ 'sample-domain1-introspector-g5kzn' '' '' '' 'Running'
'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running'
'sample-domain1-managed-server2' '2' 'model-in-image:WLS-v1' 'true' 'Running'
diff --git a/docs-source/content/samples/simple/ingress/_index.md b/docs-source/content/samples/simple/ingress/_index.md
index 261d1ff56bb..89f67158e1b 100644
--- a/docs-source/content/samples/simple/ingress/_index.md
+++ b/docs-source/content/samples/simple/ingress/_index.md
@@ -17,6 +17,7 @@ The samples are located in following folders:
* [Traefik](https://github.com/oracle/weblogic-kubernetes-operator/blob/master/kubernetes/samples/charts/traefik/README.md)
* [Voyager](https://github.com/oracle/weblogic-kubernetes-operator/blob/master/kubernetes/samples/charts/voyager/README.md)
+* [NGINX](https://github.com/oracle/weblogic-kubernetes-operator/blob/master/kubernetes/samples/charts/nginx/README.md)
* Apache-samples/[custom-sample](https://github.com/oracle/weblogic-kubernetes-operator/blob/master/kubernetes/samples/charts/apache-samples/custom-sample/README.md)
* Apache-samples/[default-sample](https://github.com/oracle/weblogic-kubernetes-operator/blob/master/kubernetes/samples/charts/apache-samples/default-sample/README.md)
* [Ingress-per-domain](https://github.com/oracle/weblogic-kubernetes-operator/blob/master/kubernetes/samples/charts/ingress-per-domain/README.md)
diff --git a/docs-source/content/security/openshift.md b/docs-source/content/security/openshift.md
index 9dd631710f8..19ea50e595c 100644
--- a/docs-source/content/security/openshift.md
+++ b/docs-source/content/security/openshift.md
@@ -5,41 +5,109 @@ weight: 7
description: "OpenShift information for the operator"
---
-#### OpenShift `anyuid` security context
+#### Security requirements to run WebLogic in OpenShift
-The Docker images that Oracle publishes default to the container user
-as `oracle`, which is UID `1000` and GID `1000`. When running the
-Oracle images or layered images that retain the default user as
-`oracle` with OpenShift, the `anyuid` security context constraint
-is required to ensure proper access to the file system within the
-Docker image. This means that the administrator must:
+WebLogic Server Kubernetes Operator Docker images starting with version 3.1 and
+WebLogic Server Docker images obtained from Oracle Container Registry after August 2020
+have an `oracle` user with UID 1000 with the default group set to `root`.
-1. Ensure the `anyuid` security content is granted
-2. Ensure that WebLogic containers are annotated with `openshift.io/scc: anyuid`
-
-For example, to update the OpenShift policy, use:
+Here is an excerpt from a standard WebLogic [Dockerfile](https://github.com/oracle/docker-images/blob/master/OracleWebLogic/dockerfiles/12.2.1.4/Dockerfile.generic#L89)
+that demonstrates how the file system group ownership is configured in the standard WebLogic Server images:
```bash
-$ oc adm policy add-scc-to-user anyuid -z default
+# Setup filesystem and oracle user
+# Adjust file permissions, go to /u01 as user 'oracle' to proceed with WLS installation
+# ------------------------------------------------------------
+RUN mkdir -p /u01 && \
+ chmod 775 /u01 && \
+ useradd -b /u01 -d /u01/oracle -m -s /bin/bash oracle && \
+ chown oracle:root /u01
+
+COPY --from=builder --chown=oracle:root /u01 /u01
```
-To annotate the WebLogic containers, update the WebLogic `Domain` resource
-to include `annotations` for the `serverPod`. For example:
+OpenShift, by default, enforces the `restricted` security context constraint which
+allocates a high, random UID in the `root` group for each container. The standard
+images mentioned above are designed to work with the `restricted` security context constraint.
+
+However, if you build your own image, have an older version of an image, or obtain an
+image from another source, it may not have the necessary permissions. You may need to
+configure similar file system permissions to allow your image to work in OpenShift.
+Specifically, you need to make sure the following directories have `root` as their
+group, and that the group read, write and execute permissions are set (enabled):
+
+* For the operator, `/operator` and `/logs`.
+* For WebLogic Server images, `/u01` (or the ultimate parent directory of your
+ Oracle Home and domain if you put them in different locations).
+
+If your OpenShift environment has a different default security context constraint,
+you may need to configure OpenShift to allow use of UID 1000 by creating
+a security context constraint. Oracle recommends that you define
+a custom security context constraint that has just the permissions that are required
+and apply that to WebLogic pods. Oracle does not recommend using the built-in `anyuid`
+Security Context Constraint, because it provides more permissions
+than are needed, and is therefore less secure.
+
+#### Create a custom Security Context Constraint
-``` yaml
-kind: Domain
+To create a custom security context constraint, create a YAML file with the following
+content. This example assumes that your OpenShift project is called `weblogic` and
+that the service account you will use to run the operator and domains
+is called `weblogic-operator`. You should change these
+in the `groups` and `users` sections to match your environment.
+
+```yaml
+kind: SecurityContextConstraints
+apiVersion: v1
metadata:
- name: domain1
-spec:
- domainUID: domain1
- serverPod:
- env:
- - name: var1
- value: value1
- annotations:
- openshift.io/scc: anyuid
+ name: uid1000
+allowHostDirVolumePlugin: false
+allowHostIPC: false
+allowHostNetwork: false
+allowHostPID: false
+allowHostPorts: false
+allowPrivilegeEscalation: true
+allowPrivilegedContainer: false
+fsGroup:
+ type: MustRunAs
+groups:
+- system:serviceaccounts:weblogic
+readOnlyRootFilesystem: false
+requiredDropCapabilities:
+- KILL
+- MKNOD
+- SETUID
+- SETGID
+runAsUser:
+ type: MustRunAs
+ uid: 1000
+seLinuxContext:
+ type: MustRunAs
+supplementalGroups:
+ type: RunAsAny
+users:
+- system:serviceaccount:weblogic:weblogic-operator
+volumes:
+- configMap
+- downwardAPI
+- emptyDir
+- persistentVolumeClaim
+- projected
+- secret
+```
+
+Assuming you called that file `uid1000.yaml`, you can create the security context constraint
+using the following command:
+
+```bash
+$ oc create -f uid1000.yaml
```
+After you have created the security context constraint, you can install the WebLogic Server Kubernetes Operator.
+Make sure you use the same service account to which you granted permission in the security
+context constraint (`weblogic-operator` in the preceding example). The operator will then run
+with UID 1000, and any WebLogic domain it creates will also run with UID 1000.
+
{{% notice note %}}
For additional information about OpenShift requirements and the operator,
see the [OpenShift]({{< relref "/userguide/platforms/environments#openshift" >}}) section in the User Guide.
diff --git a/docs-source/content/security/rbac.md b/docs-source/content/security/rbac.md
index 06fb4f44b0d..e23c5912cb2 100644
--- a/docs-source/content/security/rbac.md
+++ b/docs-source/content/security/rbac.md
@@ -136,7 +136,5 @@ the following `ClusterRoleBinding` entries are mapped to a `ClusterRole` grantin
[^1]: The binding is assigned to the operator `ServiceAccount`.
[^2]: The binding is assigned to the operator `ServiceAccount`
- in each namespace listed with the `domainNamespaces` setting.
- The `domainNamespaces` setting contains the list of namespaces
- that the operator is configured to manage.
+ in each namespace that the operator is configured to manage. See [Managing domain namespaces]({{< relref "/faq/namespace-management.md" >}}).
[^3]: The binding is assigned to the operator `ServiceAccount`. In addition, the Kubernetes RBAC resources that the operator installation actually set up will be adjusted based on the value of the `dedicated` setting. By default, the `dedicated` value is set to `false`, those security resources are created as `ClusterRole` and `ClusterRoleBindings`. If the `dedicated` value is set to `true`, those resources will be created as `Roles` and `RoleBindings` in the namespace of the operator.
diff --git a/docs-source/content/userguide/introduction/architecture.md b/docs-source/content/userguide/introduction/architecture.md
index aafe7589519..6b8b45172f4 100644
--- a/docs-source/content/userguide/introduction/architecture.md
+++ b/docs-source/content/userguide/introduction/architecture.md
@@ -18,7 +18,7 @@ The operator is packaged in a [Docker image](https://hub.docker.com/r/oracle/web
```
$ docker login
-$ docker pull oracle/weblogic-kubernetes-operator:3.0.3
+$ docker pull oracle/weblogic-kubernetes-operator:3.1.0
```
For more details on acquiring the operator image and prerequisites for installing the operator, consult the [Quick Start guide]({{< relref "/quickstart/_index.md" >}}).
@@ -52,6 +52,10 @@ This diagram shows the following details:
* A `ClusterIP` type service is created for each Managed Server pod that contains a Managed Server that is not part of a WebLogic cluster. These services are intended to be used to access applications running on the Managed Servers. These services are labeled with `weblogic.domainUID` and `weblogic.domainName`. Customers must expose these services using a load balancer or `NodePort` type service to expose these endpoints outside the Kubernetes cluster.
* An Ingress may optionally be created by the customer for each WebLogic cluster. An Ingress provides load balanced HTTP access to all Managed Servers in that WebLogic cluster. The load balancer updates its routing table for an Ingress every time a Managed Server in the WebLogic cluster becomes “ready” or ceases to be able to service requests, such that the Ingress always points to just those Managed Servers that are able to handle user requests.
+{{% notice note %}}
+Kubernetes requires that the names of some resource types follow the DNS label standard as defined in [DNS Label Names](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names) and [RFC 1123](https://tools.ietf.org/html/rfc1123). Therefore, the operator enforces that the names of the Kubernetes resources do not exceed Kubernetes limits (see [Meet Kubernetes resource name restrictions]({{< relref "/userguide/managing-domains/_index.md#meet-kubernetes-resource-name-restrictions" >}})).
+{{% /notice %}}
+
The diagram below shows the components inside the containers running WebLogic Server instances:
![Inside a container](/weblogic-kubernetes-operator/images/inside-a-container.png)
diff --git a/docs-source/content/userguide/introduction/introduction.md b/docs-source/content/userguide/introduction/introduction.md
index f2a2738fbec..7ff9079552b 100644
--- a/docs-source/content/userguide/introduction/introduction.md
+++ b/docs-source/content/userguide/introduction/introduction.md
@@ -16,7 +16,7 @@ Detailed instructions are available [here]({{< relref "/userguide/managing-opera
### Operator prerequisites
-For the current production release 3.0.3:
+For the current production release 3.1.0:
* Kubernetes 1.14.8+, 1.15.7+, 1.16.0+, 1.17.0+, and 1.18.0+ (check with `kubectl version`).
* Flannel networking v0.9.1-amd64 or later (check with `docker images | grep flannel`), Calico networking (Calico v3.16.1),
diff --git a/docs-source/content/userguide/managing-domains/_index.md b/docs-source/content/userguide/managing-domains/_index.md
index 724f90640f7..0b8da36d5d4 100644
--- a/docs-source/content/userguide/managing-domains/_index.md
+++ b/docs-source/content/userguide/managing-domains/_index.md
@@ -8,10 +8,11 @@ description: "Important considerations for WebLogic domains in Kubernetes."
#### Contents
* [Important considerations for WebLogic domains in Kubernetes](#important-considerations-for-weblogic-domains-in-kubernetes)
+* [Meet Kubernetes resource name restrictions](#meet-kubernetes-resource-name-restrictions)
* [Creating and managing WebLogic domains](#creating-and-managing-weblogic-domains)
* [Modifying domain configurations](#modifying-domain-configurations)
* [About the Domain resource](#about-the-domain-resource)
-* [Managing life cycle operations](#managing-life-cycle-operations)
+* [Managing lifecycle operations](#managing-lifecycle-operations)
* [Scaling clusters](#scaling-clusters)
* [Log files](#log-files)
@@ -44,9 +45,9 @@ Be aware of the following important considerations for WebLogic domains running
the WebLogic Server name is `Admin_Server`, then its listen address becomes `domain1-admin-server`.
* _Domain, Cluster, Server, and Network-Access-Point Names:_ WebLogic domain, cluster, server, and network-access-point (channel)
- names must contain only the characters `A-Z`, `a-z`, `0-9`, `-`, or `_`. This ensures that they can be converted to
- meet Kubernetes resource and DNS1123 naming requirements. (When generating pod and service names, the operator will convert
- configured names to lower case and substitute a hyphen (`-`) for each underscore (`_`).)
+ names must contain only the characters `A-Z`, `a-z`, `0-9`, `-`, or `_`, and must be kept to a reasonable length. This ensures that they can
+ be safely used to form resource names that meet Kubernetes resource and DNS1123 naming requirements. For more details,
+ see [Meet Kubernetes resource name restrictions](#meet-kubernetes-resource-name-restrictions).
* _Node Ports:_ If you choose to expose any WebLogic channels outside the Kubernetes cluster using a `NodePort`, for example, the
administration port or a T3 channel to allow WLST access, you need to ensure that you allocate each channel a
@@ -86,6 +87,23 @@ The following features are **not** certified or supported in this release:
For up-to-date information about the features of WebLogic Server that are supported in Kubernetes environments, see My Oracle Support Doc ID 2349228.1.
+### Meet Kubernetes resource name restrictions
+
+Kubernetes requires that the names of some resource types follow the DNS label standard as defined in [DNS Label Names](https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#dns-label-names) and [RFC 1123](https://tools.ietf.org/html/rfc1123). This requirement restricts the characters that are allowed in the names of these resources, and also limits the length of these names to no more than 63 characters.
+
+The following is a list of such Kubernetes resources that the operator generates when a domain resource is deployed, including how their names are constructed.
+
+* A domain introspector job named `<domainUID>-<introspectorJobNameSuffix>`. The default suffix is `-introspector`, which can be overridden using the operator's Helm configuration `introspectorJobNameSuffix` (see [WebLogic domain management]({{< relref "/userguide/managing-operators/using-the-operator/using-helm#weblogic-domain-management" >}})).
+* A ClusterIP type service and a pod for each WebLogic Server named `<domainUID>-<serverName>`.
+* A ClusterIP type service for each WebLogic cluster named `<domainUID>-cluster-<clusterName>`.
+* An optional NodePort type service, also known as an external service, for the WebLogic Administration Server named `<domainUID>-<adminServerName>-<externalServiceNameSuffix>`. The default suffix is `-ext`, which can be overridden using the operator's Helm configuration `externalServiceNameSuffix` (see [WebLogic domain management]({{< relref "/userguide/managing-operators/using-the-operator/using-helm#weblogic-domain-management" >}})).
+
+The operator puts in place certain validation checks and conversions to prevent these resources from violating Kubernetes restrictions.
+* All the names previously described can contain only the characters `A-Z`, `a-z`, `0-9`, `-`, or `_`, and must start and end with an alphanumeric character. Note that when generating pod and service names, the operator will convert configured names to lower case and substitute a hyphen (`-`) for each underscore (`_`).
+* A `domainUID` is required to be no more than 45 characters.
+* WebLogic domain configuration names, such as the cluster names, Administration Server name, and Managed Server names must be kept to a legal length so that the resultant resource names do not exceed Kubernetes' limits.
+
+When a domain resource or WebLogic domain configuration violates the limits, the domain startup will fail, and actual validation errors are reported in the domain resource's status.
### Creating and managing WebLogic domains
@@ -152,12 +170,12 @@ Servers, you can set corresponding system properties in `JAVA_OPTIONS`:
cd('/')
create(dname,'Log')
cd('/Log/' + dname);
-
+
# configured server log for a server named 'sname'
cd('/Servers/' + sname)
create(sname, 'Log')
cd('/Servers/' + sname + '/Log/' + sname)
-
+
# templated (dynamic) server log for a template named 'tname'
cd('/ServerTemplates/' + tname)
create(tname,'Log')
@@ -169,10 +187,10 @@ Servers, you can set corresponding system properties in `JAVA_OPTIONS`:
```bash
# minimum log file size before rotation in kilobytes
set('FileMinSize', 1000)
-
+
# maximum number of rotated files
set('FileCount', 10)
-
+
# set to true to rotate file every time on startup (instead of append)
set('RotateLogOnStartup', 'true')
```
@@ -187,7 +205,7 @@ Servers, you can set corresponding system properties in `JAVA_OPTIONS`:
- For WebLogic Server `.log` and `.out` files (including both dynamic and configured servers), you can alternatively
set logging attributes using system properties that start with `weblogic.log.`
-and that end with the corresponding Log MBean attribute name.
+and that end with the corresponding Log MBean attribute name.
For example, you can include `-Dweblogic.log.FileMinSize=1000 -Dweblogic.log.FileCount=10 -Dweblogic.log.RotateLogOnStartup=true` in `domain.spec.serverPod.env.name.JAVA_OPTIONS` to set the behavior for all WebLogic Servers in your domain. For information about setting `JAVA_OPTIONS`, see [Domain resource]({{< relref "/userguide/managing-domains/domain-resource/_index.md#jvm-memory-and-java-option-environment-variables" >}}).
@@ -195,4 +213,3 @@ and that end with the corresponding Log MBean attribute name.
Kubernetes stores pod logs on each of its nodes, and, depending on the Kubernetes implementation, extra steps may be necessary to limit their disk space usage.
For more information, see [Kubernetes Logging Architecture](https://kubernetes.io/docs/concepts/cluster-administration/logging/).
{{% /notice %}}
-
diff --git a/docs-source/content/userguide/managing-domains/accessing-the-domain/wlst.md b/docs-source/content/userguide/managing-domains/accessing-the-domain/wlst.md
index d305301dc21..57f59ed1b07 100644
--- a/docs-source/content/userguide/managing-domains/accessing-the-domain/wlst.md
+++ b/docs-source/content/userguide/managing-domains/accessing-the-domain/wlst.md
@@ -10,13 +10,13 @@ description: "You can use the WebLogic Scripting Tool (WLST) to manage a domain
You can use the WebLogic Scripting Tool (WLST) to manage a domain running in Kubernetes. If the Administration Server was configured to expose a T3 channel using the `exposeAdminT3Channel` setting when creating the domain, then the matching T3 service can be used to connect. For example, if the `domainUID` is `domain1`, and the Administration Server name is `admin-server`, then the service would be called:
```
-domain1-admin-server-external
+domain1-admin-server-ext
```
This service will be in the same namespace as the domain. The external port number can be obtained by checking this service’s `nodePort`:
```
-$ kubectl get service domain1-admin-server-external -n domain1 -o jsonpath='{.spec.ports[0].nodePort}'
+$ kubectl get service domain1-admin-server-ext -n domain1 -o jsonpath='{.spec.ports[0].nodePort}'
30012
```
diff --git a/docs-source/content/userguide/managing-domains/configoverrides/_index.md b/docs-source/content/userguide/managing-domains/configoverrides/_index.md
index f912d41ed58..1f9fcd57660 100644
--- a/docs-source/content/userguide/managing-domains/configoverrides/_index.md
+++ b/docs-source/content/userguide/managing-domains/configoverrides/_index.md
@@ -3,7 +3,7 @@ title = "Configuration overrides"
date = 2019-02-23T16:45:16-05:00
weight = 5
pre = ""
-+++
++++
#### Contents
@@ -372,12 +372,18 @@ By setting the `FAIL_BOOT_ON_SITUATIONAL_CONFIG_ERROR` environment variable in t
* Make sure you've followed each step in the [Step-by-step guide](#step-by-step-guide).
* If WebLogic Server instance Pods do not come up at all, then:
- * In the domain's namespace, see if you can find a job named `DOMAIN_UID-introspect-domain-job` and a corresponding pod named something like `DOMAIN_UID-introspect-domain-job-xxxx`. If so, examine:
+ * Examine your Domain resource status: `kubectl -n MYDOMAINNAMESPACE describe domain MYDOMAIN`
+ * In the domain's namespace, see if you can find a job named `DOMAIN_UID-introspector` and a corresponding pod named something like `DOMAIN_UID-introspector-xxxx`. If so, examine:
* `kubectl -n MYDOMAINNAMESPACE describe job INTROSPECTJOBNAME`
* `kubectl -n MYDOMAINNAMESPACE logs INTROSPECTPODNAME`
* Check your operator log for Warning/Error/Severe messages.
* `kubectl -n MYOPERATORNAMESPACE logs OPERATORPODNAME`
+{{% notice tip %}}
+The introspector log is mirrored to the Domain resource `spec.logHome` directory
+when `spec.logHome` is configured and `spec.logHomeEnabled` is true.
+{{% /notice %}}
+
* If WebLogic Server instance Pods do start, then:
* Search your Administration Server Pod's `kubectl log` for the keyword `situational`, for example `kubectl logs MYADMINPOD | grep -i situational`.
* The only WebLogic Server log lines that match should look something like:
@@ -433,7 +439,7 @@ By setting the `FAIL_BOOT_ON_SITUATIONAL_CONFIG_ERROR` environment variable in t
### Internal design flow
* The operator generates the final configuration overrides, which include the merging of operator-generated overrides and the processing of any customer-provided configuration overrides templates and Secrets, during its introspection phase.
-* The operator creates a Kubernetes Job for introspection named `DOMAIN_UID-introspect-domain-job`.
+* The operator creates a Kubernetes Job for introspection named `DOMAIN_UID-introspector`.
* The introspector Job's Pod:
* Mounts the Kubernetes ConfigMap and Secrets specified by using the operator Domain `configuration.overridesConfigMap`, `webLogicCredentialsSecret`, and `configuration.secrets` fields.
* Reads the mounted configuration overrides templates from the ConfigMap and expands them to create the actual configuration overrides files for the domain:
diff --git a/docs-source/content/userguide/managing-domains/domain-lifecycle/introspection.md b/docs-source/content/userguide/managing-domains/domain-lifecycle/introspection.md
index dd7c77fc9dc..46d1af838ab 100644
--- a/docs-source/content/userguide/managing-domains/domain-lifecycle/introspection.md
+++ b/docs-source/content/userguide/managing-domains/domain-lifecycle/introspection.md
@@ -10,7 +10,7 @@ description: "This document describes domain introspection in the Oracle WebLogi
This document describes domain introspection, when it occurs automatically, and how and when to initiate additional introspections of the domain configuration in the Oracle WebLogic Server in Kubernetes environment.
In order to manage the operation of WebLogic domains in Kubernetes, the Oracle WebLogic Kubernetes Operator analyzes the WebLogic
-domain configuration using an "introspection" job. This Job will be named `DOMAIN_UID-introspect-domain-job`, will be run in the same namespace as the Domain, and must successfully complete before the operator will begin to start WebLogic Server instances. Because each of the
+domain configuration using an "introspection" job. This Job will be named `DOMAIN_UID-introspector`, will be run in the same namespace as the Domain, and must successfully complete before the operator will begin to start WebLogic Server instances. Because each of the
[domain home source types]({{< relref "/userguide/managing-domains/choosing-a-model/_index.md" >}}) are different (for instance, Domain in PV uses a domain home on a PersistentVolume while Model in Image generates the domain home dynamically from a WDT model), the Pod created by this Job will be
as similar as possible to the Pod that will later be generated for the Administration Server. This guarantees that the operator is
analyzing the same WebLogic domain configuration that WebLogic Server instances will use.
@@ -44,9 +44,24 @@ Set `introspectVersion` to a new value.
As with `restartVersion`, the `introspectVersion` field has no required format; however, we recommend using a value likely to be unique such as a continually increasing number or a timestamp.
+Beginning with operator 3.1.0, if a domain resource's `spec.introspectVersion` is set, each of the domain's WebLogic Server pods will have a label with the key `weblogic.introspectVersion` to indicate the `introspectVersion` at which the pod is running.
+
+```
+Name: domain1-admin-server
+Namespace: domain1-ns
+Labels: weblogic.createdByOperator=true
+ weblogic.domainName=domain1
+ weblogic.domainRestartVersion=abcdef
+ weblogic.domainUID=domain1
+ weblogic.introspectVersion=12345
+ weblogic.serverName=admin-server
+```
+
+When a domain's `spec.introspectVersion` is changed, the `weblogic.introspectVersion` label of each WebLogic Server pod is updated to the new `introspectVersion` value, either when the operator restarts the pod or when the operator determines that the pod does not need to be restarted.
+
#### Failed introspection
-Sometimes the Kubernetes Job, named `DOMAIN_UID-introspect-domain-job`, created for the introspection will fail.
+Sometimes the Kubernetes Job, named `DOMAIN_UID-introspector`, created for the introspection will fail.
When introspection fails, the operator will not start any WebLogic Server instances. If this is not the initial introspection and there are already WebLogic Server instances running, then a failed introspection will leave the existing WebLogic Server instances running without making any changes to the operational state of the domain.
@@ -54,6 +69,11 @@ The introspection will be periodically retried and then will eventually timeout
Please review the details for diagnosing introspection failures related to [configuration overrides]({{< relref "/userguide/managing-domains/configoverrides/_index.md#debugging" >}}) or [Model in Image domain home generation]({{< relref "/userguide/managing-domains/model-in-image/debugging.md" >}}).
+{{% notice tip %}}
+The introspector log is mirrored to the Domain resource `spec.logHome` directory
+when `spec.logHome` is configured and `spec.logHomeEnabled` is true.
+{{% /notice %}}
+
### Introspection use cases
#### Adding clusters or Managed Servers to the WebLogic domain configuration
diff --git a/docs-source/content/userguide/managing-domains/domain-lifecycle/restarting.md b/docs-source/content/userguide/managing-domains/domain-lifecycle/restarting.md
index 33fa00494a2..81f5fef4511 100644
--- a/docs-source/content/userguide/managing-domains/domain-lifecycle/restarting.md
+++ b/docs-source/content/userguide/managing-domains/domain-lifecycle/restarting.md
@@ -188,7 +188,7 @@ d. Update the `image` field of the Domain YAML file, specifying the new image na
```
domain:
spec:
- image: oracle/weblogic-updated:3.0.3
+ image: oracle/weblogic-updated:3.1.0
```
e. The operator will now initiate a rolling restart, which will apply the updated image, for all the servers in the domain.
diff --git a/docs-source/content/userguide/managing-domains/domain-lifecycle/scaling.md b/docs-source/content/userguide/managing-domains/domain-lifecycle/scaling.md
index 9a4446942ed..c5354d2f12a 100644
--- a/docs-source/content/userguide/managing-domains/domain-lifecycle/scaling.md
+++ b/docs-source/content/userguide/managing-domains/domain-lifecycle/scaling.md
@@ -87,8 +87,27 @@ For example, when using `curl`:
curl -v -k -H X-Requested-By:MyClient -H Content-Type:application/json -H Accept:application/json -H "Authorization:Bearer ..." -d '{ "managedServerCount": 3 }' https://.../scaling
```
-If you omit the header, you'll get a `400 (bad request)` response. If you omit the Bearer Authentication header, then you'll get a `401 (Unauthorized)` response.
+If you omit the header, you'll get a `400 (bad request)` response. If you omit the Bearer Authentication header, then you'll get a `401 (Unauthorized)` response. If the service account or user associated with the `Bearer` token does not have permission to `patch` the WebLogic domain resource, then you'll get a `403 (Forbidden)` response.
+{{% notice note %}}
+To resolve a `403 (Forbidden)` response, when calling the operator's REST scaling API, you may need to add the `patch` request verb to the cluster role associated with the WebLogic `domains` resource.
+The example ClusterRole definition below grants `get`, `list`, `patch`, and `update` access to the WebLogic `domains` resource.
+{{% /notice %}}
+
+```
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+ name: weblogic-domain-cluster-role
+rules:
+- apiGroups: ["weblogic.oracle"]
+ resources: ["domains"]
+ verbs: ["get", "list", "patch", "update"]
+- apiGroups: ["apiextensions.k8s.io"]
+ resources: ["customresourcedefinitions"]
+ verbs: ["get", "list"]
+---
+```
##### Operator REST endpoints
The WebLogic Server Kubernetes Operator can expose both an internal and external REST HTTPS endpoint.
@@ -207,7 +226,7 @@ metadata:
rules:
- apiGroups: ["weblogic.oracle"]
resources: ["domains"]
- verbs: ["get", "list", "update"]
+ verbs: ["get", "list", "patch", "update"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["get", "list"]
diff --git a/docs-source/content/userguide/managing-domains/domain-lifecycle/startup.md b/docs-source/content/userguide/managing-domains/domain-lifecycle/startup.md
index 9154f966207..12154afe3f8 100644
--- a/docs-source/content/userguide/managing-domains/domain-lifecycle/startup.md
+++ b/docs-source/content/userguide/managing-domains/domain-lifecycle/startup.md
@@ -11,6 +11,7 @@ started, or restarted. To start, stop, or restart servers, modify these fields o
* [Starting and stopping servers](#starting-and-stopping-servers)
* [Common starting and stopping scenarios](#common-starting-and-stopping-scenarios)
+ * [Domain lifecycle sample scripts](#domain-lifecycle-sample-scripts)
* [Shutdown options](#shutdown-options)
* [Restarting servers](#restarting-servers)
* [Rolling restarts](#rolling-restarts)
@@ -82,6 +83,13 @@ updates before advancing the server to the running state.
Changes to the `serverStartState` property do not affect already started servers.
+### Domain lifecycle sample scripts
+Beginning in version 3.1.0, the operator provides sample scripts to start up or shut down a specific Managed Server or cluster in a deployed domain, or the entire deployed domain.
+
+**Note**: Prior to running these scripts, you must have previously created and deployed the domain.
+
+The scripts are located in the `kubernetes/samples/scripts/domain-lifecycle` directory. They are helpful when scripting the lifecycle of a WebLogic Server domain. For more information, see the [README](https://github.com/oracle/weblogic-kubernetes-operator/tree/master/kubernetes/samples/scripts/domain-lifecycle/README.md).
+
### Common starting and stopping scenarios
#### Normal running state
diff --git a/docs-source/content/userguide/managing-domains/domain-resource.md b/docs-source/content/userguide/managing-domains/domain-resource.md
index b412d19d5dd..f2b5b3c4867 100644
--- a/docs-source/content/userguide/managing-domains/domain-resource.md
+++ b/docs-source/content/userguide/managing-domains/domain-resource.md
@@ -32,6 +32,7 @@ The following prerequisites must be fulfilled before proceeding with the creatio
* Create a Kubernetes Namespace for the Domain unless the intention is to use the default namespace.
* Make sure the WebLogic Server Kubernetes Operator is running and is configured to monitor the namespace.
* Make sure any resources that the domain resource references are deployed to the same namespace. For example, all domain resources have a `spec.webLogicCredentialsSecret` field that references a Kubernetes Secret containing the `username` and `password` of the WebLogic server administrative account.
+* Make sure a domain resource configuration and its corresponding WebLogic configuration [meet Kubernetes resource name restrictions]({{< relref "/userguide/managing-domains/_index.md#meet-kubernetes-resource-name-restrictions" >}}).
For example, see the [Quick Start]({{< relref "/quickstart/_index.md" >}}).
@@ -132,7 +133,7 @@ The Domain `spec` section contains elements for configuring the domain operation
Elements related to domain identification, container image, and domain home:
-* `domainUID`: Domain unique identifier. It is recommended that this value be unique to assist in future work to identify related domains in active-passive scenarios across data centers; however, it is only required that this value be unique within the namespace, similarly to the names of Kubernetes resources. This value is distinct and need not match the domain name from the WebLogic domain configuration. Defaults to the value of `metadata.name`.
+* `domainUID`: Domain unique identifier. This identifier is required to be no more than 45 characters, and practically, should be shorter in order to help ensure Kubernetes restrictions are met (for more details, see [Meet Kubernetes resource name restrictions]({{< relref "/userguide/managing-domains/_index.md#meet-kubernetes-resource-name-restrictions" >}})). It is recommended that this value be unique to assist in future work to identify related domains in active-passive scenarios across data centers; however, it is only required that this value be unique within the namespace, similarly to the names of Kubernetes resources. This value is distinct and need not match the domain name from the WebLogic domain configuration. Defaults to the value of `metadata.name`.
* `image`: The WebLogic container image; required when `domainHomeSourceType` is Image or FromModel; otherwise, defaults to container-registry.oracle.com/middleware/weblogic:12.2.1.4.
* `imagePullPolicy`: The image pull policy for the WebLogic container image. Legal values are Always, Never, and IfNotPresent. Defaults to Always if image ends in :latest; IfNotPresent, otherwise.
* `imagePullSecrets`: A list of image pull Secrets for the WebLogic container image.
@@ -143,7 +144,7 @@ Elements related to domain identification, container image, and domain home:
Elements related to logging:
* `includeServerOutInPodLog`: Specifies whether the server .out file will be included in the Pod's log. Defaults to true.
-* `logHome`: The directory in a server's container in which to store the domain, Node Manager, server logs, server *.out, and optionally HTTP access log files if `httpAccessLogInLogHome` is true. Ignored if `logHomeEnabled` is false.
+* `logHome`: The directory in a server's container in which to store the domain, Node Manager, server logs, server *.out, introspector .out, and optionally HTTP access log files if `httpAccessLogInLogHome` is true. Ignored if `logHomeEnabled` is false.
* `logHomeEnabled`: Specifies whether the log home folder is enabled. Defaults to true if `domainHomeSourceType` is PersistentVolume; false, otherwise.
* `httpAccessLogInLogHome`: Specifies whether the server HTTP access log files will be written to the same directory specified in `logHome`. Otherwise, server HTTP access log files will be written to the directory configured in the WebLogic domain configuration. Defaults to true.
@@ -154,20 +155,20 @@ Elements related to security:
Elements related to domain [startup and shutdown]({{< relref "/userguide/managing-domains/domain-lifecycle/startup.md" >}}):
-* `serverStartPolicy`: The strategy for [deciding whether to start]({{< relref "/userguide/managing-domains/domain-lifecycle/startup.md#starting-and-stopping-servers" >}}) a WebLogic Server instance. Legal values are ADMIN_ONLY, NEVER, or IF_NEEDED. Defaults to IF_NEEDED.
+* `serverStartPolicy`: The strategy for [deciding whether to start]({{< relref "/userguide/managing-domains/domain-lifecycle/startup#starting-and-stopping-servers" >}}) a WebLogic Server instance. Legal values are ADMIN_ONLY, NEVER, or IF_NEEDED. Defaults to IF_NEEDED.
* `serverStartState`: The WebLogic runtime state in which the server is to be started. Use ADMIN if the server should start in the admin state. Defaults to RUNNING.
-* `restartVersion`: Changes to this field cause the [operator to restart]({{< relref "/userguide/managing-domains/domain-lifecycle/startup.md#restarting-servers" >}}) WebLogic Server instances.
+* `restartVersion`: Changes to this field cause the [operator to restart]({{< relref "/userguide/managing-domains/domain-lifecycle/startup#restarting-servers" >}}) WebLogic Server instances.
* `replicas`: The default number of cluster member Managed Server instances to start for each WebLogic cluster in the domain configuration, unless `replicas` is specified for that cluster under the `clusters` field. For each cluster, the operator will sort cluster member Managed Server names from the WebLogic domain configuration by normalizing any numbers in the Managed Server name and then sorting alphabetically. This is done so that server names such as "managed-server10" come after "managed-server9". The operator will then start Managed Servers from the sorted list, up to the `replicas` count, unless specific Managed Servers are specified as starting in their entry under the `managedServers` field. In that case, the specified Managed Servers will be started and then additional cluster members will be started, up to the `replicas` count, by finding further cluster members in the sorted list that are not already started. If cluster members are started because of their entries under `managedServers`, then a cluster may have more cluster members running than its `replicas` count. Defaults to 0.
* `maxClusterConcurrentStartup`: The maximum number of cluster member Managed Server instances that the operator will start in parallel for a given cluster, if `maxConcurrentStartup` is not specified for a specific cluster under the `clusters` field. A value of 0 means there is no configured limit. Defaults to 0.
* `allowReplicasBelowMinDynClusterSize`: Whether to allow the number of running cluster member Managed Server instances to drop below the minimum dynamic cluster size configured in the WebLogic domain configuration, if this is not specified for a specific cluster under the `clusters` field. Defaults to true.
-* `introspectVersion`: Changes to this field cause the operator to repeat its introspection of the WebLogic domain configuration. Repeating introspection is required for the operator to recognize changes to the domain configuration, such as adding a new WebLogic cluster or Managed Server instance, to regenerate configuration overrides, or to regenerate the WebLogic domain home when the `domainHomeSourceType` is FromModel. Introspection occurs automatically, without requiring change to this field, when servers are first started or restarted after a full domain shut down. For the FromModel `domainHomeSourceType`, introspection also occurs when a running server must be restarted because of changes to any of the fields [listed here]({{< relref "/userguide/managing-domains/domain-lifecycle/startup.md#properties-that-cause-servers-to-be-restarted" >}}). See also `overridesConfigurationStrategy`.
+* `introspectVersion`: Changes to this field cause the operator to repeat its introspection of the WebLogic domain configuration (see [Initiating introspection]({{< relref "/userguide/managing-domains/domain-lifecycle/introspection/_index.md#initiating-introspection" >}})). Repeating introspection is required for the operator to recognize changes to the domain configuration, such as adding a new WebLogic cluster or Managed Server instance, to regenerate configuration overrides, or to regenerate the WebLogic domain home when the `domainHomeSourceType` is FromModel. Introspection occurs automatically, without requiring change to this field, when servers are first started or restarted after a full domain shut down. For the FromModel `domainHomeSourceType`, introspection also occurs when a running server must be restarted because of changes to any of the fields [listed here]({{< relref "/userguide/managing-domains/domain-lifecycle/startup#fields-that-cause-servers-to-be-restarted" >}}). See also `overrideDistributionStrategy`.
Elements related to specifying and overriding WebLogic domain configuration:
* These elements are under `configuration`.
* `overridesConfigMap`: The name of the ConfigMap for WebLogic [configuration overrides]({{< relref "/userguide/managing-domains/configoverrides/_index.md" >}}). If this field is specified, then the value of `spec.configOverrides` is ignored.
- * `overrideDistributionStrategy`: Determines how updated configuration overrides are distributed to already running WebLogic Server instances following introspection when the `domainHomeSourceType` is PersistentVolume or Image. Configuration overrides are generated during introspection from Secrets, the `overrideConfigMap` field, and WebLogic domain topology. Legal values are DYNAMIC, which means that the operator will distribute updated configuration overrides dynamically to running servers, and ON_RESTART, which means that servers will use updated configuration overrides only after the server's next restart. The selection of ON_RESTART will not cause servers to restart when there are updated configuration overrides available. See also `introspectVersion`. Defaults to DYNAMIC.
+ * `overrideDistributionStrategy`: Determines how updated configuration overrides are distributed to already running WebLogic Server instances following introspection when the `domainHomeSourceType` is PersistentVolume or Image. Configuration overrides are generated during introspection from Secrets, the `overridesConfigMap` field, and WebLogic domain topology. Legal values are DYNAMIC, which means that the operator will distribute updated configuration overrides dynamically to running servers, and ON_RESTART, which means that servers will use updated configuration overrides only after the server's next restart. The selection of ON_RESTART will not cause servers to restart when there are updated configuration overrides available. See also `introspectVersion`. Defaults to DYNAMIC.
* `secrets`: A list of names of the Secrets for WebLogic [configuration overrides]({{< relref "/userguide/managing-domains/configoverrides/_index.md" >}}) or model. If this field is specified, then the value of `spec.configOverrideSecrets` is ignored.
* `introspectorJobActiveDeadlineSeconds`: The introspector job timeout value in seconds. If this field is specified, then the operator's ConfigMap `data.introspectorJobActiveDeadlineSeconds` value is ignored. Defaults to 120 seconds.
diff --git a/docs-source/content/userguide/managing-domains/ingress/_index.md b/docs-source/content/userguide/managing-domains/ingress/_index.md
index 3543def2837..3f8bcbd42c6 100644
--- a/docs-source/content/userguide/managing-domains/ingress/_index.md
+++ b/docs-source/content/userguide/managing-domains/ingress/_index.md
@@ -68,6 +68,7 @@ Information about how to install and configure these to load balance WebLogic cl
- [Traefik guide](https://github.com/oracle/weblogic-kubernetes-operator/blob/master/kubernetes/samples/charts/traefik/README.md)
- [Voyager guide](https://github.com/oracle/weblogic-kubernetes-operator/blob/master/kubernetes/samples/charts/voyager/README.md)
+ - [NGINX guide](https://github.com/oracle/weblogic-kubernetes-operator/blob/master/kubernetes/samples/charts/nginx/README.md)
{{% notice note %}}
For production environments, we recommend NGINX, Voyager, Traefik (2.2.1 or later) ingress controllers, Apache, or the load balancer provided by your cloud provider.
diff --git a/docs-source/content/userguide/managing-domains/model-in-image/debugging.md b/docs-source/content/userguide/managing-domains/model-in-image/debugging.md
index 9d1257d1614..eac4a2c60f6 100644
--- a/docs-source/content/userguide/managing-domains/model-in-image/debugging.md
+++ b/docs-source/content/userguide/managing-domains/model-in-image/debugging.md
@@ -36,27 +36,27 @@ For example, assuming your domain UID is `sample-domain1` and your domain namesp
$ kubectl -n sample-domain1-ns get pods -l weblogic.domainUID=sample-domain1
NAME READY STATUS RESTARTS AGE
sample-domain1-admin-server 1/1 Running 0 19h
- sample-domain1-introspect-domain-job-v2l7k 0/1 Error 0 75m
+ sample-domain1-introspector-v2l7k 0/1 Error 0 75m
sample-domain1-managed-server1 1/1 Running 0 19h
sample-domain1-managed-server2 1/1 Running 0 19h
$ # let's look at the job's describe
- $ kubectl -n sample-domain1-ns describe job/sample-domain1-introspect-domain-job
+ $ kubectl -n sample-domain1-ns describe job/sample-domain1-introspector
...
$ # now let's look at the job's pod describe, in particular look at its 'events'
- $ kubectl -n sample-domain1-ns describe pod/sample-domain1-introspect-domain-job-v2l7k
+ $ kubectl -n sample-domain1-ns describe pod/sample-domain1-introspector-v2l7k
...
$ # finally let's look at job's pod's log
- $ kubectl -n sample-domain1-ns logs job/sample-domain1-introspect-domain-job
+ $ kubectl -n sample-domain1-ns logs job/sample-domain1-introspector
...
$ # alternative log command (will have same output as previous)
- # kubectl -n sample-domain1-ns logs pod/sample-domain1-introspect-domain-job-v2l7k
+ # kubectl -n sample-domain1-ns logs pod/sample-domain1-introspector-v2l7k
```
A common reason for the introspector job to fail is because of an error in a model file. Here's some sample log output from an introspector job that shows such a failure:
@@ -68,6 +68,11 @@ For example, assuming your domain UID is `sample-domain1` and your domain namesp
1. WLSDPLY-05007: Model file /u01/wdt/models/model1.yaml,/weblogic-operator/wdt-config-map/..2020_03_19_15_43_05.993607882/datasource.yaml contains an unrecognized section: TYPOresources. The recognized sections are domainInfo, topology, resources, appDeployments, kubernetes
```
+{{% notice tip %}}
+The introspector log is mirrored to the Domain resource `spec.logHome` directory
+when `spec.logHome` is configured and `spec.logHomeEnabled` is true.
+{{% /notice %}}
+
{{% notice tip %}}
If a model file error references a model file in your `spec.configuration.model.configMap`, then you can correct the error by redeploying the ConfigMap with a corrected model file and then initiating a domain restart or roll. Similarly, if a model file error references a model file in your model image, then you can correct the error by deploying a corrected image, modifying your Domain YAML file to reference the new image, and then initiating a domain restart or roll.
{{% /notice %}}
diff --git a/docs-source/content/userguide/managing-domains/model-in-image/runtime-updates.md b/docs-source/content/userguide/managing-domains/model-in-image/runtime-updates.md
index dad3a1deb3b..e372dcb2c14 100644
--- a/docs-source/content/userguide/managing-domains/model-in-image/runtime-updates.md
+++ b/docs-source/content/userguide/managing-domains/model-in-image/runtime-updates.md
@@ -61,64 +61,87 @@ No. Custom configuration overrides, which are WebLogic configuration overrides s
#### Supported and unsupported updates
- - You can add new MBeans or resources simply by specifying their corresponding model file YAML snippet along with their parent bean hierarchy. See [Example of adding a data source](#example-of-adding-a-data-source).
+{{% notice warning %}}
+The expected behavior is undefined when applying an unsupported update. If you need to make an unsupported update and no workaround is documented, then shut down your domain entirely before making the change.
+{{% /notice %}}
- - You can change or add secrets that your model references. For example, you can change a database password secret.
+##### Supported updates
- - You can change or add environment variables that your model macros reference (macros that use the `@@ENV:myenvvar@@` syntax).
+The following updates are *supported* except when they reference an area that is specifically documented as [unsupported](#unsupported-updates) below:
- - You can remove a named MBean, application deployment, or resource by specifying a model file with an exclamation point (`!`) just before its name. For example, if you have a data source named `mynewdatasource` defined in your model, it can be removed by specifying a small model file that loads after the model file that defines the data source, where the small model file looks like this:
+ - You can add a new WebLogic cluster or standalone server.
+
+ - You can add new MBeans or resources by specifying their corresponding model YAML file snippet along with their parent bean hierarchy. See [Example of adding a data source](#example-of-adding-a-data-source).
+
+ - You can change or add MBean attributes by specifying a YAML file snippet along with its parent bean hierarchy that references an existing MBean and the attribute. For example, to add or alter the maximum capacity of a data source named `mynewdatasource`:
```
resources:
JDBCSystemResource:
- !mynewdatasource:
+ mynewdatasource:
+ JdbcResource:
+ JDBCConnectionPoolParams:
+ MaxCapacity: 5
```
- For more information, see [Declaring Named MBeans to Delete](https://github.com/oracle/weblogic-deploy-tooling#declaring-named-mbeans-to-delete) in the WebLogic Deploying Tooling documentation.
+ For more information, see [Using Multiple Models](https://github.com/oracle/weblogic-deploy-tooling/blob/master/site/model.md#using-multiple-models) in the WebLogic Deploy Tooling documentation.
- - You can add or alter an MBean attribute by specifying a YAML snippet along with its parent bean hierarchy that references an existing MBean and the attribute. For example, to add or alter the maximum capacity of a data source named `mynewdatasource`:
+ - You can change or add secrets that your model macros reference (macros that use the `@@SECRET:secretname:secretkey@@` syntax). For example, you can change a database password secret.
+
+ - You can change or add environment variables that your model macros reference (macros that use the `@@ENV:myenvvar@@` syntax).
+
+ - You can remove a named MBean, application deployment, or resource by specifying a model file with an exclamation point (`!`) just before its name. For example, if you have a data source named `mynewdatasource` defined in your model, it can be removed by specifying a small model file that loads after the model file that defines the data source, where the small model file looks like this:
```
resources:
JDBCSystemResource:
- mynewdatasource:
- JdbcResource:
- JDBCConnectionPoolParams:
- MaxCapacity: 5
+ !mynewdatasource:
```
- For more information, see [Using Multiple Models](https://github.com/oracle/weblogic-deploy-tooling#using-multiple-models) in the WebLogic Deploy Tooling documentation.
+    For more information, see [Declaring Named MBeans to Delete](https://github.com/oracle/weblogic-deploy-tooling/blob/master/site/model.md#declaring-named-mbeans-to-delete) in the WebLogic Deploy Tooling documentation.
+
+##### Unsupported updates
- - There is no way to directly delete an attribute from an MBean that's already been specified by a model file. The work-around is to do this using two model files: add a model file that deletes the named bean/resource that is a parent to the attribute you want to delete, and add another model file that will be loaded after the first one, which fully defines the named bean/resource but without the attribute you want to delete.
+The following updates are *unsupported*. If you need to make an unsupported update and no workaround is documented, then shut down your domain entirely before making the change.
+
+ - There is no way to directly delete an attribute from an MBean that's already been specified by a model file. The workaround is to do this using two model files: add a model file that deletes the named bean/resource that is a parent to the attribute you want to delete, and add another model file that will be loaded after the first one, which fully defines the named bean/resource but without the attribute you want to delete.
- There is no way to directly change the MBean name of an attribute. Instead, you can remove a named MBean using the `!` syntax as described above, and then add a new one as a replacement.
- You cannot change the domain name at runtime.
- - The following types of runtime update configuration are _not_ supported in this release of Model in Image. If you need to make these kinds of updates, shut down your domain entirely before making the change:
- * Domain topology of an existing WebLogic cluster (cluster members)
- * Network channel listen address, port, and enabled configuration of an existing cluster or server
- * Server and domain log locations
- * Node Manager related configuration
- * Changing any existing MBean name
+ - You cannot change the topology of an existing WebLogic cluster. Specifically, do not apply runtime updates for:
+ - Dynamic cluster size
+ - Adding WebLogic Servers to a cluster or removing them
- Specifically, do not apply runtime updates for:
+ - You cannot change, add, or remove network listen address, port, protocol, and enabled configuration for existing clusters or servers at runtime.
- * Adding WebLogic Servers to a cluster, or removing them
- * Adding or removing Network Access Points (custom channels) for existing servers
- * Changing any of the following:
- * Dynamic cluster size
- * Default, SSL, and Admin channel `Enabled`, listen address, and port
- * Network Access Point (custom channel), listen address, or port
- * Server and domain log locations -- use the `logHome` domain setting instead
- * Node Manager access credentials
+ Specifically, do not apply runtime updates for:
+ - A Default, SSL, Admin channel `Enabled`, listen address, or port.
+ - A Network Access Point (custom channel) `Enabled`, listen address, protocol, or port.
Note that it is permitted to override network access point `public` or `external` addresses and ports. External access to JMX (MBean) or online WLST requires that the network access point internal port and external port match (external T3 or HTTP tunneling access to JMS, RMI, or EJBs don't require port matching).
-{{% notice warning %}}
-Due to security considerations, we strongly recommend that T3 or any RMI protocol should not be exposed outside the cluster.
-{{% /notice %}}
+ {{% notice warning %}}
+ Due to security considerations, we strongly recommend that T3 or any RMI protocol should not be exposed outside the cluster.
+ {{% /notice %}}
+
+ - You cannot change, add, or remove server and domain log related settings in an MBean at runtime when the domain resource is configured to override the same MBeans using the `spec.logHome`, `spec.logHomeEnabled`, or `spec.httpAccessLogInLogHome` attributes.
+
+ - You cannot change embedded LDAP security entries for [users, groups, roles](https://github.com/oracle/weblogic-deploy-tooling/blob/master/site/use_cases.md#modeling-weblogic-users-groups-and-roles), and [credential mappings](https://github.com/oracle/weblogic-deploy-tooling/blob/master/site/use_cases.md#modeling-weblogic-user-password-credential-mapping). For example, you cannot add a user to the default security realm. If you need to make these kinds of updates, then shut down your domain entirely before making the change, or switch to an [external security provider](https://github.com/oracle/weblogic-deploy-tooling/blob/master/site/use_cases.md#modeling-security-providers).
+
+ - The following summarizes the types of runtime update configuration that are _not_ supported in this release of Model in Image unless a workaround is documented:
+
+ * Domain topology of an existing WebLogic cluster. Specifically:
+ * Dynamic cluster size
+ * Adding WebLogic Servers to a cluster or removing them
+ * Default and custom network channel configuration for an existing WebLogic cluster or server. Specifically:
+ * Adding or removing Network Access Points (custom channels) for existing servers
+ * Changing a Default, SSL, Admin, or custom channel, `Enabled`, listen address, protocol, or port
+ * Node Manager related configuration
+ * Changing any existing MBean name
+ * Deleting an MBean attribute
+ * Embedded LDAP entries
#### Changing a Domain `restartVersion`
diff --git a/docs-source/content/userguide/managing-domains/model-in-image/usage.md b/docs-source/content/userguide/managing-domains/model-in-image/usage.md
index 652e6b1a54a..06dd0d5ac57 100644
--- a/docs-source/content/userguide/managing-domains/model-in-image/usage.md
+++ b/docs-source/content/userguide/managing-domains/model-in-image/usage.md
@@ -182,7 +182,7 @@ For a domain that has been started by Model in Image, the operator will copy the
```
- Option 2
- Alternatively, you can use the `kubernetes/samples/scripts/create-weblogic-domain/model-in-image/opss_wallet_util.sh -s` command to export the wallet file (pass `-?` to this script's command-line arguments and defaults).
+ Alternatively, you can use the `./kubernetes/samples/scripts/create-weblogic-domain/model-in-image/utils/opss-wallet.sh -s` command to export the wallet file (pass `-?` to this script's command-line arguments and defaults).
{{% notice tip %}}
Always back up your wallet file to a safe location that can be retrieved later. In addition, save your OPSS key password.
@@ -206,7 +206,7 @@ To reuse the wallet:
label secret MY_DOMAIN_UID-my-opss-wallet-file-secret \
weblogic.domainUID=sample-domain1
```
- Alternatively, you can use the `kubernetes/samples/scripts/create-weblogic-domain/model-in-image/opss_wallet_util.sh -r` command to deploy a local wallet file as a secret (pass `-?` to get this script's command-line arguments and defaults).
+ Alternatively, you can use the `./kubernetes/samples/scripts/create-weblogic-domain/model-in-image/utils/opss-wallet.sh -r` command to deploy a local wallet file as a secret (pass `-?` to get this script's command-line arguments and defaults).
- Make sure that your Domain YAML file `configuration.opss.walletPasswordSecret` field names the OPSS password Secret, and make sure that your Domain YAML file `configuration.opss.walletFileSecret` field names the OPSS wallet file secret.
diff --git a/docs-source/content/userguide/managing-operators/installation/_index.md b/docs-source/content/userguide/managing-operators/installation/_index.md
index ef2f920a1b6..9d9995b7038 100644
--- a/docs-source/content/userguide/managing-operators/installation/_index.md
+++ b/docs-source/content/userguide/managing-operators/installation/_index.md
@@ -5,9 +5,9 @@ weight: 1
---
-The operator uses Helm to create and deploy the necessary resources and
-then run the operator in a Kubernetes cluster. This document describes how to install, upgrade,
-and remove the operator.
+The operator uses Helm to create the necessary resources and
+then deploy the operator in a Kubernetes cluster. This document describes how to install, upgrade,
+and uninstall the operator.
#### Content
@@ -20,14 +20,14 @@ and remove the operator.
Use the `helm install` command to install the operator Helm chart. As part of this, you must specify a "release" name for the operator.
-You can override default configuration values in the operator Helm chart by doing one of the following:
+You can override default configuration values in the chart by doing one of the following:
- Creating a custom YAML file containing the values to be overridden, and specifying the `--value` option on the Helm command line.
- Overriding individual values directly on the Helm command line, using the `--set` option.
-You supply the `–namespace` argument from the `helm install` command line to specify the namespace in which the operator should be installed. If not specified, then it defaults to `default`. If the namespace does not already exist, then Helm will automatically create it (and create a default service account in the new namespace), but will not remove it when the release is deleted. If the namespace already exists, then Helm will re-use it. These are standard Helm behaviors.
+You supply the `--namespace` argument from the `helm install` command line to specify the namespace in which the operator will be installed. If not specified, then it defaults to `default`. If the namespace does not already exist, then Helm will automatically create it (and create a default service account in the new namespace), but will not remove it when the release is uninstalled. If the namespace already exists, then Helm will use it. These are standard Helm behaviors.
-Similarly, you may override the default `serviceAccount` configuration value to specify which service account in the operator's namespace, the operator should use. If not specified, then it defaults to `default` (for example, the namespace's default service account). If you want to use a different service account, then you must create the operator's namespace and the service account before installing the operator Helm chart.
+Similarly, you may override the default `serviceAccount` configuration value to specify a service account in the operator's namespace that the operator will use. If not specified, then it defaults to `default` (for example, the namespace's default service account). If you want to use a different service account, then you must create the operator's namespace and the service account before installing the operator Helm chart.
For example, using Helm 3.x:
@@ -109,33 +109,35 @@ Then install the 3.x operator using the [installation](#install-the-operator-hel
The following instructions will be applicable to upgrade operators within the 3.x release family
as additional versions are released.
-To upgrade the operator, use the `helm upgrade` command. When upgrading the operator,
+To upgrade the operator, use the `helm upgrade` command. Make sure that the
+`weblogic-kubernetes-operator` repository on your local machine is at the
+operator release to which you are upgrading. When upgrading the operator,
the `helm upgrade` command requires that you supply a new Helm chart and image. For example:
```
$ helm upgrade \
--reuse-values \
- --set image=oracle/weblogic-kubernetes-operator:3.0.3 \
+ --set image=oracle/weblogic-kubernetes-operator:3.1.0 \
--namespace weblogic-operator-namespace \
--wait \
weblogic-operator \
kubernetes/charts/weblogic-operator
```
-#### Remove the operator
+#### Uninstall the operator
-The `helm delete` command is used to remove an operator release and its associated resources from the Kubernetes cluster. The release name used with the `helm delete` command is the same release name used with the `helm install` command (see [Install the Helm chart](#install-the-operator-helm-chart)). For example:
+The `helm uninstall` command is used to remove an operator release and its associated resources from the Kubernetes cluster. The release name used with the `helm uninstall` command is the same release name used with the `helm install` command (see [Install the Helm chart](#install-the-operator-helm-chart)). For example:
```
-$ helm delete weblogic-operator -n weblogic-operator-namespace
+$ helm uninstall weblogic-operator -n weblogic-operator-namespace
```
{{% notice note %}}
-If the operator's namespace did not exist before the Helm chart was installed, then Helm will create it, however, `helm delete` will not remove it.
+If the operator's namespace did not exist before the Helm chart was installed, then Helm will create it, however, `helm uninstall` will not remove it.
{{% /notice %}}
-After removing the operator deployment, you should also remove the domain custom resource definition:
+After removing the operator deployment, you should also remove the Domain custom resource definition (CRD):
```
$ kubectl delete customresourcedefinition domains.weblogic.oracle
```
-Note that the domain custom resource definition is shared if there are multiple operators in the same cluster.
+Note that the Domain custom resource definition is shared. Do not delete the CRD if there are other operators in the same cluster.
diff --git a/docs-source/content/userguide/managing-operators/using-the-operator/the-rest-api.md b/docs-source/content/userguide/managing-operators/using-the-operator/the-rest-api.md
index b4c6f5f975a..9e3a1884c9b 100644
--- a/docs-source/content/userguide/managing-operators/using-the-operator/the-rest-api.md
+++ b/docs-source/content/userguide/managing-operators/using-the-operator/the-rest-api.md
@@ -14,7 +14,12 @@ You can access most of the REST services using `GET`, for example:
* To obtain a list of domains, send a `GET` request to the URL `/operator/latest/domains`
* To obtain a list of clusters in a domain, send a `GET` request to the URL `/operator/latest/domains//clusters`
-All of the REST services require authentication. Callers must pass in a valid token header and a CA certificate file. Callers should pass in the `Accept:/application/json` header.
+All of the REST services require authentication. Callers must pass in a valid token header and a CA certificate file. In previous operator versions, the operator performed authentication and authorization checks using the Kubernetes token review and subject access review APIs, and then updated the Domain resource using the operator's privileges. Now, by default, the operator will use the caller's bearer token to perform the underlying update to the Domain resource using the caller's privileges and thus delegating authentication and authorization checks directly to the Kubernetes API Server (see [REST interface configuration]({{< relref "/userguide/managing-operators/using-the-operator/using-helm.md#rest-interface-configuration" >}})).
+{{% notice note %}}
+When using the operator's REST services to scale up or down a WebLogic cluster, you may need to grant `patch` access to the user or service account associated with the caller's bearer token. This can be done with an RBAC ClusterRoleBinding between the user or service account and the ClusterRole that defines the permissions for the WebLogic `domains` resource.
+{{% /notice %}}
+
+Callers should pass in the `Accept: application/json` header.
To protect against Cross Site Request Forgery (CSRF) attacks, the operator REST API requires that you send in a `X-Requested-By` header when you invoke a REST endpoint that makes a change (for example, when you POST to the `/scale` endpoint). The value is an arbitrary name such as `MyClient`. For example, when using `curl`:
diff --git a/docs-source/content/userguide/managing-operators/using-the-operator/using-helm.md b/docs-source/content/userguide/managing-operators/using-the-operator/using-helm.md
index 59e4f5efab6..777bd8e3f9f 100644
--- a/docs-source/content/userguide/managing-operators/using-the-operator/using-helm.md
+++ b/docs-source/content/userguide/managing-operators/using-the-operator/using-helm.md
@@ -21,7 +21,7 @@ Note that the operator Helm chart is available from the GitHub chart repository,
#### Useful Helm operations
-Show the available operator configuration parameters and their default values:
+Show the available operator configuration values and their defaults:
```
$ helm inspect values kubernetes/charts/weblogic-operator
```
@@ -44,17 +44,17 @@ $ helm list --all-namespaces
Get the status of the operator Helm release:
```
-$ helm status weblogic-operator
+$ helm status weblogic-operator --namespace <namespace>
```
Show the history of the operator Helm release:
```
-$ helm history weblogic-operator
+$ helm history weblogic-operator --namespace <namespace>
```
Roll back to a previous version of this operator Helm release, in this case, the first version:
```
-$ helm rollback weblogic-operator 1
+$ helm rollback weblogic-operator 1 --namespace <namespace>
```
Change one or more values in the operator Helm release. In this example, the `--reuse-values` flag indicates that previous overrides of other values should be retained:
@@ -68,16 +68,6 @@ $ helm upgrade \
kubernetes/charts/weblogic-operator
```
-Enable operator debugging on port 30999. Again, we use `--reuse-values` to change one value without affecting the others:
-```
-$ helm upgrade \
- --reuse-values \
- --set "remoteDebugNodePortEnabled=true" \
- --wait \
- weblogic-operator \
- kubernetes/charts/weblogic-operator
-```
-
### Operator Helm configuration values
This section describes the details of the operator Helm chart's available configuration values.
@@ -85,8 +75,8 @@ This section describes the details of the operator Helm chart's available config
#### Overall operator information
##### `serviceAccount`
-
Specifies the name of the service account in the operator's namespace that the operator will use to make requests to the Kubernetes API server. You are responsible for creating the service account.
+The `helm install` or `helm upgrade` command with a non-existing service account results in a Helm chart validation error.
Defaults to `default`.
@@ -95,25 +85,7 @@ Example:
serviceAccount: "weblogic-operator"
```
-##### `dedicated`
-
-Specifies if this operator will manage WebLogic domains only in the same namespace in which the operator itself is deployed. If set to `true`, then the `domainNamespaces` value is ignored.
-
-Defaults to `false`.
-
-Example:
-```
-dedicated: false
-```
-
-In the `dedicated` mode, the operator does not require permissions to access the cluster-scoped Kubernetes resources, such as `CustomResourceDefinitions`, `PersistentVolumes`, and `Namespaces`. In those situations, the operator may skip some of its operations, such as verifying the WebLogic domain `CustomResoruceDefinition` `domains.weblogic.oracle` (and creating it when it is absent), watching namespace events, and cleaning up `PersistentVolumes` as part of deleting a domain.
-
-{{% notice note %}}
-It is the responsibility of the administrator to make sure that the required `CustomResourceDefinition (CRD)` `domains.weblogic.oracle` is deployed in the Kubernetes cluster before the operator is installed. The creation of the `CRD` requires the Kubernetes `cluster-admin` privileges. A YAML file for creating the `CRD` can be found at [domain-crd.yaml](http://github.com/oracle/weblogic-kubernetes-operator/blob/develop/kubernetes/crd/domain-crd.yaml).
-{{% /notice %}}
-
##### `javaLoggingLevel`
-
Specifies the level of Java logging that should be enabled in the operator. Valid values are: `SEVERE`, `WARNING`, `INFO`, `CONFIG`, `FINE`, `FINER`, and `FINEST`.
Defaults to `INFO`.
@@ -126,14 +98,13 @@ javaLoggingLevel: "FINE"
#### Creating the operator pod
##### `image`
-
Specifies the Docker image containing the operator code.
-Defaults to `weblogic-kubernetes-operator:3.0.3`.
+Defaults to `oracle/weblogic-kubernetes-operator:3.1.0`.
Example:
```
-image: "weblogic-kubernetes-operator:LATEST"
+image: "oracle/weblogic-kubernetes-operator:some-tag"
```
##### `imagePullPolicy`
@@ -189,21 +160,61 @@ affinity:
- another-node-label-value
```
+##### `enableClusterRoleBinding`
+Specifies whether the roles necessary for the operator to manage domains
+will be granted using a ClusterRoleBinding rather than using RoleBindings in each managed namespace.
+
+Defaults to `false`.
+
+This option greatly simplifies managing namespaces when the selection is done using label selectors or
+regular expressions as the operator will already have privilege in any namespace.
+
+Customers who deploy the operator in Kubernetes clusters that run unrelated workloads will likely
+not want to use this option.
+
+If `enableClusterRoleBinding` is `false` and you select namespaces that the operator will
+manage using label selectors or a regular expression, then the Helm release will only include
+RoleBindings in each namespace that match at the time the Helm release is created. If you later
+create namespaces that the operator should manage, the new namespaces will not yet have the necessary
+RoleBinding.
+
+You can correct this by upgrading the Helm release and reusing values:
+```
+$ helm upgrade \
+ --reuse-values \
+ weblogic-operator \
+ kubernetes/charts/weblogic-operator
+```
+
#### WebLogic domain management
-##### `domainNamespaces`
+##### `domainNamespaceSelectionStrategy`
+Specifies how the operator will select the set of namespaces that it will manage.
+Legal values are: `List`, `LabelSelector`, `RegExp`, and `Dedicated`.
+
+Defaults to `List`.
-Specifies a list of WebLogic domain namespaces which the operator manages. The names must be lower case. You are responsible for creating these namespaces.
+If set to `List`, then the operator will manage the set of namespaces listed by the `domainNamespaces` value.
+If set to `LabelSelector`, then the operator will manage the set of namespaces discovered by a list
+of namespaces using the value specified by `domainNamespaceLabelSelector` as a label selector.
+If set to `RegExp`, then the operator will manage the set of namespaces discovered by a list
+of namespaces using the value specified by `domainNamespaceRegExp` as a regular expression matched
+against the namespace names.
+Finally, if set to `Dedicated`, then the operator will manage WebLogic Domains only in the same namespace
+in which the operator itself is deployed, which is the namespace of the Helm release.
-This property is required.
+##### `domainNamespaces`
+Specifies a list of namespaces that the operator manages. The names must be lowercase. You are responsible for creating these namespaces.
+The operator will only manage Domains found in these namespaces.
+This value is required if `domainNamespaceSelectionStrategy` is `List` and ignored otherwise.
-Example 1: In the configuration below, the operator will monitor the `default` Kubernetes Namespace:
+Example 1: In the configuration below, the operator will manage the `default` Kubernetes Namespace:
```
domainNamespaces:
- "default"
```
-Example 2: In the configuration below, the Helm installation will manage `namespace1` and `namespace2`:
+Example 2: In the configuration below, the operator will manage `namespace1` and `namespace2`:
```
domainNamespaces: [ "namespace1", "namespace2" ]
```
@@ -222,8 +233,73 @@ This value is ignored if `dedicated` is set to `true`. Then, the operator will m
For more information about managing `domainNamespaces`, see [Managing domain namespaces]({{< relref "/faq/namespace-management.md" >}}).
-##### `domainPresenceFailureRetryMaxCount` and `domainPresenceFailureRetrySeconds`
+##### `domainNamespaceLabelSelector`
+Specifies a [label selector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors) that will be used when searching for namespaces that the operator will manage.
+The operator will only manage Domains found in namespaces matching this selector.
+This value is required if `domainNamespaceSelectionStrategy` is `LabelSelector` and ignored otherwise.
+
+If `enableClusterRoleBinding` is `false`, the Helm chart will create RoleBindings in each namespace that matches the selector.
+These RoleBindings give the operator's service account the necessary privileges in the namespace. The Helm chart will only create
+these RoleBindings in namespaces that match the label selector at the time the chart is installed. If you later create namespaces
+that match the selector, or label existing namespaces so that they now match the selector, then the operator will not have
+privilege in these namespaces until you upgrade the Helm release.
+
+Example 1: In the configuration below, the operator will manage namespaces that have the label "weblogic-operator"
+regardless of the value of that label:
+```
+domainNamespaceLabelSelector: weblogic-operator
+```
+
+Example 2: In the configuration below, the operator will manage all namespaces that have the label "environment",
+but where the value of that label is not "production" or "systemtest":
+```
+domainNamespaceLabelSelector: environment notin (production,systemtest)
+```
+
+{{% notice note %}}
+To specify the above sample on the Helm command line, escape spaces and commas as follows:
+```
+--set "domainNamespaceLabelSelector=environment\\ notin\\ (production\\,systemtest)"
+```
+{{% /notice %}}
+
+##### `domainNamespaceRegExp`
+Specifies a regular expression that will be used when searching for namespaces that the operator will manage.
+The operator will only manage Domains found in namespaces matching this regular expression.
+This value is required if `domainNamespaceSelectionStrategy` is `RegExp` and ignored otherwise.
+
+If `enableClusterRoleBinding` is `false`, the Helm chart will create RoleBindings in each namespace that matches the regular expression.
+These RoleBindings give the operator's service account the necessary privileges in the namespace. The Helm chart will only create
+these RoleBindings in namespaces that match the regular expression at the time the chart is installed. If you later create namespaces
+that match the regular expression, or label existing namespaces so that they now match it, then the operator will not have
+privilege in these namespaces until you upgrade the Helm release.
+
+{{% notice note %}}
+The regular expression functionality included with Helm is restricted to linear time constructs and,
+in particular, does not support lookarounds. The operator, written in Java, supports these
+complicated expressions. If you need to use a complex regular expression, then either set
+`enableClusterRoleBinding` to `true` or create the necessary RoleBindings outside of Helm.
+{{% /notice %}}
+
+##### `dedicated` ***(Deprecated)***
+Specifies if this operator will manage WebLogic domains only in the same namespace in which the operator itself is deployed. If set to `true`, then the `domainNamespaces` value is ignored.
+
+This field is deprecated. Use `domainNamespaceSelectionStrategy: Dedicated` instead.
+
+Defaults to `false`.
+Example:
+```
+dedicated: false
+```
+
+In the `dedicated` mode, the operator does not require permissions to access the cluster-scoped Kubernetes resources, such as `CustomResourceDefinitions`, `PersistentVolumes`, and `Namespaces`. In those situations, the operator may skip some of its operations, such as verifying the WebLogic domain `CustomResourceDefinition` `domains.weblogic.oracle` (and creating it when it is absent), watching namespace events, and cleaning up `PersistentVolumes` as part of deleting a domain.
+
+{{% notice note %}}
+It is the responsibility of the administrator to make sure that the required `CustomResourceDefinition (CRD)` `domains.weblogic.oracle` is deployed in the Kubernetes cluster before the operator is installed. The creation of the `CRD` requires the Kubernetes `cluster-admin` privileges. A YAML file for creating the `CRD` can be found at [domain-crd.yaml](http://github.com/oracle/weblogic-kubernetes-operator/blob/develop/kubernetes/crd/domain-crd.yaml).
+{{% /notice %}}
+
+##### `domainPresenceFailureRetryMaxCount` and `domainPresenceFailureRetrySeconds`
Specify the number of introspector job retries for a Domain and the interval in seconds between these retries.
Defaults to 5 retries and 10 seconds between each retry.
@@ -234,10 +310,29 @@ domainPresenceFailureRetryMaxCount: 10
domainPresenceFailureRetrySeconds: 30
```
+##### `introspectorJobNameSuffix` and `externalServiceNameSuffix`
+Specify the suffixes that the operator uses to form the name of the Kubernetes job for the domain introspector, and the name of the external service for the WebLogic Administration Server, if the external service is enabled.
+
+Defaults to `-introspector` and `-ext` respectively. The values cannot be more than 25 and 10 characters respectively.
+
+{{% notice note %}}
+Prior to the operator 3.1.0 release, the suffixes were hard-coded to `-introspect-domain-job` and `-external`. The defaults are shortened in newer releases to support longer names in the domain resource and WebLogic domain configurations, such as the `domainUID`, and WebLogic cluster and server names.
+{{% /notice %}}
+
+{{% notice note %}}
+In order to work with Kubernetes limits to resource names, the resultant names for the domain introspector job and the external service should not be more than 63 characters (see [Meet Kubernetes resource name restrictions]({{< relref "/userguide/managing-domains/_index.md#meet-kubernetes-resource-name-restrictions" >}})).
+{{% /notice %}}
+
+##### `clusterSizePaddingValidationEnabled`
+Specifies if the operator needs to reserve additional padding when validating the server service names to account for longer Managed Server names as a result of expanding a cluster's size in WebLogic domain configurations.
+
+Defaults to `true`.
+
+If `clusterSizePaddingValidationEnabled` is set to true, two additional characters will be reserved if the configured cluster's size is between one and nine, and one additional character will be reserved if the configured cluster's size is between 10 and 99. No additional character is reserved if the configured cluster's size is greater than 99.
+
#### Elastic Stack integration
##### `elkIntegrationEnabled`
-
Specifies whether or not Elastic Stack integration is enabled.
Defaults to `false`.
@@ -248,7 +343,6 @@ elkIntegrationEnabled: true
```
##### `logStashImage`
-
Specifies the Docker image containing Logstash. This parameter is ignored if `elkIntegrationEnabled` is false.
Defaults to `logstash:6.6.0`.
@@ -269,7 +363,6 @@ elasticSearchHost: "elasticsearch2.default.svc.cluster.local"
```
##### `elasticSearchPort`
-
Specifies the port number where Elasticsearch is running. This parameter is ignored if `elkIntegrationEnabled` is false.
Defaults to `9200`.
@@ -306,7 +399,6 @@ externalRestHttpsPort: 32009
```
##### `externalRestIdentitySecret`
-
Specifies the user supplied secret that contains the SSL/TLS certificate and private key for the external operator REST HTTPS interface. The value must be the name of the Kubernetes `tls` secret previously created in the namespace where the operator is deployed. This parameter is required if `externalRestEnabled` is `true`, otherwise, it is ignored. In order to create the Kubernetes `tls` secret you can use the following command:
```
@@ -334,7 +426,6 @@ externalRestIdentitySecret: weblogic-operator-external-rest-identity
```
##### `externalOperatorCert` ***(Deprecated)***
-
{{% notice info %}}
Use **`externalRestIdentitySecret`** instead
{{% /notice %}}
@@ -357,7 +448,6 @@ externalOperatorCert: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUQwakNDQXJxZ0F3S
```
##### `externalOperatorKey` ***(Deprecated)***
-
{{% notice info %}}
Use **`externalRestIdentitySecret`** instead
{{% /notice %}}
@@ -378,11 +468,25 @@ Example:
```
externalOperatorKey: QmFnIEF0dHJpYnV0ZXMKICAgIGZyaWVuZGx5TmFtZTogd2VibG9naWMtb3B ...
```
-
+##### `tokenReviewAuthentication`
+If set to `true`, `tokenReviewAuthentication` specifies whether the operator's REST API should use:
+ * Kubernetes token review API for authenticating users and
+ * Kubernetes subject access review API for authorizing a user's operation (`get`, `list`,
+ `patch`, and such) on a resource.
+ * Update the Domain resource using the operator's privileges.
+
+ If set to `false`, the operator's REST API will use the caller's bearer token for any update
+ to the Domain resource so that it is done using the caller's privileges.
+
+ Defaults to `false`.
+
+ Example:
+ ```
+ tokenReviewAuthentication: true
+ ```
#### Debugging options
##### `remoteDebugNodePortEnabled`
-
Specifies whether or not the operator will start a Java remote debug server on the provided port and suspend execution until a remote debugger has attached.
Defaults to `false`.
@@ -406,7 +510,6 @@ internalDebugHttpPort: 30888
```
##### `externalDebugHttpPort`
-
Specifies the node port that should be allocated for the Kubernetes cluster for the operator's Java remote debug server.
This parameter is required if `remoteDebugNodePortEnabled` is `true`. Otherwise, it is ignored.
@@ -488,34 +591,27 @@ Error: UPGRADE FAILED: Service "external-weblogic-operator-svc" is invalid: spec
#### Installing an operator and assigning it a service account that doesn't exist
-The `helm install` eventually times out and creates a failed release.
+The following `helm install` command fails because it tries to install an operator release with a non-existing service account `op2-sa`.
```
-$ helm install kubernetes/charts/weblogic-operator --name op2 --namespace myuser-op2-ns --values o24.yaml --wait --no-hooks
+$ helm install op2 kubernetes/charts/weblogic-operator --namespace myuser-op2-ns --set serviceAccount=op2-sa --wait --no-hooks
```
+The output contains the following error message.
+```
+ServiceAccount op2-sa not found in namespace myuser-op2-ns
+```
To recover:
-- `helm delete --purge` the failed release.
- Create the service account.
- `helm install` again.
#### Upgrading an operator and assigning it a service account that doesn't exist
-The `helm upgrade` succeeds and changes the service account on the existing operator deployment, but the existing deployment's pod doesn't get modified, so it keeps running. If the pod is deleted, the deployment creates another one using the OLD service account. However, there's an error in the deployment's status section saying that the service account doesn't exist.
-```
-lastTransitionTime: 2018-12-06T23:19:26Z
-lastUpdateTime: 2018-12-06T23:19:26Z
-message: 'pods "weblogic-operator-88bbb5896-" is forbidden: error looking up
-service account myuser-op2-ns/no-such-sa2: serviceaccount "no-such-sa2" not found'
-reason: FailedCreate
-status: "True"
-type: ReplicaFailure
-```
+The `helm upgrade` with a non-existing service account fails with the same error message as mentioned in the previous section, and the existing operator deployment stays unchanged.
To recover:
- Create the service account.
-- `helm rollback`
- `helm upgrade` again.
#### Installing an operator and having it manage a domain namespace that doesn't exist
diff --git a/docs-source/content/userguide/overview/k8s-setup.md b/docs-source/content/userguide/overview/k8s-setup.md
index 98a95c451b9..30f1eb688a7 100644
--- a/docs-source/content/userguide/overview/k8s-setup.md
+++ b/docs-source/content/userguide/overview/k8s-setup.md
@@ -208,7 +208,7 @@ These instructions are for Oracle Linux 7u2+. If you are using a different flav
1. Set an environment variable with the Docker version you want to install:
```
- docker_version="17.03.1.ce"
+ docker_version="18.09.1.ol"
```
1. Install Docker, removing any previously installed version:
@@ -273,7 +273,7 @@ These instructions are for Oracle Linux 7u2+. If you are using a different flav
setenforce 0
# install kube* packages
- v=${1:-1.8.4-0}
+ v=${1:-1.17.0-0}
old_ver=`echo $v | egrep "^1.7"`
yum install -y kubelet-$v kubeadm-$v kubectl-$v kubernetes-cni
diff --git a/docs/2.5.0/index.json b/docs/2.5.0/index.json
index 6030ee4b9d3..b876310b5cc 100644
--- a/docs/2.5.0/index.json
+++ b/docs/2.5.0/index.json
@@ -410,7 +410,7 @@
"title": "Model in image",
"tags": [],
"description": "Sample for supplying a WebLogic Deploy Tooling (WDT) model that the operator expands into a full domain home during runtime.",
- "content": " This feature is supported only in 3.0.0-rc1.\n Contents Introduction Model in Image domain types (WLS, JRF, and Restricted JRF) Use cases Sample directory structure Prerequisites for all domain types Additional prerequisites for JRF domains Initial use case: An initial WebLogic domain Update1 use case: Dynamically adding a data source using a model ConfigMap Cleanup References Introduction This sample demonstrates deploying a Model in Image domain home source type. Unlike Domain in PV and Domain in Image, Model in Image eliminates the need to pre-create your WebLogic domain home prior to deploying your domain resource. Instead, Model in Image uses a WebLogic Deploy Tooling (WDT) model to specify your WebLogic configuration.\nWDT models are a convenient and simple alternative to WebLogic WLST configuration scripts and templates. They compactly define a WebLogic domain using YAML files and support including application archives in a ZIP file. The WDT model format is described in the open source, WebLogic Deploy Tooling GitHub project, and the required directory structure for a WDT archive is specifically discussed here.\nFor more information on Model in Image, see the Model in Image user guide. For a comparison of Model in Image to other domain home source types, see Choose a domain home source type.\nModel in Image domain types (WLS, JRF, and Restricted JRF) There are three types of domains supported by Model in Image: a standard WLS domain, an Oracle Fusion Middleware Infrastructure Java Required Files (JRF) domain, and a RestrictedJRF domain. 
This sample demonstrates the WLS and JRF types.\nThe JRF domain path through the sample includes additional steps required for JRF: deploying an infrastructure database, initializing the database using the Repository Creation Utility (RCU) tool, referencing the infrastructure database from the WebLogic configuration, setting an Oracle Platform Security Services (OPSS) wallet password, and exporting/importing an OPSS wallet file. JRF domains may be used by Oracle products that layer on top of WebLogic Server, such as SOA and OSB. Similarly, RestrictedJRF domains may be used by Oracle layered products, such as Oracle Communications products.\nUse cases This sample demonstrates two Model in Image use cases:\n Initial: An initial WebLogic domain with the following characteristics:\n Image model-in-image:WLS-v1 with: A WebLogic installation A WebLogic Deploy Tooling (WDT) installation A WDT archive with version v1 of an exploded Java EE web application A WDT model with: A WebLogic Administration Server A WebLogic cluster A reference to the web application Kubernetes Secrets: WebLogic credentials Required WDT runtime password A domain resource with: spec.domainHomeSourceType: FromModel spec.image: model-in-image:WLS-v1 References to the secrets Update1: Demonstrates updating the initial domain by dynamically adding a data source using a model ConfigMap:\n Image model-in-image:WLS-v1: Same image as Initial use case Kubernetes Secrets: Same as Initial use case plus secrets for data source credentials and URL Kubernetes ConfigMap with: A WDT model for a data source targeted to the cluster A domain resource with: Same as Initial use case plus: spec.model.configMap referencing the ConfigMap References to data source secrets Sample directory structure The sample contains the following files and directories:\n Location Description domain-resources JRF and WLS domain resources. archives Source code location for WebLogic Deploy Tooling application ZIP archives. 
model-images Staging for each model image\u0026rsquo;s WDT YAML, WDT properties, and WDT archive ZIP files. The directories in model images are named for their respective images. model-configmaps Staging files for a model ConfigMap that configures a data source. ingresses Ingress resources. utils/wl-pod-wait.sh Utility for watching the pods in a domain reach their expected restartVersion, image name, and ready state. utils/patch-restart-version.sh Utility for updating a running domain spec.restartVersion field (which causes it to \u0026lsquo;re-instrospect\u0026rsquo; and \u0026lsquo;roll\u0026rsquo;). utils/opss-wallet.sh Utility for exporting or importing a JRF domain OPSS wallet file. Prerequisites for all domain types Choose the type of domain you\u0026rsquo;re going to use throughout the sample, WLS or JRF.\n The first time you try this sample, we recommend that you choose WLS even if you\u0026rsquo;re familiar with JRF. This is because WLS is simpler and will more easily familiarize you with Model in Image concepts. We recommend choosing JRF only if you are already familiar with JRF, you have already tried the WLS path through this sample, and you have a definite use case where you need to use JRF. 
The JAVA_HOME environment variable must be set and must reference a valid JDK 8 or 11 installation.\n Get the operator source from the release/3.0.0-rc1 branch and put it in /tmp/operator-source.\nFor example:\n$ mkdir /tmp/operator-source $ cd /tmp/operator-source $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git $ git checkout release/3.0.0-rc1 Note: We will refer to the top directory of the operator source tree as /tmp/operator-source; however, you can use a different location.\n For additional information about obtaining the operator source, see the Developer Guide Requirements.\n Copy the sample to a new directory; for example, use directory /tmp/mii-sample.\n$ mkdir /tmp/mii-sample $ cp -r /tmp/operator-source/kubernetes/samples/scripts/create-weblogic-domain/model-in-image/* /tmp/mii-sample Note: We will refer to this working copy of the sample as /tmp/mii-sample; however, you can use a different location. Make sure an operator is set up to manage namespace sample-domain1-ns. 
Also, make sure a Traefik ingress controller is managing the same namespace and listening on port 30305.\nFor example, follow the same steps as the Quick Start guide from the beginning through to the Prepare for a domain step.\nMake sure you stop when you complete the \u0026ldquo;Prepare for a domain\u0026rdquo; step and then resume following these instructions.\n Set up ingresses that will redirect HTTP from Traefik port 30305 to the clusters in this sample\u0026rsquo;s WebLogic domains.\n Option 1: To create the ingresses, use the following YAML to create a file called /tmp/mii-sample/ingresses/myingresses.yaml and then call kubectl apply -f /tmp/mii-sample/ingresses/myingresses.yaml:\napiVersion: extensions/v1beta1 kind: Ingress metadata: name: traefik-ingress-sample-domain1-admin-server namespace: sample-domain1-ns labels: weblogic.domainUID: sample-domain1 annotations: kubernetes.io/ingress.class: traefik spec: rules: - host: http: paths: - path: /console backend: serviceName: sample-domain1-admin-server servicePort: 7001 --- apiVersion: extensions/v1beta1 kind: Ingress metadata: name: traefik-ingress-sample-domain1-cluster-cluster-1 namespace: sample-domain1-ns labels: weblogic.domainUID: sample-domain1 annotations: kubernetes.io/ingress.class: traefik spec: rules: - host: sample-domain1-cluster-cluster-1.mii-sample.org http: paths: - path: backend: serviceName: sample-domain1-cluster-cluster-1 servicePort: 8001 --- apiVersion: extensions/v1beta1 kind: Ingress metadata: name: traefik-ingress-sample-domain1-cluster-cluster-2 namespace: sample-domain1-ns labels: weblogic.domainUID: sample-domain1 annotations: kubernetes.io/ingress.class: traefik spec: rules: - host: sample-domain1-cluster-cluster-2.mii-sample.org http: paths: - path: backend: serviceName: sample-domain1-cluster-cluster-2 servicePort: 8001 --- apiVersion: extensions/v1beta1 kind: Ingress metadata: name: traefik-ingress-sample-domain2-cluster-cluster-1 namespace: sample-domain1-ns labels: 
weblogic.domainUID: sample-domain2 annotations: kubernetes.io/ingress.class: traefik spec: rules: - host: sample-domain2-cluster-cluster-1.mii-sample.org http: paths: - path: backend: serviceName: sample-domain2-cluster-cluster-1 servicePort: 8001 Option 2: Run kubectl apply -f on each of the ingress YAML files that are already included in the sample source /tmp/mii-sample/ingresses directory:\n $ cd /tmp/mii-sample/ingresses $ kubectl apply -f traefik-ingress-sample-domain1-admin-server.yaml $ kubectl apply -f traefik-ingress-sample-domain1-cluster-cluster-1.yaml $ kubectl apply -f traefik-ingress-sample-domain1-cluster-cluster-2.yaml $ kubectl apply -f traefik-ingress-sample-domain2-cluster-cluster-1.yaml $ kubectl apply -f traefik-ingress-sample-domain2-cluster-cluster-2.yaml NOTE: We give each cluster ingress a different host name that is decorated using both its operator domain UID and its cluster name. This makes each cluster uniquely addressable even when cluster names are the same across different clusters. When using curl to access the WebLogic domain through the ingress, you will need to supply a host name header that matches the host names in the ingress.\n For more information on ingresses and load balancers, see Ingress.\n Obtain the WebLogic 12.2.1.4 image that is required to create the sample\u0026rsquo;s model images.\na. Use a browser to access Oracle Container Registry.\nb. Choose an image location: for JRF domains, select Middleware, then fmw-infrastructure; for WLS domains, select Middleware, then weblogic.\nc. Select Sign In and accept the license agreement.\nd. Use your terminal to log in to Docker locally: docker login container-registry.oracle.com.\ne. Later in this sample, when you run WebLogic Image Tool commands, the tool will use the image as a base image for creating model images. 
Specifically, the tool will implicitly call docker pull for one of the above licensed images as specified in the tool\u0026rsquo;s command line using the --fromImage parameter. For JRF, this sample specifies container-registry.oracle.com/middleware/fmw-infrastructure:12.2.1.4, and for WLS, the sample specifies container-registry.oracle.com/middleware/weblogic:12.2.1.4.\nIf you prefer, you can create your own base image and then substitute this image name in the WebLogic Image Tool --fromImage parameter throughout this sample. See Preparing a Base Image.\n Download the latest WebLogic Deploy Tooling and WebLogic Image Tool installer ZIP files to your /tmp/mii-sample/model-images directory.\nBoth WDT and WIT are required to create your Model in Image Docker images. Download the latest version of each tool\u0026rsquo;s installer ZIP file to the /tmp/mii-sample/model-images directory.\nFor example, visit the GitHub WebLogic Deploy Tooling Releases and WebLogic Image Tool Releases web pages to determine the latest release version for each, and then, assuming the version numbers are 1.8.0 and 1.8.4 respectively, call:\n$ curl -m 30 -fL https://github.com/oracle/weblogic-deploy-tooling/releases/download/weblogic-deploy-tooling-1.8.0/weblogic-deploy.zip \\ -o /tmp/mii-sample/model-images/weblogic-deploy.zip $ curl -m 30 -fL https://github.com/oracle/weblogic-image-tool/releases/download/release-1.8.4/imagetool.zip \\ -o /tmp/mii-sample/model-images/imagetool.zip Set up the WebLogic Image Tool.\nRun the following commands:\n$ cd /tmp/mii-sample/model-images $ unzip imagetool.zip $ ./imagetool/bin/imagetool.sh cache addInstaller \\ --type wdt \\ --version latest \\ --path /tmp/mii-sample/model-images/weblogic-deploy.zip These steps will install WIT to the /tmp/mii-sample/model-images/imagetool directory, plus put a wdt_latest entry in the tool\u0026rsquo;s cache which points to the WDT ZIP installer. 
We will use WIT later in the sample for creating model images.\n Additional prerequisites for JRF domains NOTE: If you\u0026rsquo;re using a WLS domain type, skip this section and continue here.\n JRF Prerequisites Contents Introduction to JRF setups Set up and initialize an infrastructure database Increase introspection job timeout Important considerations for RCU model attributes, domain resource attributes, and secrets Introduction to JRF setups NOTE: The requirements in this section are in addition to Prerequisites for all domain types.\n A JRF domain requires an infrastructure database, initializing this database with RCU, and configuring your domain to access this database. All of these steps must occur before you create your domain.\nSet up and initialize an infrastructure database A JRF domain requires an infrastructure database and also requires initializing this database with a schema and a set of tables. The following example shows how to set up a database and use the RCU tool to create the infrastructure schema for a JRF domain. The database is set up with the following attributes:\n Attribute Value database Kubernetes namespace default database Kubernetes pod oracle-db database image container-registry.oracle.com/database/enterprise:12.2.0.1-slim database password Oradoc_db1 infrastructure schema prefix FMW1 infrastructure schema password Oradoc_db1 database URL oracle-db.default.svc.cluster.local:1521/devpdb.k8s Ensure that you have access to the database image, and then create a deployment using it:\n Use a browser to log in to https://container-registry.oracle.com, select database-\u0026gt;enterprise and accept the license agreement.\n Get the database image:\n In the local shell, docker login container-registry.oracle.com. In the local shell, docker pull container-registry.oracle.com/database/enterprise:12.2.0.1-slim. 
Use the sample script in /tmp/operator-source/kubernetes/samples/scripts/create-oracle-db-service to create an Oracle database running in the pod, oracle-db.\n$ cd /tmp/operator-source/kubernetes/samples/scripts/create-oracle-db-service $ start-db-service.sh This script will deploy a database in the default namespace with the connect string oracle-db.default.svc.cluster.local:1521/devpdb.k8s, and administration password Oradoc_db1.\nThis step is based on the steps documented in Run a Database.\nWARNING: The Oracle Database Docker images are supported only for non-production use. For more details, see My Oracle Support note: Oracle Support for Database Running on Docker (Doc ID 2216342.1).\n Use the sample script in /tmp/operator-source/kubernetes/samples/scripts/create-rcu-schema to create the RCU schema with the schema prefix FMW1.\nNote that this script assumes Oradoc_db1 is the DBA password, Oradoc_db1 is the schema password, and that the database URL is oracle-db.default.svc.cluster.local:1521/devpdb.k8s.\n$ cd /tmp/operator-source/kubernetes/samples/scripts/create-rcu-schema $ ./create-rcu-schema.sh -s FMW1 -i container-registry.oracle.com/middleware/fmw-infrastructure:12.2.1.4 NOTE: If you need to drop the repository, use this command:\n$ drop-rcu-schema.sh -s FMW1 Increase introspection job timeout The JRF domain home creation can take more time than the introspection job\u0026rsquo;s default timeout. You should increase the timeout for the introspection job. Use the configuration.introspectorJobActiveDeadlineSeconds in your domain resource to override the default with a value of at least 300 seconds (the default is 120 seconds). 
Note that the JRF versions of the domain resource files that are provided in /tmp/mii-sample/domain-resources already set this value.\nImportant considerations for RCU model attributes, domain resource attributes, and secrets To allow Model in Image to access the database and OPSS wallet, you must create an RCU access secret containing the database connect string, user name, and password that\u0026rsquo;s referenced from your model and an OPSS wallet password secret that\u0026rsquo;s referenced from your domain resource before deploying your domain. It\u0026rsquo;s also necessary to define an RCUDbInfo stanza in your model.\nThe sample includes examples of JRF models and domain resources in the /tmp/mii-sample/model-images and /tmp/mii-sample/domain-resources directories, and instructions in the following sections will describe setting up the RCU and OPSS secrets.\nWhen you follow the instructions later in this sample, avoid instructions that are WLS only, and substitute JRF for WLS in the corresponding model image tags and domain resource file names.\nFor example:\n JRF domain resources in this sample have an opss.walletPasswordSecret field that references a secret named sample-domain1-opss-wallet-password-secret, with password=welcome1.\n JRF image models in this sample have a domainInfo -\u0026gt; RCUDbInfo stanza that reference a sample-domain1-rcu-access secret with appropriate values for attributes rcu_prefix, rcu_schema_password, and rcu_db_conn_string for accessing the Oracle database that you deployed to the default namespace as one of the prerequisite steps.\n Important considerations for reusing or sharing OPSS tables We do not recommend that most users share OPSS tables. 
Extreme caution is required when sharing OPSS tables between domains.\n When you successfully deploy your JRF domain resource for the first time, the introspector job will initialize the OPSS tables for the domain using the domainInfo -\u0026gt; RCUDbInfo stanza in the WDT model plus the configuration.opss.walletPasswordSecret specified in the domain resource. The job will also create a new domain home. Finally, the operator will also capture an OPSS wallet file from the new domain\u0026rsquo;s local directory and place this file in a new Kubernetes ConfigMap.\nThere are scenarios when the domain needs to be recreated between updates, such as when WebLogic credentials are changed, security roles defined in the WDT model have been changed, or you want to share the same infrastructure tables with different domains. In these scenarios, the operator needs the walletPasswordSecret as well as the OPSS wallet file, together with the exact information in domainInfo -\u0026gt; RCUDbInfo so that the domain can be recreated and access the same set of tables. Without the wallet file and wallet password, you will not be able to recreate a domain accessing the same set of tables, therefore we strongly recommend that you back up the wallet file.\nTo recover a domain\u0026rsquo;s OPSS tables between domain restarts or to share an OPSS schema between different domains, it is necessary to extract this wallet file from the domain\u0026rsquo;s automatically deployed introspector ConfigMap and save the OPSS wallet password secret that was used for the original domain. 
The wallet password and wallet file are needed again when you recreate the domain or share the database with other domains.\nTo save the wallet file, assuming that your namespace is sample-domain1-ns and your domain UID is sample-domain1:\n $ kubectl -n sample-domain1-ns \\ get configmap sample-domain1-weblogic-domain-introspect-cm \\ -o jsonpath='{.data.ewallet\\.p12}' \\ \u0026gt; ./ewallet.p12 Alternatively, you can save the file using the sample\u0026rsquo;s wallet utility:\n $ /tmp/mii-sample/utils/opss-wallet.sh -n sample-domain1-ns -d sample-domain1 -wf ./ewallet.p12 # For help: /tmp/mii-sample/utils/opss-wallet.sh -? Important! Back up your wallet file to a safe location that can be retrieved later.\nTo reuse the wallet file in subsequent redeployments or to share the domain\u0026rsquo;s OPSS tables between different domains:\n Load the saved wallet file into a secret with a key named walletFile (again, assuming that your domain UID is sample-domain1 and your namespace is sample-domain1-ns): $ kubectl -n sample-domain1-ns create secret generic sample-domain1-opss-walletfile-secret \\ --from-file=walletFile=./ewallet.p12 $ kubectl -n sample-domain1-ns label secret sample-domain1-opss-walletfile-secret \\ weblogic.domainUID=`sample-domain1` Alternatively, use the sample\u0026rsquo;s wallet utility:\n $ /tmp/mii-sample/utils/opss-wallet.sh -n sample-domain1-ns -d sample-domain1 -wf ./ewallet.p12 -ws sample-domain1-opss-walletfile-secret # For help: /tmp/mii-sample/utils/opss-wallet.sh -? 
Modify your domain resource JRF YAML files to provide the wallet file secret name, for example: configuration: opss: # Name of secret with walletPassword for extracting the wallet walletPasswordSecret: sample-domain1-opss-wallet-password-secret # Name of secret with walletFile containing base64 encoded opss wallet walletFileSecret: sample-domain1-opss-walletfile-secret Note: The sample JRF domain resource files included in /tmp/mii-sample/domain-resources already have the above YAML stanza.\n Initial use case Contents Overview Image creation Image creation - Introduction Understanding our first archive Staging a ZIP file of the archive Staging model files Creating the image with WIT Deploy resources Deploy resources - Introduction Secrets Domain resource Overview In this use case, we set up an initial WebLogic domain. This involves:\n A WDT archive ZIP file that contains your applications. A WDT model that describes your WebLogic configuration. A Docker image that contains your WDT model files and archive. Creating secrets for the domain. Creating a domain resource for the domain that references your secrets and image. After the domain resource is deployed, the WebLogic operator will start an \u0026lsquo;introspector job\u0026rsquo; that converts your models into a WebLogic configuration, and then the operator will pass this configuration to each WebLogic Server in the domain.\nPerform the steps in Prerequisites for all domain types before performing the steps in this use case.\nIf you are taking the JRF path through the sample, then substitute JRF for WLS in your image names and directory paths. 
Also note that the JRF-v1 model YAML differs from the WLS-v1 YAML file (it contains an additional domainInfo -\u0026gt; RCUDbInfo stanza).\n Image creation - Introduction The goal of the initial use case \u0026lsquo;image creation\u0026rsquo; is to demonstrate using the WebLogic Image Tool to create an image named model-in-image:WLS-v1 from files that we will stage to /tmp/mii-sample/model-images/model-in-image:WLS-v1/. The staged files will contain a web application in a WDT archive, and WDT model configuration for a WebLogic Administration Server called admin-server and a WebLogic cluster called cluster-1.\nOverall, a Model in Image image must contain a WebLogic installation and also a WebLogic Deploy Tooling installation in its /u01/wdt/weblogic-deploy directory. In addition, if you have WDT model archive files, then the image must also contain these files in its /u01/wdt/models directory. Finally, an image may optionally also contain your WDT model YAML and properties files in the same /u01/wdt/models directory. If you do not specify WDT model YAML in your /u01/wdt/models directory, then the model YAML must be supplied dynamically using a Kubernetes ConfigMap that is referenced by your domain resource spec.model.configMap attribute. We will provide an example of using a model ConfigMap later in this sample.\nLet\u0026rsquo;s walk through the steps for creating the image model-in-image:WLS-v1:\n Understanding our first archive Staging a ZIP file of the archive Staging model files Creating the image with WIT Understanding our first archive The sample includes a predefined archive directory in /tmp/mii-sample/archives/archive-v1 that we will use to create an archive ZIP file for the image.\nThe archive top directory, named wlsdeploy, contains a directory named applications, which includes an \u0026lsquo;exploded\u0026rsquo; sample JSP web application in the directory, myapp-v1. 
Three useful aspects to remember about WDT archives are:\n A model image can contain multiple WDT archives. WDT archives can contain multiple applications, libraries, and other components. WDT archives have a well defined directory structure, which always has wlsdeploy as the top directory. If you are interested in the web application source, click here to see the JSP code. \u0026lt;%-- Copyright (c) 2019, 2020, Oracle Corporation and/or its affiliates. --%\u0026gt; \u0026lt;%-- Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. --%\u0026gt; \u0026lt;%@ page import=\u0026quot;javax.naming.InitialContext\u0026quot; %\u0026gt; \u0026lt;%@ page import=\u0026quot;javax.management.*\u0026quot; %\u0026gt; \u0026lt;%@ page import=\u0026quot;java.io.*\u0026quot; %\u0026gt; \u0026lt;% InitialContext ic = null; try { ic = new InitialContext(); String srName=System.getProperty(\u0026quot;weblogic.Name\u0026quot;); String domainUID=System.getenv(\u0026quot;DOMAIN_UID\u0026quot;); String domainName=System.getenv(\u0026quot;CUSTOM_DOMAIN_NAME\u0026quot;); out.println(\u0026quot;\u0026lt;html\u0026gt;\u0026lt;body\u0026gt;\u0026lt;pre\u0026gt;\u0026quot;); out.println(\u0026quot;*****************************************************************\u0026quot;); out.println(); out.println(\u0026quot;Hello World! 
This is version 'v1' of the mii-sample JSP web-app.\u0026quot;); out.println(); out.println(\u0026quot;Welcome to WebLogic server '\u0026quot; + srName + \u0026quot;'!\u0026quot;); out.println(); out.println(\u0026quot; domain UID = '\u0026quot; + domainUID +\u0026quot;'\u0026quot;); out.println(\u0026quot; domain name = '\u0026quot; + domainName +\u0026quot;'\u0026quot;); out.println(); MBeanServer mbs = (MBeanServer)ic.lookup(\u0026quot;java:comp/env/jmx/runtime\u0026quot;); // display the current server's cluster name Set\u0026lt;ObjectInstance\u0026gt; clusterRuntimes = mbs.queryMBeans(new ObjectName(\u0026quot;*:Type=ClusterRuntime,*\u0026quot;), null); out.println(\u0026quot;Found \u0026quot; + clusterRuntimes.size() + \u0026quot; local cluster runtime\u0026quot; + (String)((clusterRuntimes.size()!=1)?\u0026quot;s:\u0026quot;:\u0026quot;:\u0026quot;)); for (ObjectInstance clusterRuntime : clusterRuntimes) { String cName = (String)mbs.getAttribute(clusterRuntime.getObjectName(), \u0026quot;Name\u0026quot;); out.println(\u0026quot; Cluster '\u0026quot; + cName + \u0026quot;'\u0026quot;); } out.println(); // display local data sources ObjectName jdbcRuntime = new ObjectName(\u0026quot;com.bea:ServerRuntime=\u0026quot; + srName + \u0026quot;,Name=\u0026quot; + srName + \u0026quot;,Type=JDBCServiceRuntime\u0026quot;); ObjectName[] dataSources = (ObjectName[])mbs.getAttribute(jdbcRuntime, \u0026quot;JDBCDataSourceRuntimeMBeans\u0026quot;); out.println(\u0026quot;Found \u0026quot; + dataSources.length + \u0026quot; local data source\u0026quot; + (String)((dataSources.length!=1)?\u0026quot;s:\u0026quot;:\u0026quot;:\u0026quot;)); for (ObjectName dataSource : dataSources) { String dsName = (String)mbs.getAttribute(dataSource, \u0026quot;Name\u0026quot;); String dsState = (String)mbs.getAttribute(dataSource, \u0026quot;State\u0026quot;); out.println(\u0026quot; Datasource '\u0026quot; + dsName + \u0026quot;': State='\u0026quot; + dsState +\u0026quot;'\u0026quot;); } 
out.println(); out.println(\u0026quot;*****************************************************************\u0026quot;); } catch (Throwable t) { t.printStackTrace(new PrintStream(response.getOutputStream())); } finally { out.println(\u0026quot;\u0026lt;/pre\u0026gt;\u0026lt;/body\u0026gt;\u0026lt;/html\u0026gt;\u0026quot;); if (ic != null) ic.close(); } %\u0026gt; The application displays important details about the WebLogic Server that it\u0026rsquo;s running on: namely its domain name, cluster name, and server name, as well as the names of any data sources that are targeted to the server. You can also see that application output reports that it\u0026rsquo;s at version v1; we will update this to v2 in a future use case to demonstrate upgrading the application.\nStaging a ZIP file of the archive When we create our image, we will use the files in staging directory /tmp/mii-sample/model-in-image__WLS-v1. In preparation, we need it to contain a ZIP file of the WDT application archive.\nRun the following commands to create your application archive ZIP file and put it in the expected directory:\n# Delete existing archive.zip in case we have an old leftover version $ rm -f /tmp/mii-sample/model-images/model-in-image__WLS-v1/archive.zip # Move to the directory which contains the source files for our archive $ cd /tmp/mii-sample/archives/archive-v1 # Zip the archive to the location we will later use when we run the WebLogic Image Tool $ zip -r /tmp/mii-sample/model-images/model-in-image__WLS-v1/archive.zip wlsdeploy Staging model files In this step, we explore the staged WDT model YAML file and properties in directory /tmp/mii-sample/model-in-image__WLS-v1. The model in this directory references the web application in our archive, configures a WebLogic Administration Server, and configures a WebLogic cluster. 
It consists of only two files, model.10.properties, a file with a single property, and, model.10.yaml, a YAML file with our WebLogic configuration model.10.yaml.\nCLUSTER_SIZE=5 Here is the WLS model.10.yaml:\ndomainInfo: AdminUserName: '@@SECRET:__weblogic-credentials__:username@@' AdminPassword: '@@SECRET:__weblogic-credentials__:password@@' ServerStartMode: 'prod' topology: Name: '@@ENV:CUSTOM_DOMAIN_NAME@@' AdminServerName: 'admin-server' Cluster: 'cluster-1': DynamicServers: ServerTemplate: 'cluster-1-template' ServerNamePrefix: 'managed-server' DynamicClusterSize: '@@PROP:CLUSTER_SIZE@@' MaxDynamicClusterSize: '@@PROP:CLUSTER_SIZE@@' MinDynamicClusterSize: '0' CalculatedListenPorts: false Server: 'admin-server': ListenPort: 7001 ServerTemplate: 'cluster-1-template': Cluster: 'cluster-1' ListenPort: 8001 appDeployments: Application: myapp: SourcePath: 'wlsdeploy/applications/myapp-v1' ModuleType: ear Target: 'cluster-1' Click here to expand the JRF `model.10.yaml`, and note the RCUDbInfo stanza and its references to a DOMAIN_UID-rcu-access secret. 
domainInfo: AdminUserName: '@@SECRET:__weblogic-credentials__:username@@' AdminPassword: '@@SECRET:__weblogic-credentials__:password@@' ServerStartMode: 'prod' RCUDbInfo: rcu_prefix: '@@SECRET:@@ENV:DOMAIN_UID@@-rcu-access:rcu_prefix@@' rcu_schema_password: '@@SECRET:@@ENV:DOMAIN_UID@@-rcu-access:rcu_schema_password@@' rcu_db_conn_string: '@@SECRET:@@ENV:DOMAIN_UID@@-rcu-access:rcu_db_conn_string@@' topology: AdminServerName: 'admin-server' Name: '@@ENV:CUSTOM_DOMAIN_NAME@@' Cluster: 'cluster-1': Server: 'admin-server': ListenPort: 7001 'managed-server1-c1-': Cluster: 'cluster-1' ListenPort: 8001 'managed-server2-c1-': Cluster: 'cluster-1' ListenPort: 8001 'managed-server3-c1-': Cluster: 'cluster-1' ListenPort: 8001 'managed-server4-c1-': Cluster: 'cluster-1' ListenPort: 8001 appDeployments: Application: myapp: SourcePath: 'wlsdeploy/applications/myapp-v1' ModuleType: ear Target: 'cluster-1' The model files:\n Define a WebLogic domain with:\n Cluster cluster-1 Administration Server admin-server A cluster-1 targeted ear application that\u0026rsquo;s located in the WDT archive ZIP file at wlsdeploy/applications/myapp-v1 Leverage macros to inject external values:\n The property file CLUSTER_SIZE property is referenced in the model YAML DynamicClusterSize and MaxDynamicClusterSize fields using a PROP macro. The model file domain name is injected using a custom environment variable named CUSTOM_DOMAIN_NAME using an ENV macro. We set this environment variable later in this sample using an env field in its domain resource. This conveniently provides a simple way to deploy multiple differently named domains using the same model image. The model file administrator user name and password are set using a weblogic-credentials secret macro reference to the WebLogic credential secret. This secret is in turn referenced using the weblogicCredentialsSecret field in the domain resource. 
The weblogic-credentials is a reserved name that always dereferences to the owning domain resource actual WebLogic credentials secret name. A Model in Image image can contain multiple properties files, archive ZIP files, and YAML files, but in this sample we use just one of each. For a full discussion of Model in Images model file naming conventions, file loading order, and macro syntax, see Model files in the Model in Image user documentation.\nCreating the image with WIT Note: If you are using JRF in this sample, substitute JRF for each occurrence of WLS in the imagetool command line below, plus substitute container-registry.oracle.com/middleware/fmw-infrastructure:12.2.1.4 for the --fromImage value.\n At this point, we have staged all of the files needed for image model-in-image:WLS-v1, they include:\n /tmp/mii-sample/model-images/weblogic-deploy.zip /tmp/mii-sample/model-images/model-in-image__WLS-v1/model.10.yaml /tmp/mii-sample/model-images/model-in-image__WLS-v1/model.10.properties /tmp/mii-sample/model-images/model-in-image__WLS-v1/archive.zip If you don\u0026rsquo;t see the weblogic-deploy.zip file, then it means that you missed a step in the prerequisites.\nNow let\u0026rsquo;s use the Image Tool to create an image named model-in-image:WLS-v1 that\u0026rsquo;s layered on a base WebLogic image. 
We\u0026rsquo;ve already set up this tool during the prerequisite steps at the beginning of this sample.\nRun the following commands to create the model image and verify that it worked:\n$ cd /tmp/mii-sample/model-images $ ./imagetool/bin/imagetool.sh update \\ --tag model-in-image:WLS-v1 \\ --fromImage container-registry.oracle.com/middleware/weblogic:12.2.1.4 \\ --wdtModel ./model-in-image__WLS-v1/model.10.yaml \\ --wdtVariables ./model-in-image__WLS-v1/model.10.properties \\ --wdtArchive ./model-in-image__WLS-v1/archive.zip \\ --wdtModelOnly \\ --wdtDomainType WLS If you don\u0026rsquo;t see the imagetool directory, then it means that you missed a step in the prerequisites.\nThis command runs the WebLogic Image Tool in its Model in Image mode, and does the following:\n Builds the final Docker image as a layer on the container-registry.oracle.com/middleware/weblogic:12.2.1.4 base image. Copies the WDT ZIP file that\u0026rsquo;s referenced in the WIT cache into the image. Note that we cached WDT in WIT using the keyword latest when we set up the cache during the sample prerequisites steps. This lets WIT implicitly assume it\u0026rsquo;s the desired WDT version and removes the need to pass a --wdtVersion flag. Copies the specified WDT model, properties, and application archives to image location /u01/wdt/models. When the command succeeds, it should end with output like:\n[INFO ] Build successful. Build time=36s. Image tag=model-in-image:WLS-v1 Also, if you run the docker images command, then you should see a Docker image named model-in-image:WLS-v1.\nDeploy resources - Introduction In this section we will deploy our new image to namespace sample-domain1-ns, including the following steps:\n Create a secret containing your WebLogic administrator user name and password. Create a secret containing your Model in Image runtime encryption password: All Model in Image domains must supply a runtime encryption secret with a password value. 
It is used to encrypt configuration that is passed around internally by the operator. The value must be kept private but can be arbitrary; you can optionally supply a different secret value every time you restart the domain. If your domain type is JRF, create secrets containing your RCU access URL, credentials, and prefix. Deploy a domain resource YAML file that references the new image. Wait for the domain\u0026rsquo;s pods to start and reach their ready state. Secrets First, create the secrets needed by both WLS and JRF type model domains. In this case, we have two secrets.\nRun the following kubectl commands to deploy the required secrets:\n$ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-weblogic-credentials \\ --from-literal=username=weblogic --from-literal=password=welcome1 $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-weblogic-credentials \\ weblogic.domainUID=sample-domain1 $ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-runtime-encryption-secret \\ --from-literal=password=my_runtime_password $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-runtime-encryption-secret \\ weblogic.domainUID=sample-domain1 Some important details about these secrets:\n The WebLogic credentials secret:\n It is required and must contain username and password fields. It must be referenced by the spec.weblogicCredentialsSecret field in your domain resource. It also must be referenced by macros in the domainInfo.AdminUserName and domainInfo.AdminPassword fields in your model YAML file. The Model WDT runtime secret:\n This is a special secret required by Model in Image. It must contain a password field. It must be referenced using the spec.model.runtimeEncryptionSecret attribute in its domain resource. It must remain the same for as long as the domain is deployed to Kubernetes, but can be changed between deployments. 
It is used to encrypt data as it\u0026rsquo;s internally passed using log files from the domain\u0026rsquo;s introspector job and on to its WebLogic Server pods. Deleting and recreating the secrets:\n We delete a secret before creating it, otherwise the create command will fail if the secret already exists. This allows us to change the secret when using the kubectl create secret command. We name and label secrets using their associated domain UID for two reasons:\n To make it obvious which secrets belong to which domains. To make it easier to clean up a domain. Typical cleanup scripts use the weblogic.domainUID label as a convenience for finding all resources associated with a domain. If you\u0026rsquo;re following the JRF path through the sample, then you also need to deploy the additional secret referenced by macros in the JRF model RCUDbInfo clause, plus an OPSS wallet password secret. For details about the uses of these secrets, see the Model in Image user documentation.\n Click here for the commands for deploying additional secrets for JRF. $ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-rcu-access \\ --from-literal=rcu_prefix=FMW1 \\ --from-literal=rcu_schema_password=Oradoc_db1 \\ --from-literal=rcu_db_conn_string=oracle-db.default.svc.cluster.local:1521/devpdb.k8s $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-rcu-access \\ weblogic.domainUID=sample-domain1 $ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-opss-wallet-password-secret \\ --from-literal=walletPassword=welcome1 $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-opss-wallet-password-secret \\ weblogic.domainUID=sample-domain1 Domain resource Now let\u0026rsquo;s create a domain resource. 
A domain resource is the key resource that tells the operator how to deploy a WebLogic domain.\nCopy the following to a file called /tmp/mii-sample/mii-initial.yaml or similar, or use the file /tmp/mii-sample/domain-resources/WLS/mii-initial-d1-WLS-v1.yaml that is included in the sample source.\n Click here to expand the WLS domain resource YAML. # # This is an example of how to define a Domain resource. # # If you are using 3.0.0-rc1, then the version on the following line # should be `v7` not `v6`. apiVersion: \u0026quot;weblogic.oracle/v6\u0026quot; kind: Domain metadata: name: sample-domain1 namespace: sample-domain1-ns labels: weblogic.resourceVersion: domain-v2 weblogic.domainUID: sample-domain1 spec: # Set to 'FromModel' to indicate 'Model in Image'. domainHomeSourceType: FromModel # The WebLogic Domain Home, this must be a location within # the image for 'Model in Image' domains. domainHome: /u01/domains/sample-domain1 # The WebLogic Server Docker image that the Operator uses to start the domain image: \u0026quot;model-in-image:WLS-v1\u0026quot; # Defaults to \u0026quot;Always\u0026quot; if image tag (version) is ':latest' imagePullPolicy: \u0026quot;IfNotPresent\u0026quot; # Identify which Secret contains the credentials for pulling an image #imagePullSecrets: #- name: regsecret # Identify which Secret contains the WebLogic Admin credentials, # the secret must contain 'username' and 'password' fields. webLogicCredentialsSecret: name: sample-domain1-weblogic-credentials # Whether to include the WebLogic server stdout in the pod's stdout, default is true includeServerOutInPodLog: true # Whether to enable overriding your log file location, see also 'logHome' #logHomeEnabled: false # The location for domain log, server logs, server out, and Node Manager log files # see also 'logHomeEnabled', 'volumes', and 'volumeMounts'. 
#logHome: /shared/logs/sample-domain1 # Set which WebLogic servers the Operator will start # - \u0026quot;NEVER\u0026quot; will not start any server in the domain # - \u0026quot;ADMIN_ONLY\u0026quot; will start up only the administration server (no managed servers will be started) # - \u0026quot;IF_NEEDED\u0026quot; will start all non-clustered servers, including the administration server, and clustered servers up to their replica count. serverStartPolicy: \u0026quot;IF_NEEDED\u0026quot; # Settings for all server pods in the domain including the introspector job pod serverPod: # Optional new or overridden environment variables for the domain's pods # - This sample uses CUSTOM_DOMAIN_NAME in its image model file # to set the Weblogic domain name env: - name: CUSTOM_DOMAIN_NAME value: \u0026quot;domain1\u0026quot; - name: JAVA_OPTIONS value: \u0026quot;-Dweblogic.StdoutDebugEnabled=false\u0026quot; - name: USER_MEM_ARGS value: \u0026quot;-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom \u0026quot; # Optional volumes and mounts for the domain's pods. See also 'logHome'. #volumes: #- name: weblogic-domain-storage-volume # persistentVolumeClaim: # claimName: sample-domain1-weblogic-sample-pvc #volumeMounts: #- mountPath: /shared # name: weblogic-domain-storage-volume # The desired behavior for starting the domain's administration server. 
adminServer: # The serverStartState legal values are \u0026quot;RUNNING\u0026quot; or \u0026quot;ADMIN\u0026quot; # \u0026quot;RUNNING\u0026quot; means the listed server will be started up to \u0026quot;RUNNING\u0026quot; mode # \u0026quot;ADMIN\u0026quot; means the listed server will be start up to \u0026quot;ADMIN\u0026quot; mode serverStartState: \u0026quot;RUNNING\u0026quot; # Setup a Kubernetes node port for the administration server default channel #adminService: # channels: # - channelName: default # nodePort: 30701 # The number of managed servers to start for unlisted clusters replicas: 1 # The desired behavior for starting a specific cluster's member servers clusters: - clusterName: cluster-1 serverStartState: \u0026quot;RUNNING\u0026quot; replicas: 2 # Change the `restartVersion` to force the introspector job to rerun # and apply any new model configuration, to also force a subsequent # roll of your domain's WebLogic pods. restartVersion: '1' configuration: # Settings for domainHomeSourceType 'FromModel' model: # Valid model domain types are 'WLS', 'JRF', and 'RestrictedJRF', default is 'WLS' domainType: \u0026quot;WLS\u0026quot; # Optional configmap for additional models and variable files #configMap: sample-domain1-wdt-config-map # All 'FromModel' domains require a runtimeEncryptionSecret with a 'password' field runtimeEncryptionSecret: sample-domain1-runtime-encryption-secret # Secrets that are referenced by model yaml macros # (the model yaml in the optional configMap or in the image) #secrets: #- sample-domain1-datasource-secret Click here to expand the JRF domain resource YAML. # Copyright (c) 2020, Oracle Corporation and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # # This is an example of how to define a Domain resource. # # If you are using 3.0.0-rc1, then the version on the following line # should be `v7` not `v6`. 
apiVersion: \u0026quot;weblogic.oracle/v6\u0026quot; kind: Domain metadata: name: sample-domain1 namespace: sample-domain1-ns labels: weblogic.resourceVersion: domain-v2 weblogic.domainUID: sample-domain1 spec: # Set to 'FromModel' to indicate 'Model in Image'. domainHomeSourceType: FromModel # The WebLogic Domain Home, this must be a location within # the image for 'Model in Image' domains. domainHome: /u01/domains/sample-domain1 # The WebLogic Server Docker image that the Operator uses to start the domain image: \u0026quot;model-in-image:JRF-v1\u0026quot; # Defaults to \u0026quot;Always\u0026quot; if image tag (version) is ':latest' imagePullPolicy: \u0026quot;IfNotPresent\u0026quot; # Identify which Secret contains the credentials for pulling an image #imagePullSecrets: #- name: regsecret # Identify which Secret contains the WebLogic Admin credentials, # the secret must contain 'username' and 'password' fields. webLogicCredentialsSecret: name: sample-domain1-weblogic-credentials # Whether to include the WebLogic server stdout in the pod's stdout, default is true includeServerOutInPodLog: true # Whether to enable overriding your log file location, see also 'logHome' #logHomeEnabled: false # The location for domain log, server logs, server out, and Node Manager log files # see also 'logHomeEnabled', 'volumes', and 'volumeMounts'. #logHome: /shared/logs/sample-domain1 # Set which WebLogic servers the Operator will start # - \u0026quot;NEVER\u0026quot; will not start any server in the domain # - \u0026quot;ADMIN_ONLY\u0026quot; will start up only the administration server (no managed servers will be started) # - \u0026quot;IF_NEEDED\u0026quot; will start all non-clustered servers, including the administration server, and clustered servers up to their replica count. 
serverStartPolicy: \u0026quot;IF_NEEDED\u0026quot; # Settings for all server pods in the domain including the introspector job pod serverPod: # Optional new or overridden environment variables for the domain's pods # - This sample uses CUSTOM_DOMAIN_NAME in its image model file # to set the Weblogic domain name env: - name: CUSTOM_DOMAIN_NAME value: \u0026quot;domain1\u0026quot; - name: JAVA_OPTIONS value: \u0026quot;-Dweblogic.StdoutDebugEnabled=false\u0026quot; - name: USER_MEM_ARGS value: \u0026quot;-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom \u0026quot; # Optional volumes and mounts for the domain's pods. See also 'logHome'. #volumes: #- name: weblogic-domain-storage-volume # persistentVolumeClaim: # claimName: sample-domain1-weblogic-sample-pvc #volumeMounts: #- mountPath: /shared # name: weblogic-domain-storage-volume # The desired behavior for starting the domain's administration server. adminServer: # The serverStartState legal values are \u0026quot;RUNNING\u0026quot; or \u0026quot;ADMIN\u0026quot; # \u0026quot;RUNNING\u0026quot; means the listed server will be started up to \u0026quot;RUNNING\u0026quot; mode # \u0026quot;ADMIN\u0026quot; means the listed server will be start up to \u0026quot;ADMIN\u0026quot; mode serverStartState: \u0026quot;RUNNING\u0026quot; # Setup a Kubernetes node port for the administration server default channel #adminService: # channels: # - channelName: default # nodePort: 30701 # The number of managed servers to start for unlisted clusters replicas: 1 # The desired behavior for starting a specific cluster's member servers clusters: - clusterName: cluster-1 serverStartState: \u0026quot;RUNNING\u0026quot; replicas: 2 # Change the restartVersion to force the introspector job to rerun # and apply any new model configuration, to also force a subsequent # roll of your domain's WebLogic pods. 
restartVersion: '1' configuration: # Settings for domainHomeSourceType 'FromModel' model: # Valid model domain types are 'WLS', 'JRF', and 'RestrictedJRF', default is 'WLS' domainType: \u0026quot;JRF\u0026quot; # Optional configmap for additional models and variable files #configMap: sample-domain1-wdt-config-map # All 'FromModel' domains require a runtimeEncryptionSecret with a 'password' field runtimeEncryptionSecret: sample-domain1-runtime-encryption-secret # Secrets that are referenced by model yaml macros # (the model yaml in the optional configMap or in the image) secrets: #- sample-domain1-datasource-secret - sample-domain1-rcu-access # Increase the introspector job active timeout value for JRF use cases introspectorJobActiveDeadlineSeconds: 300 opss: # Name of secret with walletPassword for extracting the wallet, used for JRF domains walletPasswordSecret: sample-domain1-opss-wallet-password-secret # Name of secret with walletFile containing base64 encoded opss wallet, used for JRF domains #walletFileSecret: sample-domain1-opss-walletfile-secret Run the following command to create the domain custom resource:\n$ kubectl apply -f /tmp/mii-sample/domain-resources/WLS/mii-initial-d1-WLS-v1.yaml Note: If you are choosing not to use the predefined domain resource YAML file and instead created your own domain resource file earlier, then substitute your custom file name in the above command. You might recall that we suggested naming it /tmp/mii-sample/mii-initial.yaml.\n If you run kubectl get pods -n sample-domain1-ns --watch, then you should see the introspector job run and your WebLogic Server pods start. The output should look something like this:\n Click here to expand. 
$ kubectl get pods -n sample-domain1-ns --watch NAME READY STATUS RESTARTS AGE sample-domain1-introspect-domain-job-lqqj9 0/1 Pending 0 0s sample-domain1-introspect-domain-job-lqqj9 0/1 ContainerCreating 0 0s sample-domain1-introspect-domain-job-lqqj9 1/1 Running 0 1s sample-domain1-introspect-domain-job-lqqj9 0/1 Completed 0 65s sample-domain1-introspect-domain-job-lqqj9 0/1 Terminating 0 65s sample-domain1-admin-server 0/1 Pending 0 0s sample-domain1-admin-server 0/1 ContainerCreating 0 0s sample-domain1-admin-server 0/1 Running 0 1s sample-domain1-admin-server 1/1 Running 0 32s sample-domain1-managed-server1 0/1 Pending 0 0s sample-domain1-managed-server2 0/1 Pending 0 0s sample-domain1-managed-server1 0/1 ContainerCreating 0 0s sample-domain1-managed-server2 0/1 ContainerCreating 0 0s sample-domain1-managed-server1 0/1 Running 0 2s sample-domain1-managed-server2 0/1 Running 0 2s sample-domain1-managed-server1 1/1 Running 0 43s sample-domain1-managed-server2 1/1 Running 0 42s Alternatively, you can run /tmp/mii-sample/utils/wl-pod-wait.sh -p 3. This is a utility script that provides useful information about a domain\u0026rsquo;s pods and waits for them to reach a ready state, reach their target restartVersion, and reach their target image before exiting.\n Click here to expand the `wl-pod-wait.sh` usage. $ ./wl-pod-wait.sh -? Usage: wl-pod-wait.sh [-n mynamespace] [-d mydomainuid] \\ [-p expected_pod_count] \\ [-t timeout_secs] \\ [-q] Exits non-zero if 'timeout_secs' is reached before 'pod_count' is reached. Parameters: -d \u0026lt;domain_uid\u0026gt; : Defaults to 'sample-domain1'. -n \u0026lt;namespace\u0026gt; : Defaults to 'sample-domain1-ns'. pod_count \u0026gt; 0 : Wait until exactly 'pod_count' WebLogic server pods for a domain all (a) are ready, (b) have the same 'domainRestartVersion' label value as the current domain resource's 'spec.restartVersion, and (c) have the same image as the current domain resource's image. 
pod_count = 0 : Wait until there are no running WebLogic server pods for a domain. The default. -t \u0026lt;timeout\u0026gt; : Timeout in seconds. Defaults to '600'. -q : Quiet mode. Show only a count of wl pods that have reached the desired criteria. -? : This help. Click here to expand sample output from `wl-pod-wait.sh`. @@ [2020-04-30T13:50:42][seconds=0] Info: Waiting up to 600 seconds for exactly '3' WebLogic server pods to reach the following criteria: @@ [2020-04-30T13:50:42][seconds=0] Info: ready='true' @@ [2020-04-30T13:50:42][seconds=0] Info: image='model-in-image:WLS-v1' @@ [2020-04-30T13:50:42][seconds=0] Info: domainRestartVersion='1' @@ [2020-04-30T13:50:42][seconds=0] Info: namespace='sample-domain1-ns' @@ [2020-04-30T13:50:42][seconds=0] Info: domainUID='sample-domain1' @@ [2020-04-30T13:50:42][seconds=0] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:50:42][seconds=0] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----- ----- --------- 'sample-domain1-introspect-domain-job-rkdkg' '' '' '' 'Pending' @@ [2020-04-30T13:50:45][seconds=3] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:50:45][seconds=3] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----- ----- --------- 'sample-domain1-introspect-domain-job-rkdkg' '' '' '' 'Running' @@ [2020-04-30T13:51:50][seconds=68] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:51:50][seconds=68] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE ---- ------- ----- ----- ----- @@ [2020-04-30T13:51:59][seconds=77] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:51:59][seconds=77] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE ----------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'false' 'Pending' @@ [2020-04-30T13:52:02][seconds=80] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:52:02][seconds=80] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE ----------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'false' 'Running' @@ [2020-04-30T13:52:32][seconds=110] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:52:32][seconds=110] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'false' 'Pending' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'false' 'Pending' @@ [2020-04-30T13:52:34][seconds=112] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:52:34][seconds=112] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'false' 'Running' @@ [2020-04-30T13:53:14][seconds=152] Info: '3' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:53:14][seconds=152] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:53:14][seconds=152] Info: Success! If you see an error, then consult Debugging in the Model in Image user guide.\nInvoke the web application Now that all the initial use case resources have been deployed, you can invoke the sample web application through the Traefik ingress controller\u0026rsquo;s NodePort. Note: The web application will display a list of any data sources it finds, but we don\u0026rsquo;t expect it to find any because the model doesn\u0026rsquo;t contain any at this point.\nSend a web application request to the load balancer:\n$ curl -s -S -m 10 -H 'host: sample-domain1-cluster-cluster-1.mii-sample.org' \\ http://localhost:30305/myapp_war/index.jsp Or, if Traefik is unavailable and your Administration Server pod is running, you can use kubectl exec:\n$ kubectl exec -n sample-domain1-ns sample-domain1-admin-server -- bash -c \\ \u0026quot;curl -s -S -m 10 http://sample-domain1-cluster-cluster-1:8001/myapp_war/index.jsp\u0026quot; You should see output like the following:\n$ curl -s -S -m 10 -H 'host: sample-domain1-cluster-cluster-1.mii-sample.org' \\ http://localhost:30305/myapp_war/index.jsp \u0026lt;html\u0026gt;\u0026lt;body\u0026gt;\u0026lt;pre\u0026gt; ***************************************************************** Hello World! This is version 'v1' of the mii-sample JSP web-app. Welcome to WebLogic server 'managed-server2'! 
domain UID = 'sample-domain1' domain name = 'domain1' Found 1 local cluster runtime: Cluster 'cluster-1' Found 0 local data sources: ***************************************************************** \u0026lt;/pre\u0026gt;\u0026lt;/body\u0026gt;\u0026lt;/html\u0026gt; Note: If you\u0026rsquo;re running your curl commands on a remote machine, then substitute localhost with an external address suitable for contacting your Kubernetes cluster. A Kubernetes cluster address that often works can be obtained by using the address just after https:// in the KubeDNS line of the output from the kubectl cluster-info command.\nIf you want to continue to the next use case, then leave your domain running.\nUpdate1 use case This use case demonstrates dynamically adding a data source to your running domain. It demonstrates several features of WDT and Model in Image:\n The syntax used for updating a model is exactly the same syntax you use for creating the original model. A domain\u0026rsquo;s model can be updated dynamically by supplying a model update in a file in a Kubernetes ConfigMap. Model updates can be as simple as changing the value of a single attribute, or more complex, such as adding a JMS Server. For a detailed discussion of model updates, see Runtime Updates in the Model in Image user guide.\nThe operator does not support all possible dynamic model updates. For model update limitations, consult Runtime Updates in the Model in Image user docs, and carefully test any model update before attempting a dynamic update in production.\n Here are the steps:\n Ensure that you have a running domain.\nMake sure you have deployed the domain from the Initial use case.\n Create a data source model YAML file.\nCreate a WDT model snippet for a data source (or use the example provided). 
Make sure that its target is set to cluster-1, and that its initial capacity is set to 0.\nThe reason for the latter is to prevent the data source from causing a WebLogic Server startup failure if it can\u0026rsquo;t find the database, which would be likely to happen because we haven\u0026rsquo;t deployed one (unless you\u0026rsquo;re using the JRF path through the sample).\nHere\u0026rsquo;s an example data source model configuration that meets these criteria:\nresources: JDBCSystemResource: mynewdatasource: Target: 'cluster-1' JdbcResource: JDBCDataSourceParams: JNDIName: [ jdbc/mydatasource1, jdbc/mydatasource2 ] GlobalTransactionsProtocol: TwoPhaseCommit JDBCDriverParams: DriverName: oracle.jdbc.xa.client.OracleXADataSource URL: '@@SECRET:@@ENV:DOMAIN_UID@@-datasource-secret:url@@' PasswordEncrypted: '@@SECRET:@@ENV:DOMAIN_UID@@-datasource-secret:password@@' Properties: user: Value: 'sys as sysdba' oracle.net.CONNECT_TIMEOUT: Value: 5000 oracle.jdbc.ReadTimeout: Value: 30000 JDBCConnectionPoolParams: InitialCapacity: 0 MaxCapacity: 1 TestTableName: SQL ISVALID TestConnectionsOnReserve: true Place the above model snippet in a file named /tmp/mii-sample/mydatasource.yaml and then use it in the later step where we deploy the model ConfigMap, or alternatively, use the same data source that\u0026rsquo;s provided in /tmp/mii-sample/model-configmaps/datasource/model.20.datasource.yaml.\n Create the data source secret.\nThe data source references a new secret that needs to be created. 
Run the following commands to create the secret:\n$ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-datasource-secret \\ --from-literal=password=Oradoc_db1 \\ --from-literal=url=jdbc:oracle:thin:@oracle-db.default.svc.cluster.local:1521/devpdb.k8s $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-datasource-secret \\ weblogic.domainUID=sample-domain1 We name and label secrets using their associated domain UID for two reasons:\n To make it obvious which secrets belong to which domains. To make it easier to clean up a domain. Typical cleanup scripts use the weblogic.domainUID label as a convenience for finding all the resources associated with a domain. Create a ConfigMap with the WDT model that contains the data source definition.\nRun the following commands:\n$ kubectl -n sample-domain1-ns create configmap sample-domain1-wdt-config-map \\ --from-file=/tmp/mii-sample/model-configmaps/datasource $ kubectl -n sample-domain1-ns label configmap sample-domain1-wdt-config-map \\ weblogic.domainUID=sample-domain1 If you\u0026rsquo;ve created your own data source file, then substitute the file name in the --from-file= parameter (we suggested /tmp/mii-sample/mydatasource.yaml earlier). Note that the --from-file= parameter can reference a single file, in which case it puts the designated file in the ConfigMap, or it can reference a directory, in which case it populates the ConfigMap with all of the files in the designated directory. We name and label ConfigMaps using their associated domain UID for two reasons:\n To make it obvious which ConfigMaps belong to which domains. To make it easier to clean up a domain. Typical cleanup scripts use the weblogic.domainUID label as a convenience for finding all resources associated with a domain. 
Update your domain resource to refer to the ConfigMap and secret.\n Option 1: Update your current domain resource file from the \u0026ldquo;Initial\u0026rdquo; use case.\n Add the secret to its spec.configuration.secrets stanza:\nspec: ... configuration: ... secrets: - sample-domain1-datasource-secret (Leave any existing secrets in place.)\n Change its spec.configuration.model.configMap to look like:\nspec: ... configuration: ... model: ... configMap: sample-domain1-wdt-config-map Apply your changed domain resource:\n$ kubectl apply -f your-domain-resource.yaml Option 2: Use the updated domain resource file that is supplied with the sample:\n$ kubectl apply -f /tmp/mii-sample/domain-resources/WLS/mii-update1-d1-WLS-v1-ds.yaml Restart (\u0026lsquo;roll\u0026rsquo;) the domain.\nNow that the data source is deployed in a ConfigMap and its secret is also deployed, and we have applied an updated domain resource with its spec.configuration.model.configMap and spec.configuration.secrets referencing the ConfigMap and secret, let\u0026rsquo;s tell the operator to roll the domain.\nWhen a model domain restarts, it will rerun its introspector job in order to regenerate its configuration, and it will also pass the configuration changes found by the introspector to each restarted server. One way to cause a running domain to restart is to change the domain\u0026rsquo;s spec.restartVersion. To do this:\n Option 1: Edit your domain custom resource.\n Call kubectl -n sample-domain1-ns edit domain sample-domain1. Edit the value of the spec.restartVersion field and save. The field is a string; typically, you use a number in this field and increment it with each restart. 
Option 2: Dynamically change your domain using kubectl patch.\n To get the current restartVersion call:\n$ kubectl -n sample-domain1-ns get domain sample-domain1 '-o=jsonpath={.spec.restartVersion}' Choose a new restart version that\u0026rsquo;s different from the current restart version.\n The field is a string; typically, you use a number in this field and increment it with each restart. Use kubectl patch to set the new value. For example, assuming the new restart version is 2:\n$ kubectl -n sample-domain1-ns patch domain sample-domain1 --type=json '-p=[{\u0026quot;op\u0026quot;: \u0026quot;replace\u0026quot;, \u0026quot;path\u0026quot;: \u0026quot;/spec/restartVersion\u0026quot;, \u0026quot;value\u0026quot;: \u0026quot;2\u0026quot; }]' Option 3: Use the sample helper script.\n Call /tmp/mii-sample/utils/patch-restart-version.sh -n sample-domain1-ns -d sample-domain1. This will perform the same kubectl get and kubectl patch commands as Option 2. Wait for the roll to complete.\nNow that you\u0026rsquo;ve started a domain roll, you\u0026rsquo;ll need to wait for it to complete if you want to verify that the data source was deployed.\n One way to do this is to call kubectl get pods -n sample-domain1-ns --watch and wait for the pods to cycle back to their ready state.\n Alternatively, you can run /tmp/mii-sample/utils/wl-pod-wait.sh -p 3. This is a utility script that provides useful information about a domain\u0026rsquo;s pods and waits for them to reach a ready state, reach their target restartVersion, and reach their target image before exiting.\n Click here to expand the `wl-pod-wait.sh` usage. $ ./wl-pod-wait.sh -? Usage: wl-pod-wait.sh [-n mynamespace] [-d mydomainuid] \\ [-p expected_pod_count] \\ [-t timeout_secs] \\ [-q] Exits non-zero if 'timeout_secs' is reached before 'pod_count' is reached. Parameters: -d \u0026lt;domain_uid\u0026gt; : Defaults to 'sample-domain1'. -n \u0026lt;namespace\u0026gt; : Defaults to 'sample-domain1-ns'. 
pod_count \u0026gt; 0 : Wait until exactly 'pod_count' WebLogic server pods for a domain all (a) are ready, (b) have the same 'domainRestartVersion' label value as the current domain resource's 'spec.restartVersion, and (c) have the same image as the current domain resource's image. pod_count = 0 : Wait until there are no running WebLogic server pods for a domain. The default. -t \u0026lt;timeout\u0026gt; : Timeout in seconds. Defaults to '600'. -q : Quiet mode. Show only a count of wl pods that have reached the desired criteria. -? : This help. Click here to expand sample output from `wl-pod-wait.sh` that shows a rolling domain. @@ [2020-04-30T13:53:19][seconds=0] Info: Waiting up to 600 seconds for exactly '3' WebLogic server pods to reach the following criteria: @@ [2020-04-30T13:53:19][seconds=0] Info: ready='true' @@ [2020-04-30T13:53:19][seconds=0] Info: image='model-in-image:WLS-v1' @@ [2020-04-30T13:53:19][seconds=0] Info: domainRestartVersion='2' @@ [2020-04-30T13:53:19][seconds=0] Info: namespace='sample-domain1-ns' @@ [2020-04-30T13:53:19][seconds=0] Info: domainUID='sample-domain1' @@ [2020-04-30T13:53:19][seconds=0] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:53:19][seconds=0] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-introspect-domain-job-wlkpr' '' '' '' 'Pending' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:53:20][seconds=1] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:53:20][seconds=1] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-introspect-domain-job-wlkpr' '' '' '' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:18][seconds=59] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:54:18][seconds=59] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----------------------- ------ ----------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-introspect-domain-job-wlkpr' '' '' '' 'Succeeded' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:19][seconds=60] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:54:19][seconds=60] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:31][seconds=72] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:54:31][seconds=72] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:40][seconds=81] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:54:40][seconds=81] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:52][seconds=93] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:54:52][seconds=93] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:58][seconds=99] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:54:58][seconds=99] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'false' 'Pending' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:00][seconds=101] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:00][seconds=101] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:12][seconds=113] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:12][seconds=113] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:24][seconds=125] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:55:24][seconds=125] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:33][seconds=134] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:33][seconds=134] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:34][seconds=135] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:34][seconds=135] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'false' 'Pending' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:40][seconds=141] Info: '1' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:55:40][seconds=141] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:44][seconds=145] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:44][seconds=145] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:56:25][seconds=186] Info: '2' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:56:25][seconds=186] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:56:26][seconds=187] Info: '2' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:56:26][seconds=187] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'false' 'Pending' @@ [2020-04-30T13:56:30][seconds=191] Info: '2' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:56:30][seconds=191] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:56:34][seconds=195] Info: '2' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:56:34][seconds=195] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '2' 'model-in-image:WLS-v1' 'false' 'Pending' @@ [2020-04-30T13:57:09][seconds=230] Info: '3' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:57:09][seconds=230] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '2' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:57:09][seconds=230] Info: Success! After your domain is running, you can call the sample web application to determine if the data source was deployed.\nSend a web application request to the ingress controller:\n$ curl -s -S -m 10 -H 'host: sample-domain1-cluster-cluster-1.mii-sample.org' \\ http://localhost:30305/myapp_war/index.jsp Or, if Traefik is unavailable and your Administration Server pod is running, you can run kubectl exec:\n$ kubectl exec -n sample-domain1-ns sample-domain1-admin-server -- bash -c \\ \u0026quot;curl -s -S -m 10 http://sample-domain1-cluster-cluster-1:8001/myapp_war/index.jsp\u0026quot; You should see something like the following:\n Click here to see the expected web application output. $ curl -s -S -m 10 -H 'host: sample-domain1-cluster-cluster-1.mii-sample.org' \\ http://localhost:30305/myapp_war/index.jsp \u0026lt;html\u0026gt;\u0026lt;body\u0026gt;\u0026lt;pre\u0026gt; ***************************************************************** Hello World! This is version 'v1' of the mii-sample JSP web-app. Welcome to WebLogic server 'managed-server1'! 
domain UID = 'sample-domain1' domain name = 'domain1' Found 1 local cluster runtime: Cluster 'cluster-1' Found 1 local data source: Datasource 'mynewdatasource': State='Running' ***************************************************************** \u0026lt;/pre\u0026gt;\u0026lt;/body\u0026gt;\u0026lt;/html\u0026gt; If you see an error, then consult Debugging in the Model in Image user guide.\nThis completes the sample scenarios.\nCleanup To remove the resources you have created in these samples:\n Delete the domain resources.\n$ /tmp/operator-source/kubernetes/samples/scripts/delete-domain/delete-weblogic-domain-resources.sh -d sample-domain1 $ /tmp/operator-source/kubernetes/samples/scripts/delete-domain/delete-weblogic-domain-resources.sh -d sample-domain2 This deletes the domain and any related resources that are labeled with the domain UID sample-domain1 and sample-domain2.\nIt leaves the namespace intact, the operator running, the load balancer running (if installed), and the database running (if installed).\n Note: When you delete a domain, the operator should detect your domain deletion and shut down its pods. Wait for these pods to exit before deleting the operator that monitors the sample-domain1-ns namespace. 
You can monitor this process using the command kubectl get pods -n sample-domain1-ns --watch (ctrl-c to exit).\n If you set up the Traefik ingress controller:\n$ helm delete --purge traefik-operator $ kubectl delete namespace traefik If you set up a database for JRF:\n$ /tmp/operator-source/kubernetes/samples/scripts/create-oracle-db-service/stop-db-service.sh Delete the operator and its namespace:\n$ helm delete --purge sample-weblogic-operator $ kubectl delete namespace sample-weblogic-operator-ns Delete the domain\u0026rsquo;s namespace:\n$ kubectl delete namespace sample-domain1-ns Delete the images you may have created in this sample:\n$ docker image rm model-in-image:WLS-v1 $ docker image rm model-in-image:WLS-v2 $ docker image rm model-in-image:JRF-v1 $ docker image rm model-in-image:JRF-v2 References For references to the relevant user documentation, see:\n Model in Image user documentation Oracle WebLogic Server Deploy Tooling Oracle WebLogic Image Tool "
+ "content": " This feature is supported only in 3.0.0-rc1.\n Contents Introduction Model in Image domain types (WLS, JRF, and Restricted JRF) Use cases Sample directory structure Prerequisites for all domain types Additional prerequisites for JRF domains Initial use case: An initial WebLogic domain Update1 use case: Dynamically adding a data source using a model ConfigMap Cleanup References Introduction This sample demonstrates deploying a Model in Image domain home source type. Unlike Domain in PV and Domain in Image, Model in Image eliminates the need to pre-create your WebLogic domain home prior to deploying your domain resource. Instead, Model in Image uses a WebLogic Deploy Tooling (WDT) model to specify your WebLogic configuration.\nWDT models are a convenient and simple alternative to WebLogic WLST configuration scripts and templates. They compactly define a WebLogic domain using YAML files and support including application archives in a ZIP file. The WDT model format is described in the open source, WebLogic Deploy Tooling GitHub project, and the required directory structure for a WDT archive is specifically discussed here.\nFor more information on Model in Image, see the Model in Image user guide. For a comparison of Model in Image to other domain home source types, see Choose a domain home source type.\nModel in Image domain types (WLS, JRF, and Restricted JRF) There are three types of domains supported by Model in Image: a standard WLS domain, an Oracle Fusion Middleware Infrastructure Java Required Files (JRF) domain, and a RestrictedJRF domain. 
This sample demonstrates the WLS and JRF types.\nThe JRF domain path through the sample includes additional steps required for JRF: deploying an infrastructure database, initializing the database using the Repository Creation Utility (RCU) tool, referencing the infrastructure database from the WebLogic configuration, setting an Oracle Platform Security Services (OPSS) wallet password, and exporting/importing an OPSS wallet file. JRF domains may be used by Oracle products that layer on top of WebLogic Server, such as SOA and OSB. Similarly, RestrictedJRF domains may be used by Oracle layered products, such as Oracle Communications products.\nUse cases This sample demonstrates two Model in Image use cases:\n Initial: An initial WebLogic domain with the following characteristics:\n Image model-in-image:WLS-v1 with: A WebLogic installation A WebLogic Deploy Tooling (WDT) installation A WDT archive with version v1 of an exploded Java EE web application A WDT model with: A WebLogic Administration Server A WebLogic cluster A reference to the web application Kubernetes Secrets: WebLogic credentials Required WDT runtime password A domain resource with: spec.domainHomeSourceType: FromModel spec.image: model-in-image:WLS-v1 References to the secrets Update1: Demonstrates updating the initial domain by dynamically adding a data source using a model ConfigMap:\n Image model-in-image:WLS-v1: Same image as Initial use case Kubernetes Secrets: Same as Initial use case plus secrets for data source credentials and URL Kubernetes ConfigMap with: A WDT model for a data source targeted to the cluster A domain resource with: Same as Initial use case plus: spec.model.configMap referencing the ConfigMap References to data source secrets Sample directory structure The sample contains the following files and directories:\n Location Description domain-resources JRF and WLS domain resources. archives Source code location for WebLogic Deploy Tooling application ZIP archives. 
model-images Staging for each model image\u0026rsquo;s WDT YAML, WDT properties, and WDT archive ZIP files. The directories in model images are named for their respective images. model-configmaps Staging files for a model ConfigMap that configures a data source. ingresses Ingress resources. utils/wl-pod-wait.sh Utility for watching the pods in a domain reach their expected restartVersion, image name, and ready state. utils/patch-restart-version.sh Utility for updating a running domain spec.restartVersion field (which causes it to \u0026lsquo;re-introspect\u0026rsquo; and \u0026lsquo;roll\u0026rsquo;). utils/opss-wallet.sh Utility for exporting or importing a JRF domain OPSS wallet file. Prerequisites for all domain types Choose the type of domain you\u0026rsquo;re going to use throughout the sample, WLS or JRF.\n The first time you try this sample, we recommend that you choose WLS even if you\u0026rsquo;re familiar with JRF. This is because WLS is simpler and will more easily familiarize you with Model in Image concepts. We recommend choosing JRF only if you are already familiar with JRF, you have already tried the WLS path through this sample, and you have a definite use case where you need to use JRF. 
The JAVA_HOME environment variable must be set and must reference a valid JDK 8 or 11 installation.\n Get the operator source from the release/3.0.0-rc1 branch and put it in /tmp/operator-source.\nFor example:\n$ mkdir /tmp/operator-source $ cd /tmp/operator-source $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git $ git checkout release/3.0.0-rc1 Note: We will refer to the top directory of the operator source tree as /tmp/operator-source; however, you can use a different location.\n For additional information about obtaining the operator source, see the Developer Guide Requirements.\n Copy the sample to a new directory; for example, use directory /tmp/mii-sample.\n$ mkdir /tmp/mii-sample $ cp -r /tmp/operator-source/kubernetes/samples/scripts/create-weblogic-domain/model-in-image/* /tmp/mii-sample Note: We will refer to this working copy of the sample as /tmp/mii-sample; however, you can use a different location. Make sure an operator is set up to manage namespace sample-domain1-ns. 
Also, make sure a Traefik ingress controller is managing the same namespace and listening on port 30305.\nFor example, follow the same steps as the Quick Start guide from the beginning through to the Prepare for a domain step.\nMake sure you stop when you complete the \u0026ldquo;Prepare for a domain\u0026rdquo; step and then resume following these instructions.\n Set up ingresses that will redirect HTTP from Traefik port 30305 to the clusters in this sample\u0026rsquo;s WebLogic domains.\n Option 1: To create the ingresses, use the following YAML to create a file called /tmp/mii-sample/ingresses/myingresses.yaml and then call kubectl apply -f /tmp/mii-sample/ingresses/myingresses.yaml:\napiVersion: extensions/v1beta1 kind: Ingress metadata: name: traefik-ingress-sample-domain1-admin-server namespace: sample-domain1-ns labels: weblogic.domainUID: sample-domain1 annotations: kubernetes.io/ingress.class: traefik spec: rules: - host: http: paths: - path: /console backend: serviceName: sample-domain1-admin-server servicePort: 7001 --- apiVersion: extensions/v1beta1 kind: Ingress metadata: name: traefik-ingress-sample-domain1-cluster-cluster-1 namespace: sample-domain1-ns labels: weblogic.domainUID: sample-domain1 annotations: kubernetes.io/ingress.class: traefik spec: rules: - host: sample-domain1-cluster-cluster-1.mii-sample.org http: paths: - path: backend: serviceName: sample-domain1-cluster-cluster-1 servicePort: 8001 --- apiVersion: extensions/v1beta1 kind: Ingress metadata: name: traefik-ingress-sample-domain1-cluster-cluster-2 namespace: sample-domain1-ns labels: weblogic.domainUID: sample-domain1 annotations: kubernetes.io/ingress.class: traefik spec: rules: - host: sample-domain1-cluster-cluster-2.mii-sample.org http: paths: - path: backend: serviceName: sample-domain1-cluster-cluster-2 servicePort: 8001 --- apiVersion: extensions/v1beta1 kind: Ingress metadata: name: traefik-ingress-sample-domain2-cluster-cluster-1 namespace: sample-domain1-ns labels: 
weblogic.domainUID: sample-domain2 annotations: kubernetes.io/ingress.class: traefik spec: rules: - host: sample-domain2-cluster-cluster-1.mii-sample.org http: paths: - path: backend: serviceName: sample-domain2-cluster-cluster-1 servicePort: 8001 Option 2: Run kubectl apply -f on each of the ingress YAML files that are already included in the sample source /tmp/mii-sample/ingresses directory:\n $ cd /tmp/mii-sample/ingresses $ kubectl apply -f traefik-ingress-sample-domain1-admin-server.yaml $ kubectl apply -f traefik-ingress-sample-domain1-cluster-cluster-1.yaml $ kubectl apply -f traefik-ingress-sample-domain1-cluster-cluster-2.yaml $ kubectl apply -f traefik-ingress-sample-domain2-cluster-cluster-1.yaml $ kubectl apply -f traefik-ingress-sample-domain2-cluster-cluster-2.yaml NOTE: We give each cluster ingress a different host name that is decorated using both its operator domain UID and its cluster name. This makes each cluster uniquely addressable even when cluster names are the same across different clusters. When using curl to access the WebLogic domain through the ingress, you will need to supply a host name header that matches the host names in the ingress.\n For more information on ingresses and load balancers, see Ingress.\n Obtain the WebLogic 12.2.1.4 image that is required to create the sample\u0026rsquo;s model images.\na. Use a browser to access Oracle Container Registry.\nb. Choose an image location: for JRF domains, select Middleware, then fmw-infrastructure; for WLS domains, select Middleware, then weblogic.\nc. Select Sign In and accept the license agreement.\nd. Use your terminal to log in to Docker locally: docker login container-registry.oracle.com.\ne. Later in this sample, when you run WebLogic Image Tool commands, the tool will use the image as a base image for creating model images. 
Specifically, the tool will implicitly call docker pull for one of the above licensed images as specified in the tool\u0026rsquo;s command line using the --fromImage parameter. For JRF, this sample specifies container-registry.oracle.com/middleware/fmw-infrastructure:12.2.1.4, and for WLS, the sample specifies container-registry.oracle.com/middleware/weblogic:12.2.1.4.\nIf you prefer, you can create your own base image and then substitute this image name in the WebLogic Image Tool --fromImage parameter throughout this sample. See Preparing a Base Image.\n Download the latest WebLogic Deploy Tooling and WebLogic Image Tool installer ZIP files to your /tmp/mii-sample/model-images directory.\nBoth WDT and WIT are required to create your Model in Image Docker images. Download the latest version of each tool\u0026rsquo;s installer ZIP file to the /tmp/mii-sample/model-images directory.\nFor example, visit the GitHub WebLogic Deploy Tooling Releases and WebLogic Image Tool Releases web pages to determine the latest release version for each, and then, assuming the version numbers are 1.9.3 and 1.8.4 respectively, call:\n$ curl -m 30 -fL https://github.com/oracle/weblogic-deploy-tooling/releases/download/release-1.9.3/weblogic-deploy.zip \\ -o /tmp/mii-sample/model-images/weblogic-deploy.zip $ curl -m 30 -fL https://github.com/oracle/weblogic-image-tool/releases/download/release-1.8.4/imagetool.zip \\ -o /tmp/mii-sample/model-images/imagetool.zip Set up the WebLogic Image Tool.\nRun the following commands:\n$ cd /tmp/mii-sample/model-images $ unzip imagetool.zip $ ./imagetool/bin/imagetool.sh cache addInstaller \\ --type wdt \\ --version latest \\ --path /tmp/mii-sample/model-images/weblogic-deploy.zip These steps will install WIT to the /tmp/mii-sample/model-images/imagetool directory, plus put a wdt_latest entry in the tool\u0026rsquo;s cache which points to the WDT ZIP installer. 
We will use WIT later in the sample for creating model images.\n Additional prerequisites for JRF domains NOTE: If you\u0026rsquo;re using a WLS domain type, skip this section and continue here.\n JRF Prerequisites Contents Introduction to JRF setups Set up and initialize an infrastructure database Increase introspection job timeout Important considerations for RCU model attributes, domain resource attributes, and secrets Introduction to JRF setups NOTE: The requirements in this section are in addition to Prerequisites for all domain types.\n A JRF domain requires an infrastructure database, initializing this database with RCU, and configuring your domain to access this database. All of these steps must occur before you create your domain.\nSet up and initialize an infrastructure database A JRF domain requires an infrastructure database and also requires initializing this database with a schema and a set of tables. The following example shows how to set up a database and use the RCU tool to create the infrastructure schema for a JRF domain. The database is set up with the following attributes:\n Attribute Value database Kubernetes namespace default database Kubernetes pod oracle-db database image container-registry.oracle.com/database/enterprise:12.2.0.1-slim database password Oradoc_db1 infrastructure schema prefix FMW1 infrastructure schema password Oradoc_db1 database URL oracle-db.default.svc.cluster.local:1521/devpdb.k8s Ensure that you have access to the database image, and then create a deployment using it:\n Use a browser to log in to https://container-registry.oracle.com, select database-\u0026gt;enterprise and accept the license agreement.\n Get the database image:\n In the local shell, docker login container-registry.oracle.com. In the local shell, docker pull container-registry.oracle.com/database/enterprise:12.2.0.1-slim. 
Use the sample script in /tmp/operator-source/kubernetes/samples/scripts/create-oracle-db-service to create an Oracle database running in the pod, oracle-db.\n$ cd /tmp/operator-source/kubernetes/samples/scripts/create-oracle-db-service $ start-db-service.sh This script will deploy a database in the default namespace with the connect string oracle-db.default.svc.cluster.local:1521/devpdb.k8s, and administration password Oradoc_db1.\nThis step is based on the steps documented in Run a Database.\nWARNING: The Oracle Database Docker images are supported only for non-production use. For more details, see My Oracle Support note: Oracle Support for Database Running on Docker (Doc ID 2216342.1).\n Use the sample script in /tmp/operator-source/kubernetes/samples/scripts/create-rcu-schema to create the RCU schema with the schema prefix FMW1.\nNote that this script assumes Oradoc_db1 is the DBA password, Oradoc_db1 is the schema password, and that the database URL is oracle-db.default.svc.cluster.local:1521/devpdb.k8s.\n$ cd /tmp/operator-source/kubernetes/samples/scripts/create-rcu-schema $ ./create-rcu-schema.sh -s FMW1 -i container-registry.oracle.com/middleware/fmw-infrastructure:12.2.1.4 NOTE: If you need to drop the repository, use this command:\n$ drop-rcu-schema.sh -s FMW1 Increase introspection job timeout The JRF domain home creation can take more time than the introspection job\u0026rsquo;s default timeout. You should increase the timeout for the introspection job. Use the configuration.introspectorJobActiveDeadlineSeconds in your domain resource to override the default with a value of at least 300 seconds (the default is 120 seconds). 
Note that the JRF versions of the domain resource files that are provided in /tmp/mii-sample/domain-resources already set this value.\nImportant considerations for RCU model attributes, domain resource attributes, and secrets To allow Model in Image to access the database and OPSS wallet, you must create an RCU access secret containing the database connect string, user name, and password that\u0026rsquo;s referenced from your model and an OPSS wallet password secret that\u0026rsquo;s referenced from your domain resource before deploying your domain. It\u0026rsquo;s also necessary to define an RCUDbInfo stanza in your model.\nThe sample includes examples of JRF models and domain resources in the /tmp/mii-sample/model-images and /tmp/mii-sample/domain-resources directories, and instructions in the following sections will describe setting up the RCU and OPSS secrets.\nWhen you follow the instructions later in this sample, avoid instructions that are WLS only, and substitute JRF for WLS in the corresponding model image tags and domain resource file names.\nFor example:\n JRF domain resources in this sample have an opss.walletPasswordSecret field that references a secret named sample-domain1-opss-wallet-password-secret, with password=welcome1.\n JRF image models in this sample have a domainInfo -\u0026gt; RCUDbInfo stanza that reference a sample-domain1-rcu-access secret with appropriate values for attributes rcu_prefix, rcu_schema_password, and rcu_db_conn_string for accessing the Oracle database that you deployed to the default namespace as one of the prerequisite steps.\n Important considerations for reusing or sharing OPSS tables We do not recommend that most users share OPSS tables. 
Extreme caution is required when sharing OPSS tables between domains.\n When you successfully deploy your JRF domain resource for the first time, the introspector job will initialize the OPSS tables for the domain using the domainInfo -\u0026gt; RCUDbInfo stanza in the WDT model plus the configuration.opss.walletPasswordSecret specified in the domain resource. The job will also create a new domain home. Finally, the operator will also capture an OPSS wallet file from the new domain\u0026rsquo;s local directory and place this file in a new Kubernetes ConfigMap.\nThere are scenarios when the domain needs to be recreated between updates, such as when WebLogic credentials are changed, security roles defined in the WDT model have been changed, or you want to share the same infrastructure tables with different domains. In these scenarios, the operator needs the walletPasswordSecret as well as the OPSS wallet file, together with the exact information in domainInfo -\u0026gt; RCUDbInfo so that the domain can be recreated and access the same set of tables. Without the wallet file and wallet password, you will not be able to recreate a domain accessing the same set of tables, therefore we strongly recommend that you back up the wallet file.\nTo recover a domain\u0026rsquo;s OPSS tables between domain restarts or to share an OPSS schema between different domains, it is necessary to extract this wallet file from the domain\u0026rsquo;s automatically deployed introspector ConfigMap and save the OPSS wallet password secret that was used for the original domain. 
The wallet password and wallet file are needed again when you recreate the domain or share the database with other domains.\nTo save the wallet file, assuming that your namespace is sample-domain1-ns and your domain UID is sample-domain1:\n $ kubectl -n sample-domain1-ns \\ get configmap sample-domain1-weblogic-domain-introspect-cm \\ -o jsonpath='{.data.ewallet\\.p12}' \\ \u0026gt; ./ewallet.p12 Alternatively, you can save the file using the sample\u0026rsquo;s wallet utility:\n $ /tmp/mii-sample/utils/opss-wallet.sh -n sample-domain1-ns -d sample-domain1 -wf ./ewallet.p12 # For help: /tmp/mii-sample/utils/opss-wallet.sh -? Important! Back up your wallet file to a safe location that can be retrieved later.\nTo reuse the wallet file in subsequent redeployments or to share the domain\u0026rsquo;s OPSS tables between different domains:\n Load the saved wallet file into a secret with a key named walletFile (again, assuming that your domain UID is sample-domain1 and your namespace is sample-domain1-ns): $ kubectl -n sample-domain1-ns create secret generic sample-domain1-opss-walletfile-secret \\ --from-file=walletFile=./ewallet.p12 $ kubectl -n sample-domain1-ns label secret sample-domain1-opss-walletfile-secret \\ weblogic.domainUID=sample-domain1 Alternatively, use the sample\u0026rsquo;s wallet utility:\n $ /tmp/mii-sample/utils/opss-wallet.sh -n sample-domain1-ns -d sample-domain1 -wf ./ewallet.p12 -ws sample-domain1-opss-walletfile-secret # For help: /tmp/mii-sample/utils/opss-wallet.sh -? 
Modify your domain resource JRF YAML files to provide the wallet file secret name, for example: configuration: opss: # Name of secret with walletPassword for extracting the wallet walletPasswordSecret: sample-domain1-opss-wallet-password-secret # Name of secret with walletFile containing base64 encoded opss wallet walletFileSecret: sample-domain1-opss-walletfile-secret Note: The sample JRF domain resource files included in /tmp/mii-sample/domain-resources already have the above YAML stanza.\n Initial use case Contents Overview Image creation Image creation - Introduction Understanding our first archive Staging a ZIP file of the archive Staging model files Creating the image with WIT Deploy resources Deploy resources - Introduction Secrets Domain resource Overview In this use case, we set up an initial WebLogic domain. This involves:\n A WDT archive ZIP file that contains your applications. A WDT model that describes your WebLogic configuration. A Docker image that contains your WDT model files and archive. Creating secrets for the domain. Creating a domain resource for the domain that references your secrets and image. After the domain resource is deployed, the WebLogic operator will start an \u0026lsquo;introspector job\u0026rsquo; that converts your models into a WebLogic configuration, and then the operator will pass this configuration to each WebLogic Server in the domain.\nPerform the steps in Prerequisites for all domain types before performing the steps in this use case.\nIf you are taking the JRF path through the sample, then substitute JRF for WLS in your image names and directory paths. 
Also note that the JRF-v1 model YAML differs from the WLS-v1 YAML file (it contains an additional domainInfo -\u0026gt; RCUDbInfo stanza).\n Image creation - Introduction The goal of the initial use case \u0026lsquo;image creation\u0026rsquo; is to demonstrate using the WebLogic Image Tool to create an image named model-in-image:WLS-v1 from files that we will stage to /tmp/mii-sample/model-images/model-in-image:WLS-v1/. The staged files will contain a web application in a WDT archive, and WDT model configuration for a WebLogic Administration Server called admin-server and a WebLogic cluster called cluster-1.\nOverall, a Model in Image image must contain a WebLogic installation and also a WebLogic Deploy Tooling installation in its /u01/wdt/weblogic-deploy directory. In addition, if you have WDT model archive files, then the image must also contain these files in its /u01/wdt/models directory. Finally, an image may optionally also contain your WDT model YAML and properties files in the same /u01/wdt/models directory. If you do not specify WDT model YAML in your /u01/wdt/models directory, then the model YAML must be supplied dynamically using a Kubernetes ConfigMap that is referenced by your domain resource spec.model.configMap attribute. We will provide an example of using a model ConfigMap later in this sample.\nLet\u0026rsquo;s walk through the steps for creating the image model-in-image:WLS-v1:\n Understanding our first archive Staging a ZIP file of the archive Staging model files Creating the image with WIT Understanding our first archive The sample includes a predefined archive directory in /tmp/mii-sample/archives/archive-v1 that we will use to create an archive ZIP file for the image.\nThe archive top directory, named wlsdeploy, contains a directory named applications, which includes an \u0026lsquo;exploded\u0026rsquo; sample JSP web application in the directory, myapp-v1. 
Three useful aspects to remember about WDT archives are:\n A model image can contain multiple WDT archives. WDT archives can contain multiple applications, libraries, and other components. WDT archives have a well defined directory structure, which always has wlsdeploy as the top directory. If you are interested in the web application source, click here to see the JSP code. \u0026lt;%-- Copyright (c) 2019, 2020, Oracle Corporation and/or its affiliates. --%\u0026gt; \u0026lt;%-- Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. --%\u0026gt; \u0026lt;%@ page import=\u0026quot;javax.naming.InitialContext\u0026quot; %\u0026gt; \u0026lt;%@ page import=\u0026quot;javax.management.*\u0026quot; %\u0026gt; \u0026lt;%@ page import=\u0026quot;java.io.*\u0026quot; %\u0026gt; \u0026lt;% InitialContext ic = null; try { ic = new InitialContext(); String srName=System.getProperty(\u0026quot;weblogic.Name\u0026quot;); String domainUID=System.getenv(\u0026quot;DOMAIN_UID\u0026quot;); String domainName=System.getenv(\u0026quot;CUSTOM_DOMAIN_NAME\u0026quot;); out.println(\u0026quot;\u0026lt;html\u0026gt;\u0026lt;body\u0026gt;\u0026lt;pre\u0026gt;\u0026quot;); out.println(\u0026quot;*****************************************************************\u0026quot;); out.println(); out.println(\u0026quot;Hello World! 
This is version 'v1' of the mii-sample JSP web-app.\u0026quot;); out.println(); out.println(\u0026quot;Welcome to WebLogic server '\u0026quot; + srName + \u0026quot;'!\u0026quot;); out.println(); out.println(\u0026quot; domain UID = '\u0026quot; + domainUID +\u0026quot;'\u0026quot;); out.println(\u0026quot; domain name = '\u0026quot; + domainName +\u0026quot;'\u0026quot;); out.println(); MBeanServer mbs = (MBeanServer)ic.lookup(\u0026quot;java:comp/env/jmx/runtime\u0026quot;); // display the current server's cluster name Set\u0026lt;ObjectInstance\u0026gt; clusterRuntimes = mbs.queryMBeans(new ObjectName(\u0026quot;*:Type=ClusterRuntime,*\u0026quot;), null); out.println(\u0026quot;Found \u0026quot; + clusterRuntimes.size() + \u0026quot; local cluster runtime\u0026quot; + (String)((clusterRuntimes.size()!=1)?\u0026quot;s:\u0026quot;:\u0026quot;:\u0026quot;)); for (ObjectInstance clusterRuntime : clusterRuntimes) { String cName = (String)mbs.getAttribute(clusterRuntime.getObjectName(), \u0026quot;Name\u0026quot;); out.println(\u0026quot; Cluster '\u0026quot; + cName + \u0026quot;'\u0026quot;); } out.println(); // display local data sources ObjectName jdbcRuntime = new ObjectName(\u0026quot;com.bea:ServerRuntime=\u0026quot; + srName + \u0026quot;,Name=\u0026quot; + srName + \u0026quot;,Type=JDBCServiceRuntime\u0026quot;); ObjectName[] dataSources = (ObjectName[])mbs.getAttribute(jdbcRuntime, \u0026quot;JDBCDataSourceRuntimeMBeans\u0026quot;); out.println(\u0026quot;Found \u0026quot; + dataSources.length + \u0026quot; local data source\u0026quot; + (String)((dataSources.length!=1)?\u0026quot;s:\u0026quot;:\u0026quot;:\u0026quot;)); for (ObjectName dataSource : dataSources) { String dsName = (String)mbs.getAttribute(dataSource, \u0026quot;Name\u0026quot;); String dsState = (String)mbs.getAttribute(dataSource, \u0026quot;State\u0026quot;); out.println(\u0026quot; Datasource '\u0026quot; + dsName + \u0026quot;': State='\u0026quot; + dsState +\u0026quot;'\u0026quot;); } 
out.println(); out.println(\u0026quot;*****************************************************************\u0026quot;); } catch (Throwable t) { t.printStackTrace(new PrintStream(response.getOutputStream())); } finally { out.println(\u0026quot;\u0026lt;/pre\u0026gt;\u0026lt;/body\u0026gt;\u0026lt;/html\u0026gt;\u0026quot;); if (ic != null) ic.close(); } %\u0026gt; The application displays important details about the WebLogic Server that it\u0026rsquo;s running on: namely its domain name, cluster name, and server name, as well as the names of any data sources that are targeted to the server. You can also see that application output reports that it\u0026rsquo;s at version v1; we will update this to v2 in a future use case to demonstrate upgrading the application.\nStaging a ZIP file of the archive When we create our image, we will use the files in staging directory /tmp/mii-sample/model-in-image__WLS-v1. In preparation, we need it to contain a ZIP file of the WDT application archive.\nRun the following commands to create your application archive ZIP file and put it in the expected directory:\n# Delete existing archive.zip in case we have an old leftover version $ rm -f /tmp/mii-sample/model-images/model-in-image__WLS-v1/archive.zip # Move to the directory which contains the source files for our archive $ cd /tmp/mii-sample/archives/archive-v1 # Zip the archive to the location we will later use when we run the WebLogic Image Tool $ zip -r /tmp/mii-sample/model-images/model-in-image__WLS-v1/archive.zip wlsdeploy Staging model files In this step, we explore the staged WDT model YAML file and properties in directory /tmp/mii-sample/model-in-image__WLS-v1. The model in this directory references the web application in our archive, configures a WebLogic Administration Server, and configures a WebLogic cluster. 
It consists of only two files: model.10.properties, a file with a single property, and model.10.yaml, a YAML file with our WebLogic configuration. Here is the model.10.properties:\nCLUSTER_SIZE=5 Here is the WLS model.10.yaml:\ndomainInfo: AdminUserName: '@@SECRET:__weblogic-credentials__:username@@' AdminPassword: '@@SECRET:__weblogic-credentials__:password@@' ServerStartMode: 'prod' topology: Name: '@@ENV:CUSTOM_DOMAIN_NAME@@' AdminServerName: 'admin-server' Cluster: 'cluster-1': DynamicServers: ServerTemplate: 'cluster-1-template' ServerNamePrefix: 'managed-server' DynamicClusterSize: '@@PROP:CLUSTER_SIZE@@' MaxDynamicClusterSize: '@@PROP:CLUSTER_SIZE@@' MinDynamicClusterSize: '0' CalculatedListenPorts: false Server: 'admin-server': ListenPort: 7001 ServerTemplate: 'cluster-1-template': Cluster: 'cluster-1' ListenPort: 8001 appDeployments: Application: myapp: SourcePath: 'wlsdeploy/applications/myapp-v1' ModuleType: ear Target: 'cluster-1' Click here to expand the JRF `model.10.yaml`, and note the RCUDbInfo stanza and its references to a DOMAIN_UID-rcu-access secret. 
domainInfo: AdminUserName: '@@SECRET:__weblogic-credentials__:username@@' AdminPassword: '@@SECRET:__weblogic-credentials__:password@@' ServerStartMode: 'prod' RCUDbInfo: rcu_prefix: '@@SECRET:@@ENV:DOMAIN_UID@@-rcu-access:rcu_prefix@@' rcu_schema_password: '@@SECRET:@@ENV:DOMAIN_UID@@-rcu-access:rcu_schema_password@@' rcu_db_conn_string: '@@SECRET:@@ENV:DOMAIN_UID@@-rcu-access:rcu_db_conn_string@@' topology: AdminServerName: 'admin-server' Name: '@@ENV:CUSTOM_DOMAIN_NAME@@' Cluster: 'cluster-1': Server: 'admin-server': ListenPort: 7001 'managed-server1-c1-': Cluster: 'cluster-1' ListenPort: 8001 'managed-server2-c1-': Cluster: 'cluster-1' ListenPort: 8001 'managed-server3-c1-': Cluster: 'cluster-1' ListenPort: 8001 'managed-server4-c1-': Cluster: 'cluster-1' ListenPort: 8001 appDeployments: Application: myapp: SourcePath: 'wlsdeploy/applications/myapp-v1' ModuleType: ear Target: 'cluster-1' The model files:\n Define a WebLogic domain with:\n Cluster cluster-1 Administration Server admin-server A cluster-1 targeted ear application that\u0026rsquo;s located in the WDT archive ZIP file at wlsdeploy/applications/myapp-v1 Leverage macros to inject external values:\n The property file CLUSTER_SIZE property is referenced in the model YAML DynamicClusterSize and MaxDynamicClusterSize fields using a PROP macro. The model file domain name is injected using a custom environment variable named CUSTOM_DOMAIN_NAME using an ENV macro. We set this environment variable later in this sample using an env field in its domain resource. This conveniently provides a simple way to deploy multiple differently named domains using the same model image. The model file administrator user name and password are set using a weblogic-credentials secret macro reference to the WebLogic credential secret. This secret is in turn referenced using the weblogicCredentialsSecret field in the domain resource. 
The weblogic-credentials is a reserved name that always dereferences to the owning domain resource actual WebLogic credentials secret name. A Model in Image image can contain multiple properties files, archive ZIP files, and YAML files, but in this sample we use just one of each. For a full discussion of Model in Images model file naming conventions, file loading order, and macro syntax, see Model files in the Model in Image user documentation.\nCreating the image with WIT Note: If you are using JRF in this sample, substitute JRF for each occurrence of WLS in the imagetool command line below, plus substitute container-registry.oracle.com/middleware/fmw-infrastructure:12.2.1.4 for the --fromImage value.\n At this point, we have staged all of the files needed for image model-in-image:WLS-v1, they include:\n /tmp/mii-sample/model-images/weblogic-deploy.zip /tmp/mii-sample/model-images/model-in-image__WLS-v1/model.10.yaml /tmp/mii-sample/model-images/model-in-image__WLS-v1/model.10.properties /tmp/mii-sample/model-images/model-in-image__WLS-v1/archive.zip If you don\u0026rsquo;t see the weblogic-deploy.zip file, then it means that you missed a step in the prerequisites.\nNow let\u0026rsquo;s use the Image Tool to create an image named model-in-image:WLS-v1 that\u0026rsquo;s layered on a base WebLogic image. 
We\u0026rsquo;ve already set up this tool during the prerequisite steps at the beginning of this sample.\nRun the following commands to create the model image and verify that it worked:\n$ cd /tmp/mii-sample/model-images $ ./imagetool/bin/imagetool.sh update \\ --tag model-in-image:WLS-v1 \\ --fromImage container-registry.oracle.com/middleware/weblogic:12.2.1.4 \\ --wdtModel ./model-in-image__WLS-v1/model.10.yaml \\ --wdtVariables ./model-in-image__WLS-v1/model.10.properties \\ --wdtArchive ./model-in-image__WLS-v1/archive.zip \\ --wdtModelOnly \\ --wdtDomainType WLS If you don\u0026rsquo;t see the imagetool directory, then it means that you missed a step in the prerequisites.\nThis command runs the WebLogic Image Tool in its Model in Image mode, and does the following:\n Builds the final Docker image as a layer on the container-registry.oracle.com/middleware/weblogic:12.2.1.4 base image. Copies the WDT ZIP file that\u0026rsquo;s referenced in the WIT cache into the image. Note that we cached WDT in WIT using the keyword latest when we set up the cache during the sample prerequisites steps. This lets WIT implicitly assume it\u0026rsquo;s the desired WDT version and removes the need to pass a -wdtVersion flag. Copies the specified WDT model, properties, and application archives to image location /u01/wdt/models. When the command succeeds, it should end with output like:\n[INFO ] Build successful. Build time=36s. Image tag=model-in-image:WLS-v1 Also, if you run the docker images command, then you should see a Docker image named model-in-image:WLS-v1.\nDeploy resources - Introduction In this section we will deploy our new image to namespace sample-domain1-ns, including the following steps:\n Create a secret containing your WebLogic administrator user name and password. Create a secret containing your Model in Image runtime encryption password: All Model in Image domains must supply a runtime encryption secret with a password value. 
It is used to encrypt configuration that is passed around internally by the operator. The value must be kept private but can be arbitrary; you can optionally supply a different secret value every time you restart the domain. If your domain type is JRF, create secrets containing your RCU access URL, credentials, and prefix. Deploy a domain resource YAML file that references the new image. Wait for the domain\u0026rsquo;s pods to start and reach their ready state. Secrets First, create the secrets needed by both WLS and JRF type model domains. In this case, we have two secrets.\nRun the following kubectl commands to deploy the required secrets:\n$ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-weblogic-credentials \\ --from-literal=username=weblogic --from-literal=password=welcome1 $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-weblogic-credentials \\ weblogic.domainUID=sample-domain1 $ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-runtime-encryption-secret \\ --from-literal=password=my_runtime_password $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-runtime-encryption-secret \\ weblogic.domainUID=sample-domain1 Some important details about these secrets:\n The WebLogic credentials secret:\n It is required and must contain username and password fields. It must be referenced by the spec.weblogicCredentialsSecret field in your domain resource. It also must be referenced by macros in the domainInfo.AdminUserName and domainInfo.AdminPassword fields in your model YAML file. The Model WDT runtime secret:\n This is a special secret required by Model in Image. It must contain a password field. It must be referenced using the spec.model.runtimeEncryptionSecret attribute in its domain resource. It must remain the same for as long as the domain is deployed to Kubernetes, but can be changed between deployments. 
It is used to encrypt data as it\u0026rsquo;s internally passed using log files from the domain\u0026rsquo;s introspector job and on to its WebLogic Server pods. Deleting and recreating the secrets:\n We delete a secret before creating it, otherwise the create command will fail if the secret already exists. This allows us to change the secret when using the kubectl create secret command. We name and label secrets using their associated domain UID for two reasons:\n To make it obvious which secrets belong to which domains. To make it easier to clean up a domain. Typical cleanup scripts use the weblogic.domainUID label as a convenience for finding all resources associated with a domain. If you\u0026rsquo;re following the JRF path through the sample, then you also need to deploy the additional secret referenced by macros in the JRF model RCUDbInfo clause, plus an OPSS wallet password secret. For details about the uses of these secrets, see the Model in Image user documentation.\n Click here for the commands for deploying additional secrets for JRF. $ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-rcu-access \\ --from-literal=rcu_prefix=FMW1 \\ --from-literal=rcu_schema_password=Oradoc_db1 \\ --from-literal=rcu_db_conn_string=oracle-db.default.svc.cluster.local:1521/devpdb.k8s $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-rcu-access \\ weblogic.domainUID=sample-domain1 $ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-opss-wallet-password-secret \\ --from-literal=walletPassword=welcome1 $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-opss-wallet-password-secret \\ weblogic.domainUID=sample-domain1 Domain resource Now let\u0026rsquo;s create a domain resource. 
A domain resource is the key resource that tells the operator how to deploy a WebLogic domain.\nCopy the following to a file called /tmp/mii-sample/mii-initial.yaml or similar, or use the file /tmp/mii-sample/domain-resources/WLS/mii-initial-d1-WLS-v1.yaml that is included in the sample source.\n Click here to expand the WLS domain resource YAML. # # This is an example of how to define a Domain resource. # # If you are using 3.0.0-rc1, then the version on the following line # should be `v7` not `v6`. apiVersion: \u0026quot;weblogic.oracle/v6\u0026quot; kind: Domain metadata: name: sample-domain1 namespace: sample-domain1-ns labels: weblogic.resourceVersion: domain-v2 weblogic.domainUID: sample-domain1 spec: # Set to 'FromModel' to indicate 'Model in Image'. domainHomeSourceType: FromModel # The WebLogic Domain Home, this must be a location within # the image for 'Model in Image' domains. domainHome: /u01/domains/sample-domain1 # The WebLogic Server Docker image that the Operator uses to start the domain image: \u0026quot;model-in-image:WLS-v1\u0026quot; # Defaults to \u0026quot;Always\u0026quot; if image tag (version) is ':latest' imagePullPolicy: \u0026quot;IfNotPresent\u0026quot; # Identify which Secret contains the credentials for pulling an image #imagePullSecrets: #- name: regsecret # Identify which Secret contains the WebLogic Admin credentials, # the secret must contain 'username' and 'password' fields. webLogicCredentialsSecret: name: sample-domain1-weblogic-credentials # Whether to include the WebLogic server stdout in the pod's stdout, default is true includeServerOutInPodLog: true # Whether to enable overriding your log file location, see also 'logHome' #logHomeEnabled: false # The location for domain log, server logs, server out, and Node Manager log files # see also 'logHomeEnabled', 'volumes', and 'volumeMounts'. 
#logHome: /shared/logs/sample-domain1 # Set which WebLogic servers the Operator will start # - \u0026quot;NEVER\u0026quot; will not start any server in the domain # - \u0026quot;ADMIN_ONLY\u0026quot; will start up only the administration server (no managed servers will be started) # - \u0026quot;IF_NEEDED\u0026quot; will start all non-clustered servers, including the administration server, and clustered servers up to their replica count. serverStartPolicy: \u0026quot;IF_NEEDED\u0026quot; # Settings for all server pods in the domain including the introspector job pod serverPod: # Optional new or overridden environment variables for the domain's pods # - This sample uses CUSTOM_DOMAIN_NAME in its image model file # to set the Weblogic domain name env: - name: CUSTOM_DOMAIN_NAME value: \u0026quot;domain1\u0026quot; - name: JAVA_OPTIONS value: \u0026quot;-Dweblogic.StdoutDebugEnabled=false\u0026quot; - name: USER_MEM_ARGS value: \u0026quot;-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom \u0026quot; # Optional volumes and mounts for the domain's pods. See also 'logHome'. #volumes: #- name: weblogic-domain-storage-volume # persistentVolumeClaim: # claimName: sample-domain1-weblogic-sample-pvc #volumeMounts: #- mountPath: /shared # name: weblogic-domain-storage-volume # The desired behavior for starting the domain's administration server. 
adminServer: # The serverStartState legal values are \u0026quot;RUNNING\u0026quot; or \u0026quot;ADMIN\u0026quot; # \u0026quot;RUNNING\u0026quot; means the listed server will be started up to \u0026quot;RUNNING\u0026quot; mode # \u0026quot;ADMIN\u0026quot; means the listed server will be start up to \u0026quot;ADMIN\u0026quot; mode serverStartState: \u0026quot;RUNNING\u0026quot; # Setup a Kubernetes node port for the administration server default channel #adminService: # channels: # - channelName: default # nodePort: 30701 # The number of managed servers to start for unlisted clusters replicas: 1 # The desired behavior for starting a specific cluster's member servers clusters: - clusterName: cluster-1 serverStartState: \u0026quot;RUNNING\u0026quot; replicas: 2 # Change the `restartVersion` to force the introspector job to rerun # and apply any new model configuration, to also force a subsequent # roll of your domain's WebLogic pods. restartVersion: '1' configuration: # Settings for domainHomeSourceType 'FromModel' model: # Valid model domain types are 'WLS', 'JRF', and 'RestrictedJRF', default is 'WLS' domainType: \u0026quot;WLS\u0026quot; # Optional configmap for additional models and variable files #configMap: sample-domain1-wdt-config-map # All 'FromModel' domains require a runtimeEncryptionSecret with a 'password' field runtimeEncryptionSecret: sample-domain1-runtime-encryption-secret # Secrets that are referenced by model yaml macros # (the model yaml in the optional configMap or in the image) #secrets: #- sample-domain1-datasource-secret Click here to expand the JRF domain resource YAML. # Copyright (c) 2020, Oracle Corporation and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # # This is an example of how to define a Domain resource. # # If you are using 3.0.0-rc1, then the version on the following line # should be `v7` not `v6`. 
apiVersion: \u0026quot;weblogic.oracle/v6\u0026quot; kind: Domain metadata: name: sample-domain1 namespace: sample-domain1-ns labels: weblogic.resourceVersion: domain-v2 weblogic.domainUID: sample-domain1 spec: # Set to 'FromModel' to indicate 'Model in Image'. domainHomeSourceType: FromModel # The WebLogic Domain Home, this must be a location within # the image for 'Model in Image' domains. domainHome: /u01/domains/sample-domain1 # The WebLogic Server Docker image that the Operator uses to start the domain image: \u0026quot;model-in-image:JRF-v1\u0026quot; # Defaults to \u0026quot;Always\u0026quot; if image tag (version) is ':latest' imagePullPolicy: \u0026quot;IfNotPresent\u0026quot; # Identify which Secret contains the credentials for pulling an image #imagePullSecrets: #- name: regsecret # Identify which Secret contains the WebLogic Admin credentials, # the secret must contain 'username' and 'password' fields. webLogicCredentialsSecret: name: sample-domain1-weblogic-credentials # Whether to include the WebLogic server stdout in the pod's stdout, default is true includeServerOutInPodLog: true # Whether to enable overriding your log file location, see also 'logHome' #logHomeEnabled: false # The location for domain log, server logs, server out, and Node Manager log files # see also 'logHomeEnabled', 'volumes', and 'volumeMounts'. #logHome: /shared/logs/sample-domain1 # Set which WebLogic servers the Operator will start # - \u0026quot;NEVER\u0026quot; will not start any server in the domain # - \u0026quot;ADMIN_ONLY\u0026quot; will start up only the administration server (no managed servers will be started) # - \u0026quot;IF_NEEDED\u0026quot; will start all non-clustered servers, including the administration server, and clustered servers up to their replica count. 
serverStartPolicy: \u0026quot;IF_NEEDED\u0026quot; # Settings for all server pods in the domain including the introspector job pod serverPod: # Optional new or overridden environment variables for the domain's pods # - This sample uses CUSTOM_DOMAIN_NAME in its image model file # to set the Weblogic domain name env: - name: CUSTOM_DOMAIN_NAME value: \u0026quot;domain1\u0026quot; - name: JAVA_OPTIONS value: \u0026quot;-Dweblogic.StdoutDebugEnabled=false\u0026quot; - name: USER_MEM_ARGS value: \u0026quot;-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom \u0026quot; # Optional volumes and mounts for the domain's pods. See also 'logHome'. #volumes: #- name: weblogic-domain-storage-volume # persistentVolumeClaim: # claimName: sample-domain1-weblogic-sample-pvc #volumeMounts: #- mountPath: /shared # name: weblogic-domain-storage-volume # The desired behavior for starting the domain's administration server. adminServer: # The serverStartState legal values are \u0026quot;RUNNING\u0026quot; or \u0026quot;ADMIN\u0026quot; # \u0026quot;RUNNING\u0026quot; means the listed server will be started up to \u0026quot;RUNNING\u0026quot; mode # \u0026quot;ADMIN\u0026quot; means the listed server will be start up to \u0026quot;ADMIN\u0026quot; mode serverStartState: \u0026quot;RUNNING\u0026quot; # Setup a Kubernetes node port for the administration server default channel #adminService: # channels: # - channelName: default # nodePort: 30701 # The number of managed servers to start for unlisted clusters replicas: 1 # The desired behavior for starting a specific cluster's member servers clusters: - clusterName: cluster-1 serverStartState: \u0026quot;RUNNING\u0026quot; replicas: 2 # Change the restartVersion to force the introspector job to rerun # and apply any new model configuration, to also force a subsequent # roll of your domain's WebLogic pods. 
restartVersion: '1' configuration: # Settings for domainHomeSourceType 'FromModel' model: # Valid model domain types are 'WLS', 'JRF', and 'RestrictedJRF', default is 'WLS' domainType: \u0026quot;JRF\u0026quot; # Optional configmap for additional models and variable files #configMap: sample-domain1-wdt-config-map # All 'FromModel' domains require a runtimeEncryptionSecret with a 'password' field runtimeEncryptionSecret: sample-domain1-runtime-encryption-secret # Secrets that are referenced by model yaml macros # (the model yaml in the optional configMap or in the image) secrets: #- sample-domain1-datasource-secret - sample-domain1-rcu-access # Increase the introspector job active timeout value for JRF use cases introspectorJobActiveDeadlineSeconds: 300 opss: # Name of secret with walletPassword for extracting the wallet, used for JRF domains walletPasswordSecret: sample-domain1-opss-wallet-password-secret # Name of secret with walletFile containing base64 encoded opss wallet, used for JRF domains #walletFileSecret: sample-domain1-opss-walletfile-secret Run the following command to create the domain custom resource:\n$ kubectl apply -f /tmp/mii-sample/domain-resources/WLS/mii-initial-d1-WLS-v1.yaml Note: If you are choosing not to use the predefined domain resource YAML file and instead created your own domain resource file earlier, then substitute your custom file name in the above command. You might recall that we suggested naming it /tmp/mii-sample/mii-initial.yaml.\n If you run kubectl get pods -n sample-domain1-ns --watch, then you should see the introspector job run and your WebLogic Server pods start. The output should look something like this:\n Click here to expand. 
$ kubectl get pods -n sample-domain1-ns --watch NAME READY STATUS RESTARTS AGE sample-domain1-introspect-domain-job-lqqj9 0/1 Pending 0 0s sample-domain1-introspect-domain-job-lqqj9 0/1 ContainerCreating 0 0s sample-domain1-introspect-domain-job-lqqj9 1/1 Running 0 1s sample-domain1-introspect-domain-job-lqqj9 0/1 Completed 0 65s sample-domain1-introspect-domain-job-lqqj9 0/1 Terminating 0 65s sample-domain1-admin-server 0/1 Pending 0 0s sample-domain1-admin-server 0/1 ContainerCreating 0 0s sample-domain1-admin-server 0/1 Running 0 1s sample-domain1-admin-server 1/1 Running 0 32s sample-domain1-managed-server1 0/1 Pending 0 0s sample-domain1-managed-server2 0/1 Pending 0 0s sample-domain1-managed-server1 0/1 ContainerCreating 0 0s sample-domain1-managed-server2 0/1 ContainerCreating 0 0s sample-domain1-managed-server1 0/1 Running 0 2s sample-domain1-managed-server2 0/1 Running 0 2s sample-domain1-managed-server1 1/1 Running 0 43s sample-domain1-managed-server2 1/1 Running 0 42s Alternatively, you can run /tmp/mii-sample/utils/wl-pod-wait.sh -p 3. This is a utility script that provides useful information about a domain\u0026rsquo;s pods and waits for them to reach a ready state, reach their target restartVersion, and reach their target image before exiting.\n Click here to expand the `wl-pod-wait.sh` usage. $ ./wl-pod-wait.sh -? Usage: wl-pod-wait.sh [-n mynamespace] [-d mydomainuid] \\ [-p expected_pod_count] \\ [-t timeout_secs] \\ [-q] Exits non-zero if 'timeout_secs' is reached before 'pod_count' is reached. Parameters: -d \u0026lt;domain_uid\u0026gt; : Defaults to 'sample-domain1'. -n \u0026lt;namespace\u0026gt; : Defaults to 'sample-domain1-ns'. pod_count \u0026gt; 0 : Wait until exactly 'pod_count' WebLogic server pods for a domain all (a) are ready, (b) have the same 'domainRestartVersion' label value as the current domain resource's 'spec.restartVersion, and (c) have the same image as the current domain resource's image. 
pod_count = 0 : Wait until there are no running WebLogic server pods for a domain. The default. -t \u0026lt;timeout\u0026gt; : Timeout in seconds. Defaults to '600'. -q : Quiet mode. Show only a count of wl pods that have reached the desired criteria. -? : This help. Click here to expand sample output from `wl-pod-wait.sh`. @@ [2020-04-30T13:50:42][seconds=0] Info: Waiting up to 600 seconds for exactly '3' WebLogic server pods to reach the following criteria: @@ [2020-04-30T13:50:42][seconds=0] Info: ready='true' @@ [2020-04-30T13:50:42][seconds=0] Info: image='model-in-image:WLS-v1' @@ [2020-04-30T13:50:42][seconds=0] Info: domainRestartVersion='1' @@ [2020-04-30T13:50:42][seconds=0] Info: namespace='sample-domain1-ns' @@ [2020-04-30T13:50:42][seconds=0] Info: domainUID='sample-domain1' @@ [2020-04-30T13:50:42][seconds=0] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:50:42][seconds=0] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----- ----- --------- 'sample-domain1-introspect-domain-job-rkdkg' '' '' '' 'Pending' @@ [2020-04-30T13:50:45][seconds=3] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:50:45][seconds=3] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----- ----- --------- 'sample-domain1-introspect-domain-job-rkdkg' '' '' '' 'Running' @@ [2020-04-30T13:51:50][seconds=68] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:51:50][seconds=68] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE ---- ------- ----- ----- ----- @@ [2020-04-30T13:51:59][seconds=77] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:51:59][seconds=77] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE ----------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'false' 'Pending' @@ [2020-04-30T13:52:02][seconds=80] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:52:02][seconds=80] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE ----------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'false' 'Running' @@ [2020-04-30T13:52:32][seconds=110] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:52:32][seconds=110] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'false' 'Pending' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'false' 'Pending' @@ [2020-04-30T13:52:34][seconds=112] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:52:34][seconds=112] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'false' 'Running' @@ [2020-04-30T13:53:14][seconds=152] Info: '3' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:53:14][seconds=152] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:53:14][seconds=152] Info: Success! If you see an error, then consult Debugging in the Model in Image user guide.\nInvoke the web application Now that all the initial use case resources have been deployed, you can invoke the sample web application through the Traefik ingress controller\u0026rsquo;s NodePort. Note: The web application will display a list of any data sources it finds, but we don\u0026rsquo;t expect it to find any because the model doesn\u0026rsquo;t contain any at this point.\nSend a web application request to the load balancer:\n$ curl -s -S -m 10 -H 'host: sample-domain1-cluster-cluster-1.mii-sample.org' \\ http://localhost:30305/myapp_war/index.jsp Or, if Traefik is unavailable and your Administration Server pod is running, you can use kubectl exec:\n$ kubectl exec -n sample-domain1-ns sample-domain1-admin-server -- bash -c \\ \u0026quot;curl -s -S -m 10 http://sample-domain1-cluster-cluster-1:8001/myapp_war/index.jsp\u0026quot; You should see output like the following:\n$ curl -s -S -m 10 -H 'host: sample-domain1-cluster-cluster-1.mii-sample.org' \\ http://localhost:30305/myapp_war/index.jsp \u0026lt;html\u0026gt;\u0026lt;body\u0026gt;\u0026lt;pre\u0026gt; ***************************************************************** Hello World! This is version 'v1' of the mii-sample JSP web-app. Welcome to WebLogic server 'managed-server2'! 
domain UID = 'sample-domain1' domain name = 'domain1' Found 1 local cluster runtime: Cluster 'cluster-1' Found 0 local data sources: ***************************************************************** \u0026lt;/pre\u0026gt;\u0026lt;/body\u0026gt;\u0026lt;/html\u0026gt; Note: If you\u0026rsquo;re running your curl commands on a remote machine, then substitute localhost with an external address suitable for contacting your Kubernetes cluster. A Kubernetes cluster address that often works can be obtained by using the address just after https:// in the KubeDNS line of the output from the kubectl cluster-info command.\nIf you want to continue to the next use case, then leave your domain running.\nUpdate1 use case This use case demonstrates dynamically adding a data source to your running domain. It demonstrates several features of WDT and Model in Image:\n The syntax used for updating a model is exactly the same syntax you use for creating the original model. A domain\u0026rsquo;s model can be updated dynamically by supplying a model update in a file in a Kubernetes ConfigMap. Model updates can be as simple as changing the value of a single attribute, or more complex, such as adding a JMS Server. For a detailed discussion of model updates, see Runtime Updates in the Model in Image user guide.\nThe operator does not support all possible dynamic model updates. For model update limitations, consult Runtime Updates in the Model in Image user docs, and carefully test any model update before attempting a dynamic update in production.\n Here are the steps:\n Ensure that you have a running domain.\nMake sure you have deployed the domain from the Initial use case.\n Create a data source model YAML file.\nCreate a WDT model snippet for a data source (or use the example provided). 
Make sure that its target is set to cluster-1, and that its initial capacity is set to 0.\nThe reason for the latter is to prevent the data source from causing a WebLogic Server startup failure if it can\u0026rsquo;t find the database, which would be likely to happen because we haven\u0026rsquo;t deployed one (unless you\u0026rsquo;re using the JRF path through the sample).\nHere\u0026rsquo;s an example data source model configuration that meets these criteria:\nresources: JDBCSystemResource: mynewdatasource: Target: 'cluster-1' JdbcResource: JDBCDataSourceParams: JNDIName: [ jdbc/mydatasource1, jdbc/mydatasource2 ] GlobalTransactionsProtocol: TwoPhaseCommit JDBCDriverParams: DriverName: oracle.jdbc.xa.client.OracleXADataSource URL: '@@SECRET:@@ENV:DOMAIN_UID@@-datasource-secret:url@@' PasswordEncrypted: '@@SECRET:@@ENV:DOMAIN_UID@@-datasource-secret:password@@' Properties: user: Value: 'sys as sysdba' oracle.net.CONNECT_TIMEOUT: Value: 5000 oracle.jdbc.ReadTimeout: Value: 30000 JDBCConnectionPoolParams: InitialCapacity: 0 MaxCapacity: 1 TestTableName: SQL ISVALID TestConnectionsOnReserve: true Place the above model snippet in a file named /tmp/mii-sample/mydatasource.yaml and then use it in the later step where we deploy the model ConfigMap, or alternatively, use the same data source that\u0026rsquo;s provided in /tmp/mii-sample/model-configmaps/datasource/model.20.datasource.yaml.\n Create the data source secret.\nThe data source references a new secret that needs to be created. 
Run the following commands to create the secret:\n$ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-datasource-secret \\ --from-literal=password=Oradoc_db1 \\ --from-literal=url=jdbc:oracle:thin:@oracle-db.default.svc.cluster.local:1521/devpdb.k8s $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-datasource-secret \\ weblogic.domainUID=sample-domain1 We name and label secrets using their associated domain UID for two reasons:\n To make it obvious which secret belongs to which domains. To make it easier to clean up a domain. Typical cleanup scripts use the weblogic.domainUID label as a convenience for finding all the resources associated with a domain. Create a ConfigMap with the WDT model that contains the data source definition.\nRun the following commands:\n$ kubectl -n sample-domain1-ns create configmap sample-domain1-wdt-config-map \\ --from-file=/tmp/mii-sample/model-configmaps/datasource $ kubectl -n sample-domain1-ns label configmap sample-domain1-wdt-config-map \\ weblogic.domainUID=sample-domain1 If you\u0026rsquo;ve created your own data source file, then substitute the file name in the --from-file= parameter (we suggested /tmp/mii-sample/mydatasource.yaml earlier). Note that the --from-file= parameter can reference a single file, in which case it puts the designated file in the ConfigMap, or it can reference a directory, in which case it populates the ConfigMap with all of the files in the designated directory. We name and label ConfigMaps using their associated domain UID for two reasons:\n To make it obvious which ConfigMaps belong to which domains. To make it easier to clean up a domain. Typical cleanup scripts use the weblogic.domainUID label as a convenience for finding all resources associated with a domain. 
Update your domain resource to refer to the ConfigMap and secret.\n Option 1: Update your current domain resource file from the \u0026ldquo;Initial\u0026rdquo; use case.\n Add the secret to its spec.configuration.secrets stanza:\nspec: ... configuration: ... secrets: - sample-domain1-datasource-secret (Leave any existing secrets in place.)\n Change its spec.configuration.model.configMap to look like:\nspec: ... configuration: ... model: ... configMap: sample-domain1-wdt-config-map Apply your changed domain resource:\n$ kubectl apply -f your-domain-resource.yaml Option 2: Use the updated domain resource file that is supplied with the sample:\n$ kubectl apply -f /tmp/mii-sample/domain-resources/mii-update1-d1-WLS-v1-ds.yaml Restart (\u0026lsquo;roll\u0026rsquo;) the domain.\nNow that the data source is deployed in a ConfigMap and its secret is also deployed, and we have applied an updated domain resource with its spec.configuration.model.configMap and spec.configuration.secrets referencing the ConfigMap and secret, let\u0026rsquo;s tell the operator to roll the domain.\nWhen a model domain restarts, it will rerun its introspector job in order to regenerate its configuration, and it will also pass the configuration changes found by the introspector to each restarted server. One way to cause a running domain to restart is to change the domain\u0026rsquo;s spec.restartVersion. To do this:\n Option 1: Edit your domain custom resource.\n Call kubectl -n sample-domain1-ns edit domain sample-domain1. Edit the value of the spec.restartVersion field and save. The field is a string; typically, you use a number in this field and increment it with each restart. 
Option 2: Dynamically change your domain using kubectl patch.\n To get the current restartVersion call:\n$ kubectl -n sample-domain1-ns get domain sample-domain1 '-o=jsonpath={.spec.restartVersion}' Choose a new restart version that\u0026rsquo;s different from the current restart version.\n The field is a string; typically, you use a number in this field and increment it with each restart. Use kubectl patch to set the new value. For example, assuming the new restart version is 2:\n$ kubectl -n sample-domain1-ns patch domain sample-domain1 --type=json '-p=[{\u0026quot;op\u0026quot;: \u0026quot;replace\u0026quot;, \u0026quot;path\u0026quot;: \u0026quot;/spec/restartVersion\u0026quot;, \u0026quot;value\u0026quot;: \u0026quot;2\u0026quot; }]' Option 3: Use the sample helper script.\n Call /tmp/mii-sample/utils/patch-restart-version.sh -n sample-domain1-ns -d sample-domain1. This will perform the same kubectl get and kubectl patch commands as Option 2. Wait for the roll to complete.\nNow that you\u0026rsquo;ve started a domain roll, you\u0026rsquo;ll need to wait for it to complete if you want to verify that the data source was deployed.\n One way to do this is to call kubectl get pods -n sample-domain1-ns --watch and wait for the pods to cycle back to their ready state.\n Alternatively, you can run /tmp/mii-sample/utils/wl-pod-wait.sh -p 3. This is a utility script that provides useful information about a domain\u0026rsquo;s pods and waits for them to reach a ready state, reach their target restartVersion, and reach their target image before exiting.\n Click here to expand the `wl-pod-wait.sh` usage. $ ./wl-pod-wait.sh -? Usage: wl-pod-wait.sh [-n mynamespace] [-d mydomainuid] \\ [-p expected_pod_count] \\ [-t timeout_secs] \\ [-q] Exits non-zero if 'timeout_secs' is reached before 'pod_count' is reached. Parameters: -d \u0026lt;domain_uid\u0026gt; : Defaults to 'sample-domain1'. -n \u0026lt;namespace\u0026gt; : Defaults to 'sample-domain1-ns'. 
pod_count \u0026gt; 0 : Wait until exactly 'pod_count' WebLogic server pods for a domain all (a) are ready, (b) have the same 'domainRestartVersion' label value as the current domain resource's 'spec.restartVersion, and (c) have the same image as the current domain resource's image. pod_count = 0 : Wait until there are no running WebLogic server pods for a domain. The default. -t \u0026lt;timeout\u0026gt; : Timeout in seconds. Defaults to '600'. -q : Quiet mode. Show only a count of wl pods that have reached the desired criteria. -? : This help. Click here to expand sample output from `wl-pod-wait.sh` that shows a rolling domain. @@ [2020-04-30T13:53:19][seconds=0] Info: Waiting up to 600 seconds for exactly '3' WebLogic server pods to reach the following criteria: @@ [2020-04-30T13:53:19][seconds=0] Info: ready='true' @@ [2020-04-30T13:53:19][seconds=0] Info: image='model-in-image:WLS-v1' @@ [2020-04-30T13:53:19][seconds=0] Info: domainRestartVersion='2' @@ [2020-04-30T13:53:19][seconds=0] Info: namespace='sample-domain1-ns' @@ [2020-04-30T13:53:19][seconds=0] Info: domainUID='sample-domain1' @@ [2020-04-30T13:53:19][seconds=0] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:53:19][seconds=0] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-introspect-domain-job-wlkpr' '' '' '' 'Pending' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:53:20][seconds=1] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:53:20][seconds=1] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-introspect-domain-job-wlkpr' '' '' '' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:18][seconds=59] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:54:18][seconds=59] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----------------------- ------ ----------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-introspect-domain-job-wlkpr' '' '' '' 'Succeeded' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:19][seconds=60] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:54:19][seconds=60] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:31][seconds=72] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:54:31][seconds=72] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:40][seconds=81] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:54:40][seconds=81] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:52][seconds=93] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:54:52][seconds=93] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:58][seconds=99] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:54:58][seconds=99] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'false' 'Pending' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:00][seconds=101] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:00][seconds=101] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:12][seconds=113] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:12][seconds=113] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:24][seconds=125] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:55:24][seconds=125] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:33][seconds=134] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:33][seconds=134] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:34][seconds=135] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:34][seconds=135] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'false' 'Pending' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:40][seconds=141] Info: '1' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:55:40][seconds=141] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:44][seconds=145] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:44][seconds=145] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:56:25][seconds=186] Info: '2' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:56:25][seconds=186] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:56:26][seconds=187] Info: '2' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:56:26][seconds=187] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'false' 'Pending' @@ [2020-04-30T13:56:30][seconds=191] Info: '2' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:56:30][seconds=191] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:56:34][seconds=195] Info: '2' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:56:34][seconds=195] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '2' 'model-in-image:WLS-v1' 'false' 'Pending' @@ [2020-04-30T13:57:09][seconds=230] Info: '3' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:57:09][seconds=230] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '2' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:57:09][seconds=230] Info: Success! After your domain is running, you can call the sample web application to determine if the data source was deployed.\nSend a web application request to the ingress controller:\n$ curl -s -S -m 10 -H 'host: sample-domain1-cluster-cluster-1.mii-sample.org' \\ http://localhost:30305/myapp_war/index.jsp Or, if Traefik is unavailable and your Administration Server pod is running, you can run kubectl exec:\n$ kubectl exec -n sample-domain1-ns sample-domain1-admin-server -- bash -c \\ \u0026quot;curl -s -S -m 10 http://sample-domain1-cluster-cluster-1:8001/myapp_war/index.jsp\u0026quot; You should see something like the following:\n Click here to see the expected web application output. $ curl -s -S -m 10 -H 'host: sample-domain1-cluster-cluster-1.mii-sample.org' \\ http://localhost:30305/myapp_war/index.jsp \u0026lt;html\u0026gt;\u0026lt;body\u0026gt;\u0026lt;pre\u0026gt; ***************************************************************** Hello World! This is version 'v1' of the mii-sample JSP web-app. Welcome to WebLogic server 'managed-server1'! 
domain UID = 'sample-domain1' domain name = 'domain1' Found 1 local cluster runtime: Cluster 'cluster-1' Found 1 local data source: Datasource 'mynewdatasource': State='Running' ***************************************************************** \u0026lt;/pre\u0026gt;\u0026lt;/body\u0026gt;\u0026lt;/html\u0026gt; If you see an error, then consult Debugging in the Model in Image user guide.\nThis completes the sample scenarios.\nCleanup To remove the resources you have created in these samples:\n Delete the domain resources.\n$ /tmp/operator-source/kubernetes/samples/scripts/delete-domain/delete-weblogic-domain-resources.sh -d sample-domain1 $ /tmp/operator-source/kubernetes/samples/scripts/delete-domain/delete-weblogic-domain-resources.sh -d sample-domain2 This deletes the domain and any related resources that are labeled with the domain UID sample-domain1 and sample-domain2.\nIt leaves the namespace intact, the operator running, the load balancer running (if installed), and the database running (if installed).\n Note: When you delete a domain, the operator should detect your domain deletion and shut down its pods. Wait for these pods to exit before deleting the operator that monitors the sample-domain1-ns namespace. 
You can monitor this process using the command kubectl get pods -n sample-domain1-ns --watch (ctrl-c to exit).\n If you set up the Traefik ingress controller:\n$ helm delete --purge traefik-operator $ kubectl delete namespace traefik If you set up a database for JRF:\n$ /tmp/operator-source/kubernetes/samples/scripts/create-oracle-db-service/stop-db-service.sh Delete the operator and its namespace:\n$ helm delete --purge sample-weblogic-operator $ kubectl delete namespace sample-weblogic-operator-ns Delete the domain\u0026rsquo;s namespace:\n$ kubectl delete namespace sample-domain1-ns Delete the images you may have created in this sample:\n$ docker image rm model-in-image:WLS-v1 $ docker image rm model-in-image:WLS-v2 $ docker image rm model-in-image:JRF-v1 $ docker image rm model-in-image:JRF-v2 References For references to the relevant user documentation, see:\n Model in Image user documentation Oracle WebLogic Server Deploy Tooling Oracle WebLogic Image Tool "
},
{
"uri": "/weblogic-kubernetes-operator/samples/simple/rest/",
diff --git a/docs/2.5.0/samples/simple/domains/model-in-image/index.html b/docs/2.5.0/samples/simple/domains/model-in-image/index.html
index c2cdeb9a5b5..ee49043ed6f 100644
--- a/docs/2.5.0/samples/simple/domains/model-in-image/index.html
+++ b/docs/2.5.0/samples/simple/domains/model-in-image/index.html
@@ -3096,7 +3096,7 @@
Prerequisites for all domain types
Download the latest WebLogic Deploying Tooling and WebLogic Image Tool installer ZIP files to your /tmp/mii-sample/model-images directory.
Both WDT and WIT are required to create your Model in Image Docker images. Download the latest version of each tool’s installer ZIP file to the /tmp/mii-sample/model-images directory.
$ curl -m 30 -fL https://github.com/oracle/weblogic-deploy-tooling/releases/download/release-1.9.3/weblogic-deploy.zip \
-o /tmp/mii-sample/model-images/weblogic-deploy.zip
$ curl -m 30 -fL https://github.com/oracle/weblogic-image-tool/releases/download/release-1.8.4/imagetool.zip \
-o /tmp/mii-sample/model-images/imagetool.zip
diff --git a/docs/2.6.0/index.json b/docs/2.6.0/index.json
index 3ffc86b868b..fd43d0cb2bc 100644
--- a/docs/2.6.0/index.json
+++ b/docs/2.6.0/index.json
@@ -410,7 +410,7 @@
"title": "Model in image",
"tags": [],
"description": "Sample for supplying a WebLogic Deploy Tooling (WDT) model that the operator expands into a full domain home during runtime.",
- "content": " This feature is supported only in 3.0.0-rc1.\n Contents Introduction Model in Image domain types (WLS, JRF, and Restricted JRF) Use cases Sample directory structure Prerequisites for all domain types Additional prerequisites for JRF domains Initial use case: An initial WebLogic domain Update1 use case: Dynamically adding a data source using a model ConfigMap Cleanup References Introduction This sample demonstrates deploying a Model in Image domain home source type. Unlike Domain in PV and Domain in Image, Model in Image eliminates the need to pre-create your WebLogic domain home prior to deploying your domain resource. Instead, Model in Image uses a WebLogic Deploy Tooling (WDT) model to specify your WebLogic configuration.\nWDT models are a convenient and simple alternative to WebLogic WLST configuration scripts and templates. They compactly define a WebLogic domain using YAML files and support including application archives in a ZIP file. The WDT model format is described in the open source, WebLogic Deploy Tooling GitHub project, and the required directory structure for a WDT archive is specifically discussed here.\nFor more information on Model in Image, see the Model in Image user guide. For a comparison of Model in Image to other domain home source types, see Choose a domain home source type.\nModel in Image domain types (WLS, JRF, and Restricted JRF) There are three types of domains supported by Model in Image: a standard WLS domain, an Oracle Fusion Middleware Infrastructure Java Required Files (JRF) domain, and a RestrictedJRF domain. 
This sample demonstrates the WLS and JRF types.\nThe JRF domain path through the sample includes additional steps required for JRF: deploying an infrastructure database, initializing the database using the Repository Creation Utility (RCU) tool, referencing the infrastructure database from the WebLogic configuration, setting an Oracle Platform Security Services (OPSS) wallet password, and exporting/importing an OPSS wallet file. JRF domains may be used by Oracle products that layer on top of WebLogic Server, such as SOA and OSB. Similarly, RestrictedJRF domains may be used by Oracle layered products, such as Oracle Communications products.\nUse cases This sample demonstrates two Model in Image use cases:\n Initial: An initial WebLogic domain with the following characteristics:\n Image model-in-image:WLS-v1 with: A WebLogic installation A WebLogic Deploy Tooling (WDT) installation A WDT archive with version v1 of an exploded Java EE web application A WDT model with: A WebLogic Administration Server A WebLogic cluster A reference to the web application Kubernetes Secrets: WebLogic credentials Required WDT runtime password A domain resource with: spec.domainHomeSourceType: FromModel spec.image: model-in-image:WLS-v1 References to the secrets Update1: Demonstrates updating the initial domain by dynamically adding a data source using a model ConfigMap:\n Image model-in-image:WLS-v1: Same image as Initial use case Kubernetes Secrets: Same as Initial use case plus secrets for data source credentials and URL Kubernetes ConfigMap with: A WDT model for a data source targeted to the cluster A domain resource with: Same as Initial use case plus: spec.model.configMap referencing the ConfigMap References to data source secrets Sample directory structure The sample contains the following files and directories:\n Location Description domain-resources JRF and WLS domain resources. archives Source code location for WebLogic Deploy Tooling application ZIP archives. 
model-images Staging for each model image\u0026rsquo;s WDT YAML, WDT properties, and WDT archive ZIP files. The directories in model images are named for their respective images. model-configmaps Staging files for a model ConfigMap that configures a data source. ingresses Ingress resources. utils/wl-pod-wait.sh Utility for watching the pods in a domain reach their expected restartVersion, image name, and ready state. utils/patch-restart-version.sh Utility for updating a running domain spec.restartVersion field (which causes it to \u0026lsquo;re-instrospect\u0026rsquo; and \u0026lsquo;roll\u0026rsquo;). utils/opss-wallet.sh Utility for exporting or importing a JRF domain OPSS wallet file. Prerequisites for all domain types Choose the type of domain you\u0026rsquo;re going to use throughout the sample, WLS or JRF.\n The first time you try this sample, we recommend that you choose WLS even if you\u0026rsquo;re familiar with JRF. This is because WLS is simpler and will more easily familiarize you with Model in Image concepts. We recommend choosing JRF only if you are already familiar with JRF, you have already tried the WLS path through this sample, and you have a definite use case where you need to use JRF. 
The JAVA_HOME environment variable must be set and must reference a valid JDK 8 or 11 installation.\n Get the operator source from the release/3.0.0-rc1 branch and put it in /tmp/weblogic-kubernetes-operator.\nFor example:\n$ cd /tmp $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git $ cd weblogic-kubernetes-operator $ git checkout release/3.0.0-rc1 Note: We will refer to the top directory of the operator source tree as /tmp/weblogic-kubernetes-operator; however, you can use a different location.\n For additional information about obtaining the operator source, see the Developer Guide Requirements.\n Copy the sample to a new directory; for example, use directory /tmp/mii-sample.\n$ mkdir /tmp/mii-sample $ cp -r /tmp/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-weblogic-domain/model-in-image/* /tmp/mii-sample Note: We will refer to this working copy of the sample as /tmp/mii-sample; however, you can use a different location. Make sure an operator is set up to manage namespace sample-domain1-ns. 
Also, make sure a Traefik ingress controller is managing the same namespace and listening on port 30305.\nFor example, follow the same steps as the Quick Start guide from the beginning through to the Prepare for a domain step.\nMake sure you stop when you complete the \u0026ldquo;Prepare for a domain\u0026rdquo; step and then resume following these instructions.\n Set up ingresses that will redirect HTTP from Traefik port 30305 to the clusters in this sample\u0026rsquo;s WebLogic domains.\n Option 1: To create the ingresses, use the following YAML to create a file called /tmp/mii-sample/ingresses/myingresses.yaml and then call kubectl apply -f /tmp/mii-sample/ingresses/myingresses.yaml:\napiVersion: extensions/v1beta1 kind: Ingress metadata: name: traefik-ingress-sample-domain1-admin-server namespace: sample-domain1-ns labels: weblogic.domainUID: sample-domain1 annotations: kubernetes.io/ingress.class: traefik spec: rules: - host: http: paths: - path: /console backend: serviceName: sample-domain1-admin-server servicePort: 7001 --- apiVersion: extensions/v1beta1 kind: Ingress metadata: name: traefik-ingress-sample-domain1-cluster-cluster-1 namespace: sample-domain1-ns labels: weblogic.domainUID: sample-domain1 annotations: kubernetes.io/ingress.class: traefik spec: rules: - host: sample-domain1-cluster-cluster-1.mii-sample.org http: paths: - path: backend: serviceName: sample-domain1-cluster-cluster-1 servicePort: 8001 --- apiVersion: extensions/v1beta1 kind: Ingress metadata: name: traefik-ingress-sample-domain1-cluster-cluster-2 namespace: sample-domain1-ns labels: weblogic.domainUID: sample-domain1 annotations: kubernetes.io/ingress.class: traefik spec: rules: - host: sample-domain1-cluster-cluster-2.mii-sample.org http: paths: - path: backend: serviceName: sample-domain1-cluster-cluster-2 servicePort: 8001 --- apiVersion: extensions/v1beta1 kind: Ingress metadata: name: traefik-ingress-sample-domain2-cluster-cluster-1 namespace: sample-domain1-ns labels: 
weblogic.domainUID: sample-domain2 annotations: kubernetes.io/ingress.class: traefik spec: rules: - host: sample-domain2-cluster-cluster-1.mii-sample.org http: paths: - path: backend: serviceName: sample-domain2-cluster-cluster-1 servicePort: 8001 Option 2: Run kubectl apply -f on each of the ingress YAML files that are already included in the sample source /tmp/mii-sample/ingresses directory:\n $ cd /tmp/mii-sample/ingresses $ kubectl apply -f traefik-ingress-sample-domain1-admin-server.yaml $ kubectl apply -f traefik-ingress-sample-domain1-cluster-cluster-1.yaml $ kubectl apply -f traefik-ingress-sample-domain1-cluster-cluster-2.yaml $ kubectl apply -f traefik-ingress-sample-domain2-cluster-cluster-1.yaml $ kubectl apply -f traefik-ingress-sample-domain2-cluster-cluster-2.yaml NOTE: We give each cluster ingress a different host name that is decorated using both its operator domain UID and its cluster name. This makes each cluster uniquely addressable even when cluster names are the same across different clusters. When using curl to access the WebLogic domain through the ingress, you will need to supply a host name header that matches the host names in the ingress.\n For more information on ingresses and load balancers, see Ingress.\n Obtain the WebLogic 12.2.1.4 image that is required to create the sample\u0026rsquo;s model images.\na. Use a browser to access Oracle Container Registry.\nb. Choose an image location: for JRF domains, select Middleware, then fmw-infrastructure; for WLS domains, select Middleware, then weblogic.\nc. Select Sign In and accept the license agreement.\nd. Use your terminal to log in to Docker locally: docker login container-registry.oracle.com.\ne. Later in this sample, when you run WebLogic Image Tool commands, the tool will use the image as a base image for creating model images. 
Specifically, the tool will implicitly call docker pull for one of the above licensed images as specified in the tool\u0026rsquo;s command line using the --fromImage parameter. For JRF, this sample specifies container-registry.oracle.com/middleware/fmw-infrastructure:12.2.1.4, and for WLS, the sample specifies container-registry.oracle.com/middleware/weblogic:12.2.1.4.\nIf you prefer, you can create your own base image and then substitute this image name in the WebLogic Image Tool --fromImage parameter throughout this sample. See Preparing a Base Image.\n Download the latest WebLogic Deploy Tooling and WebLogic Image Tool installer ZIP files to your /tmp/mii-sample/model-images directory.\nBoth WDT and WIT are required to create your Model in Image Docker images. Download the latest version of each tool\u0026rsquo;s installer ZIP file to the /tmp/mii-sample/model-images directory.\nFor example, visit the GitHub WebLogic Deploy Tooling Releases and WebLogic Image Tool Releases web pages to determine the latest release version for each, and then, assuming the version numbers are 1.8.0 and 1.8.4 respectively, call:\n$ curl -m 30 -fL https://github.com/oracle/weblogic-deploy-tooling/releases/download/weblogic-deploy-tooling-1.8.0/weblogic-deploy.zip \\ -o /tmp/mii-sample/model-images/weblogic-deploy.zip $ curl -m 30 -fL https://github.com/oracle/weblogic-image-tool/releases/download/release-1.8.4/imagetool.zip \\ -o /tmp/mii-sample/model-images/imagetool.zip Set up the WebLogic Image Tool.\nRun the following commands:\n$ cd /tmp/mii-sample/model-images $ unzip imagetool.zip $ ./imagetool/bin/imagetool.sh cache addInstaller \\ --type wdt \\ --version latest \\ --path /tmp/mii-sample/model-images/weblogic-deploy.zip These steps will install WIT to the /tmp/mii-sample/model-images/imagetool directory, plus put a wdt_latest entry in the tool\u0026rsquo;s cache which points to the WDT ZIP installer. 
We will use WIT later in the sample for creating model images.\n Additional prerequisites for JRF domains NOTE: If you\u0026rsquo;re using a WLS domain type, skip this section and continue here.\n JRF Prerequisites Contents Introduction to JRF setups Set up and initialize an infrastructure database Increase introspection job timeout Important considerations for RCU model attributes, domain resource attributes, and secrets Introduction to JRF setups NOTE: The requirements in this section are in addition to Prerequisites for all domain types.\n A JRF domain requires an infrastructure database, initializing this database with RCU, and configuring your domain to access this database. All of these steps must occur before you create your domain.\nSet up and initialize an infrastructure database A JRF domain requires an infrastructure database and also requires initializing this database with a schema and a set of tables. The following example shows how to set up a database and use the RCU tool to create the infrastructure schema for a JRF domain. The database is set up with the following attributes:\n Attribute Value database Kubernetes namespace default database Kubernetes pod oracle-db database image container-registry.oracle.com/database/enterprise:12.2.0.1-slim database password Oradoc_db1 infrastructure schema prefix FMW1 infrastructure schema password Oradoc_db1 database URL oracle-db.default.svc.cluster.local:1521/devpdb.k8s Ensure that you have access to the database image, and then create a deployment using it:\n Use a browser to log in to https://container-registry.oracle.com, select database-\u0026gt;enterprise and accept the license agreement.\n Get the database image:\n In the local shell, docker login container-registry.oracle.com. In the local shell, docker pull container-registry.oracle.com/database/enterprise:12.2.0.1-slim. 
Use the sample script in /tmp/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-oracle-db-service to create an Oracle database running in the pod, oracle-db.\n$ cd /tmp/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-oracle-db-service $ start-db-service.sh This script will deploy a database in the default namespace with the connect string oracle-db.default.svc.cluster.local:1521/devpdb.k8s, and administration password Oradoc_db1.\nThis step is based on the steps documented in Run a Database.\nWARNING: The Oracle Database Docker images are supported only for non-production use. For more details, see My Oracle Support note: Oracle Support for Database Running on Docker (Doc ID 2216342.1).\n Use the sample script in /tmp/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-rcu-schema to create the RCU schema with the schema prefix FMW1.\nNote that this script assumes Oradoc_db1 is the DBA password, Oradoc_db1 is the schema password, and that the database URL is oracle-db.default.svc.cluster.local:1521/devpdb.k8s.\n$ cd /tmp/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-rcu-schema $ ./create-rcu-schema.sh -s FMW1 -i container-registry.oracle.com/middleware/fmw-infrastructure:12.2.1.4 NOTE: If you need to drop the repository, use this command:\n$ drop-rcu-schema.sh -s FMW1 Increase introspection job timeout The JRF domain home creation can take more time than the introspection job\u0026rsquo;s default timeout. You should increase the timeout for the introspection job. Use the configuration.introspectorJobActiveDeadlineSeconds in your domain resource to override the default with a value of at least 300 seconds (the default is 120 seconds). 
Note that the JRF versions of the domain resource files that are provided in /tmp/mii-sample/domain-resources already set this value.\nImportant considerations for RCU model attributes, domain resource attributes, and secrets To allow Model in Image to access the database and OPSS wallet, you must create an RCU access secret containing the database connect string, user name, and password that\u0026rsquo;s referenced from your model and an OPSS wallet password secret that\u0026rsquo;s referenced from your domain resource before deploying your domain. It\u0026rsquo;s also necessary to define an RCUDbInfo stanza in your model.\nThe sample includes examples of JRF models and domain resources in the /tmp/mii-sample/model-images and /tmp/mii-sample/domain-resources directories, and instructions in the following sections will describe setting up the RCU and OPSS secrets.\nWhen you follow the instructions later in this sample, avoid instructions that are WLS only, and substitute JRF for WLS in the corresponding model image tags and domain resource file names.\nFor example:\n JRF domain resources in this sample have an opss.walletPasswordSecret field that references a secret named sample-domain1-opss-wallet-password-secret, with password=welcome1.\n JRF image models in this sample have a domainInfo -\u0026gt; RCUDbInfo stanza that reference a sample-domain1-rcu-access secret with appropriate values for attributes rcu_prefix, rcu_schema_password, and rcu_db_conn_string for accessing the Oracle database that you deployed to the default namespace as one of the prerequisite steps.\n Important considerations for reusing or sharing OPSS tables We do not recommend that most users share OPSS tables. 
Extreme caution is required when sharing OPSS tables between domains.\n When you successfully deploy your JRF domain resource for the first time, the introspector job will initialize the OPSS tables for the domain using the domainInfo -\u0026gt; RCUDbInfo stanza in the WDT model plus the configuration.opss.walletPasswordSecret specified in the domain resource. The job will also create a new domain home. Finally, the operator will also capture an OPSS wallet file from the new domain\u0026rsquo;s local directory and place this file in a new Kubernetes ConfigMap.\nThere are scenarios when the domain needs to be recreated between updates, such as when WebLogic credentials are changed, security roles defined in the WDT model have been changed, or you want to share the same infrastructure tables with different domains. In these scenarios, the operator needs the walletPasswordSecret as well as the OPSS wallet file, together with the exact information in domainInfo -\u0026gt; RCUDbInfo so that the domain can be recreated and access the same set of tables. Without the wallet file and wallet password, you will not be able to recreate a domain accessing the same set of tables, therefore we strongly recommend that you back up the wallet file.\nTo recover a domain\u0026rsquo;s OPSS tables between domain restarts or to share an OPSS schema between different domains, it is necessary to extract this wallet file from the domain\u0026rsquo;s automatically deployed introspector ConfigMap and save the OPSS wallet password secret that was used for the original domain. 
The wallet password and wallet file are needed again when you recreate the domain or share the database with other domains.\nTo save the wallet file, assuming that your namespace is sample-domain1-ns and your domain UID is sample-domain1:\n $ kubectl -n sample-domain1-ns \\ get configmap sample-domain1-weblogic-domain-introspect-cm \\ -o jsonpath='{.data.ewallet\\.p12}' \\ \u0026gt; ./ewallet.p12 Alternatively, you can save the file using the sample\u0026rsquo;s wallet utility:\n $ /tmp/mii-sample/utils/opss-wallet.sh -n sample-domain1-ns -d sample-domain1 -wf ./ewallet.p12 # For help: /tmp/mii-sample/utils/opss-wallet.sh -? Important! Back up your wallet file to a safe location that can be retrieved later.\nTo reuse the wallet file in subsequent redeployments or to share the domain\u0026rsquo;s OPSS tables between different domains:\n Load the saved wallet file into a secret with a key named walletFile (again, assuming that your domain UID is sample-domain1 and your namespace is sample-domain1-ns): $ kubectl -n sample-domain1-ns create secret generic sample-domain1-opss-walletfile-secret \\ --from-file=walletFile=./ewallet.p12 $ kubectl -n sample-domain1-ns label secret sample-domain1-opss-walletfile-secret \\ weblogic.domainUID=`sample-domain1` Alternatively, use the sample\u0026rsquo;s wallet utility:\n $ /tmp/mii-sample/utils/opss-wallet.sh -n sample-domain1-ns -d sample-domain1 -wf ./ewallet.p12 -ws sample-domain1-opss-walletfile-secret # For help: /tmp/mii-sample/utils/opss-wallet.sh -? 
Modify your domain resource JRF YAML files to provide the wallet file secret name, for example: configuration: opss: # Name of secret with walletPassword for extracting the wallet walletPasswordSecret: sample-domain1-opss-wallet-password-secret # Name of secret with walletFile containing base64 encoded opss wallet walletFileSecret: sample-domain1-opss-walletfile-secret Note: The sample JRF domain resource files included in /tmp/mii-sample/domain-resources already have the above YAML stanza.\n Initial use case Contents Overview Image creation Image creation - Introduction Understanding our first archive Staging a ZIP file of the archive Staging model files Creating the image with WIT Deploy resources Deploy resources - Introduction Secrets Domain resource Overview In this use case, we set up an initial WebLogic domain. This involves:\n A WDT archive ZIP file that contains your applications. A WDT model that describes your WebLogic configuration. A Docker image that contains your WDT model files and archive. Creating secrets for the domain. Creating a domain resource for the domain that references your secrets and image. After the domain resource is deployed, the WebLogic operator will start an \u0026lsquo;introspector job\u0026rsquo; that converts your models into a WebLogic configuration, and then the operator will pass this configuration to each WebLogic Server in the domain.\nPerform the steps in Prerequisites for all domain types before performing the steps in this use case.\nIf you are taking the JRF path through the sample, then substitute JRF for WLS in your image names and directory paths. 
Also note that the JRF-v1 model YAML differs from the WLS-v1 YAML file (it contains an additional domainInfo -\u0026gt; RCUDbInfo stanza).\n Image creation - Introduction The goal of the initial use case \u0026lsquo;image creation\u0026rsquo; is to demonstrate using the WebLogic Image Tool to create an image named model-in-image:WLS-v1 from files that we will stage to /tmp/mii-sample/model-images/model-in-image:WLS-v1/. The staged files will contain a web application in a WDT archive, and WDT model configuration for a WebLogic Administration Server called admin-server and a WebLogic cluster called cluster-1.\nOverall, a Model in Image image must contain a WebLogic installation and also a WebLogic Deploy Tooling installation in its /u01/wdt/weblogic-deploy directory. In addition, if you have WDT model archive files, then the image must also contain these files in its /u01/wdt/models directory. Finally, an image may optionally also contain your WDT model YAML and properties files in the same /u01/wdt/models directory. If you do not specify WDT model YAML in your /u01/wdt/models directory, then the model YAML must be supplied dynamically using a Kubernetes ConfigMap that is referenced by your domain resource spec.model.configMap attribute. We will provide an example of using a model ConfigMap later in this sample.\nLet\u0026rsquo;s walk through the steps for creating the image model-in-image:WLS-v1:\n Understanding our first archive Staging a ZIP file of the archive Staging model files Creating the image with WIT Understanding our first archive The sample includes a predefined archive directory in /tmp/mii-sample/archives/archive-v1 that we will use to create an archive ZIP file for the image.\nThe archive top directory, named wlsdeploy, contains a directory named applications, which includes an \u0026lsquo;exploded\u0026rsquo; sample JSP web application in the directory, myapp-v1. 
Three useful aspects to remember about WDT archives are:\n A model image can contain multiple WDT archives. WDT archives can contain multiple applications, libraries, and other components. WDT archives have a well defined directory structure, which always has wlsdeploy as the top directory. If you are interested in the web application source, click here to see the JSP code. \u0026lt;%-- Copyright (c) 2019, 2020, Oracle Corporation and/or its affiliates. --%\u0026gt; \u0026lt;%-- Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. --%\u0026gt; \u0026lt;%@ page import=\u0026quot;javax.naming.InitialContext\u0026quot; %\u0026gt; \u0026lt;%@ page import=\u0026quot;javax.management.*\u0026quot; %\u0026gt; \u0026lt;%@ page import=\u0026quot;java.io.*\u0026quot; %\u0026gt; \u0026lt;% InitialContext ic = null; try { ic = new InitialContext(); String srName=System.getProperty(\u0026quot;weblogic.Name\u0026quot;); String domainUID=System.getenv(\u0026quot;DOMAIN_UID\u0026quot;); String domainName=System.getenv(\u0026quot;CUSTOM_DOMAIN_NAME\u0026quot;); out.println(\u0026quot;\u0026lt;html\u0026gt;\u0026lt;body\u0026gt;\u0026lt;pre\u0026gt;\u0026quot;); out.println(\u0026quot;*****************************************************************\u0026quot;); out.println(); out.println(\u0026quot;Hello World! 
This is version 'v1' of the mii-sample JSP web-app.\u0026quot;); out.println(); out.println(\u0026quot;Welcome to WebLogic server '\u0026quot; + srName + \u0026quot;'!\u0026quot;); out.println(); out.println(\u0026quot; domain UID = '\u0026quot; + domainUID +\u0026quot;'\u0026quot;); out.println(\u0026quot; domain name = '\u0026quot; + domainName +\u0026quot;'\u0026quot;); out.println(); MBeanServer mbs = (MBeanServer)ic.lookup(\u0026quot;java:comp/env/jmx/runtime\u0026quot;); // display the current server's cluster name Set\u0026lt;ObjectInstance\u0026gt; clusterRuntimes = mbs.queryMBeans(new ObjectName(\u0026quot;*:Type=ClusterRuntime,*\u0026quot;), null); out.println(\u0026quot;Found \u0026quot; + clusterRuntimes.size() + \u0026quot; local cluster runtime\u0026quot; + (String)((clusterRuntimes.size()!=1)?\u0026quot;s:\u0026quot;:\u0026quot;:\u0026quot;)); for (ObjectInstance clusterRuntime : clusterRuntimes) { String cName = (String)mbs.getAttribute(clusterRuntime.getObjectName(), \u0026quot;Name\u0026quot;); out.println(\u0026quot; Cluster '\u0026quot; + cName + \u0026quot;'\u0026quot;); } out.println(); // display local data sources ObjectName jdbcRuntime = new ObjectName(\u0026quot;com.bea:ServerRuntime=\u0026quot; + srName + \u0026quot;,Name=\u0026quot; + srName + \u0026quot;,Type=JDBCServiceRuntime\u0026quot;); ObjectName[] dataSources = (ObjectName[])mbs.getAttribute(jdbcRuntime, \u0026quot;JDBCDataSourceRuntimeMBeans\u0026quot;); out.println(\u0026quot;Found \u0026quot; + dataSources.length + \u0026quot; local data source\u0026quot; + (String)((dataSources.length!=1)?\u0026quot;s:\u0026quot;:\u0026quot;:\u0026quot;)); for (ObjectName dataSource : dataSources) { String dsName = (String)mbs.getAttribute(dataSource, \u0026quot;Name\u0026quot;); String dsState = (String)mbs.getAttribute(dataSource, \u0026quot;State\u0026quot;); out.println(\u0026quot; Datasource '\u0026quot; + dsName + \u0026quot;': State='\u0026quot; + dsState +\u0026quot;'\u0026quot;); } 
out.println(); out.println(\u0026quot;*****************************************************************\u0026quot;); } catch (Throwable t) { t.printStackTrace(new PrintStream(response.getOutputStream())); } finally { out.println(\u0026quot;\u0026lt;/pre\u0026gt;\u0026lt;/body\u0026gt;\u0026lt;/html\u0026gt;\u0026quot;); if (ic != null) ic.close(); } %\u0026gt; The application displays important details about the WebLogic Server that it\u0026rsquo;s running on: namely its domain name, cluster name, and server name, as well as the names of any data sources that are targeted to the server. You can also see that application output reports that it\u0026rsquo;s at version v1; we will update this to v2 in a future use case to demonstrate upgrading the application.\nStaging a ZIP file of the archive When we create our image, we will use the files in staging directory /tmp/mii-sample/model-in-image__WLS-v1. In preparation, we need it to contain a ZIP file of the WDT application archive.\nRun the following commands to create your application archive ZIP file and put it in the expected directory:\n# Delete existing archive.zip in case we have an old leftover version $ rm -f /tmp/mii-sample/model-images/model-in-image__WLS-v1/archive.zip # Move to the directory which contains the source files for our archive $ cd /tmp/mii-sample/archives/archive-v1 # Zip the archive to the location will later use when we run the WebLogic Image Tool $ zip -r /tmp/mii-sample/model-images/model-in-image__WLS-v1/archive.zip wlsdeploy Staging model files In this step, we explore the staged WDT model YAML file and properties in directory /tmp/mii-sample/model-in-image__WLS-v1. The model in this directory references the web application in our archive, configures a WebLogic Administration Server, and configures a WebLogic cluster. 
It consists of only two files, model.10.properties, a file with a single property, and, model.10.yaml, a YAML file with our WebLogic configuration model.10.yaml.\nCLUSTER_SIZE=5 Here is the WLS model.10.yaml:\ndomainInfo: AdminUserName: '@@SECRET:__weblogic-credentials__:username@@' AdminPassword: '@@SECRET:__weblogic-credentials__:password@@' ServerStartMode: 'prod' topology: Name: '@@ENV:CUSTOM_DOMAIN_NAME@@' AdminServerName: 'admin-server' Cluster: 'cluster-1': DynamicServers: ServerTemplate: 'cluster-1-template' ServerNamePrefix: 'managed-server' DynamicClusterSize: '@@PROP:CLUSTER_SIZE@@' MaxDynamicClusterSize: '@@PROP:CLUSTER_SIZE@@' MinDynamicClusterSize: '0' CalculatedListenPorts: false Server: 'admin-server': ListenPort: 7001 ServerTemplate: 'cluster-1-template': Cluster: 'cluster-1' ListenPort: 8001 appDeployments: Application: myapp: SourcePath: 'wlsdeploy/applications/myapp-v1' ModuleType: ear Target: 'cluster-1' Click here to expand the JRF `model.10.yaml`, and note the RCUDbInfo stanza and its references to a DOMAIN_UID-rcu-access secret. 
domainInfo: AdminUserName: '@@SECRET:__weblogic-credentials__:username@@' AdminPassword: '@@SECRET:__weblogic-credentials__:password@@' ServerStartMode: 'prod' RCUDbInfo: rcu_prefix: '@@SECRET:@@ENV:DOMAIN_UID@@-rcu-access:rcu_prefix@@' rcu_schema_password: '@@SECRET:@@ENV:DOMAIN_UID@@-rcu-access:rcu_schema_password@@' rcu_db_conn_string: '@@SECRET:@@ENV:DOMAIN_UID@@-rcu-access:rcu_db_conn_string@@' topology: AdminServerName: 'admin-server' Name: '@@ENV:CUSTOM_DOMAIN_NAME@@' Cluster: 'cluster-1': Server: 'admin-server': ListenPort: 7001 'managed-server1-c1-': Cluster: 'cluster-1' ListenPort: 8001 'managed-server2-c1-': Cluster: 'cluster-1' ListenPort: 8001 'managed-server3-c1-': Cluster: 'cluster-1' ListenPort: 8001 'managed-server4-c1-': Cluster: 'cluster-1' ListenPort: 8001 appDeployments: Application: myapp: SourcePath: 'wlsdeploy/applications/myapp-v1' ModuleType: ear Target: 'cluster-1' The model files:\n Define a WebLogic domain with:\n Cluster cluster-1 Administration Server admin-server A cluster-1 targeted ear application that\u0026rsquo;s located in the WDT archive ZIP file at wlsdeploy/applications/myapp-v1 Leverage macros to inject external values:\n The property file CLUSTER_SIZE property is referenced in the model YAML DynamicClusterSize and MaxDynamicClusterSize fields using a PROP macro. The model file domain name is injected using a custom environment variable named CUSTOM_DOMAIN_NAME using an ENV macro. We set this environment variable later in this sample using an env field in its domain resource. This conveniently provides a simple way to deploy multiple differently named domains using the same model image. The model file administrator user name and password are set using a weblogic-credentials secret macro reference to the WebLogic credential secret. This secret is in turn referenced using the weblogicCredentialsSecret field in the domain resource. 
The weblogic-credentials is a reserved name that always dereferences to the owning domain resource actual WebLogic credentials secret name. A Model in Image image can contain multiple properties files, archive ZIP files, and YAML files, but in this sample we use just one of each. For a full discussion of Model in Images model file naming conventions, file loading order, and macro syntax, see Model files in the Model in Image user documentation.\nCreating the image with WIT Note: If you are using JRF in this sample, substitute JRF for each occurrence of WLS in the imagetool command line below, plus substitute container-registry.oracle.com/middleware/fmw-infrastructure:12.2.1.4 for the --fromImage value.\n At this point, we have staged all of the files needed for image model-in-image:WLS-v1, they include:\n /tmp/mii-sample/model-images/weblogic-deploy.zip /tmp/mii-sample/model-images/model-in-image__WLS-v1/model.10.yaml /tmp/mii-sample/model-images/model-in-image__WLS-v1/model.10.properties /tmp/mii-sample/model-images/model-in-image__WLS-v1/archive.zip If you don\u0026rsquo;t see the weblogic-deploy.zip file, then it means that you missed a step in the prerequisites.\nNow let\u0026rsquo;s use the Image Tool to create an image named model-in-image:WLS-v1 that\u0026rsquo;s layered on a base WebLogic image. 
We\u0026rsquo;ve already set up this tool during the prerequisite steps at the beginning of this sample.\nRun the following commands to create the model image and verify that it worked:\n$ cd /tmp/mii-sample/model-images $ ./imagetool/bin/imagetool.sh update \\ --tag model-in-image:WLS-v1 \\ --fromImage container-registry.oracle.com/middleware/weblogic:12.2.1.4 \\ --wdtModel ./model-in-image__WLS-v1/model.10.yaml \\ --wdtVariables ./model-in-image__WLS-v1/model.10.properties \\ --wdtArchive ./model-in-image__WLS-v1/archive.zip \\ --wdtModelOnly \\ --wdtDomainType WLS If you don\u0026rsquo;t see the imagetool directory, then it means that you missed a step in the prerequisites.\nThis command runs the WebLogic Image Tool in its Model in Image mode, and does the following:\n Builds the final Docker image as a layer on the container-registry.oracle.com/middleware/weblogic:12.2.1.4 base image. Copies the WDT ZIP file that\u0026rsquo;s referenced in the WIT cache into the image. Note that we cached WDT in WIT using the keyword latest when we set up the cache during the sample prerequisites steps. This lets WIT implicitly assume it\u0026rsquo;s the desired WDT version and removes the need to pass a --wdtVersion flag. Copies the specified WDT model, properties, and application archives to image location /u01/wdt/models. When the command succeeds, it should end with output like:\n[INFO ] Build successful. Build time=36s. Image tag=model-in-image:WLS-v1 Also, if you run the docker images command, then you should see a Docker image named model-in-image:WLS-v1.\nDeploy resources - Introduction In this section we will deploy our new image to namespace sample-domain1-ns, including the following steps:\n Create a secret containing your WebLogic administrator user name and password. Create a secret containing your Model in Image runtime encryption password: All Model in Image domains must supply a runtime encryption secret with a password value. 
It is used to encrypt configuration that is passed around internally by the operator. The value must be kept private but can be arbitrary; you can optionally supply a different secret value every time you restart the domain. If your domain type is JRF, create secrets containing your RCU access URL, credentials, and prefix. Deploy a domain resource YAML file that references the new image. Wait for the domain\u0026rsquo;s pods to start and reach their ready state. Secrets First, create the secrets needed by both WLS and JRF type model domains. In this case, we have two secrets.\nRun the following kubectl commands to deploy the required secrets:\n$ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-weblogic-credentials \\ --from-literal=username=weblogic --from-literal=password=welcome1 $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-weblogic-credentials \\ weblogic.domainUID=sample-domain1 $ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-runtime-encryption-secret \\ --from-literal=password=my_runtime_password $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-runtime-encryption-secret \\ weblogic.domainUID=sample-domain1 Some important details about these secrets:\n The WebLogic credentials secret:\n It is required and must contain username and password fields. It must be referenced by the spec.webLogicCredentialsSecret field in your domain resource. It also must be referenced by macros in the domainInfo.AdminUserName and domainInfo.AdminPassword fields in your model YAML file. The Model WDT runtime secret:\n This is a special secret required by Model in Image. It must contain a password field. It must be referenced using the spec.model.runtimeEncryptionSecret attribute in its domain resource. It must remain the same for as long as the domain is deployed to Kubernetes, but can be changed between deployments. 
It is used to encrypt data as it\u0026rsquo;s internally passed using log files from the domain\u0026rsquo;s introspector job and on to its WebLogic Server pods. Deleting and recreating the secrets:\n We delete a secret before creating it, otherwise the create command will fail if the secret already exists. This allows us to change the secret when using the kubectl create secret command. We name and label secrets using their associated domain UID for two reasons:\n To make it obvious which secrets belong to which domains. To make it easier to clean up a domain. Typical cleanup scripts use the weblogic.domainUID label as a convenience for finding all resources associated with a domain. If you\u0026rsquo;re following the JRF path through the sample, then you also need to deploy the additional secret referenced by macros in the JRF model RCUDbInfo clause, plus an OPSS wallet password secret. For details about the uses of these secrets, see the Model in Image user documentation.\n Click here for the commands for deploying additional secrets for JRF. $ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-rcu-access \\ --from-literal=rcu_prefix=FMW1 \\ --from-literal=rcu_schema_password=Oradoc_db1 \\ --from-literal=rcu_db_conn_string=oracle-db.default.svc.cluster.local:1521/devpdb.k8s $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-rcu-access \\ weblogic.domainUID=sample-domain1 $ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-opss-wallet-password-secret \\ --from-literal=walletPassword=welcome1 $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-opss-wallet-password-secret \\ weblogic.domainUID=sample-domain1 Domain resource Now let\u0026rsquo;s create a domain resource. 
A domain resource is the key resource that tells the operator how to deploy a WebLogic domain.\nCopy the following to a file called /tmp/mii-sample/mii-initial.yaml or similar, or use the file /tmp/mii-sample/domain-resources/WLS/mii-initial-d1-WLS-v1.yaml that is included in the sample source.\n Click here to expand the WLS domain resource YAML. # # This is an example of how to define a Domain resource. # # If you are using 3.0.0-rc1, then the version on the following line # should be `v8` not `v7`. apiVersion: \u0026quot;weblogic.oracle/v8\u0026quot; kind: Domain metadata: name: sample-domain1 namespace: sample-domain1-ns labels: weblogic.resourceVersion: domain-v2 weblogic.domainUID: sample-domain1 spec: # For 3.0.0-rc1, set to 'FromModel' to indicate 'Model in Image'. domainHomeSourceType: FromModel # The WebLogic Domain Home, this must be a location within # the image for 'Model in Image' domains. domainHome: /u01/domains/sample-domain1 # The WebLogic Server Docker image that the Operator uses to start the domain image: \u0026quot;model-in-image:WLS-v1\u0026quot; # Defaults to \u0026quot;Always\u0026quot; if image tag (version) is ':latest' imagePullPolicy: \u0026quot;IfNotPresent\u0026quot; # Identify which Secret contains the credentials for pulling an image #imagePullSecrets: #- name: regsecret # Identify which Secret contains the WebLogic Admin credentials, # the secret must contain 'username' and 'password' fields. webLogicCredentialsSecret: name: sample-domain1-weblogic-credentials # Whether to include the WebLogic server stdout in the pod's stdout, default is true includeServerOutInPodLog: true # Whether to enable overriding your log file location, see also 'logHome' #logHomeEnabled: false # The location for domain log, server logs, server out, and Node Manager log files # see also 'logHomeEnabled', 'volumes', and 'volumeMounts'. 
#logHome: /shared/logs/sample-domain1 # Set which WebLogic servers the Operator will start # - \u0026quot;NEVER\u0026quot; will not start any server in the domain # - \u0026quot;ADMIN_ONLY\u0026quot; will start up only the administration server (no managed servers will be started) # - \u0026quot;IF_NEEDED\u0026quot; will start all non-clustered servers, including the administration server, and clustered servers up to their replica count. serverStartPolicy: \u0026quot;IF_NEEDED\u0026quot; # Settings for all server pods in the domain including the introspector job pod serverPod: # Optional new or overridden environment variables for the domain's pods # - This sample uses CUSTOM_DOMAIN_NAME in its image model file # to set the Weblogic domain name env: - name: CUSTOM_DOMAIN_NAME value: \u0026quot;domain1\u0026quot; - name: JAVA_OPTIONS value: \u0026quot;-Dweblogic.StdoutDebugEnabled=false\u0026quot; - name: USER_MEM_ARGS value: \u0026quot;-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom \u0026quot; # Optional volumes and mounts for the domain's pods. See also 'logHome'. #volumes: #- name: weblogic-domain-storage-volume # persistentVolumeClaim: # claimName: sample-domain1-weblogic-sample-pvc #volumeMounts: #- mountPath: /shared # name: weblogic-domain-storage-volume # The desired behavior for starting the domain's administration server. 
adminServer: # The serverStartState legal values are \u0026quot;RUNNING\u0026quot; or \u0026quot;ADMIN\u0026quot; # \u0026quot;RUNNING\u0026quot; means the listed server will be started up to \u0026quot;RUNNING\u0026quot; mode # \u0026quot;ADMIN\u0026quot; means the listed server will be start up to \u0026quot;ADMIN\u0026quot; mode serverStartState: \u0026quot;RUNNING\u0026quot; # Setup a Kubernetes node port for the administration server default channel #adminService: # channels: # - channelName: default # nodePort: 30701 # The number of managed servers to start for unlisted clusters replicas: 1 # The desired behavior for starting a specific cluster's member servers clusters: - clusterName: cluster-1 serverStartState: \u0026quot;RUNNING\u0026quot; replicas: 2 # Change the `restartVersion` to force the introspector job to rerun # and apply any new model configuration, to also force a subsequent # roll of your domain's WebLogic pods. restartVersion: '1' configuration: # Settings for domainHomeSourceType 'FromModel' model: # Valid model domain types are 'WLS', 'JRF', and 'RestrictedJRF', default is 'WLS' domainType: \u0026quot;WLS\u0026quot; # Optional configmap for additional models and variable files #configMap: sample-domain1-wdt-config-map # All 'FromModel' domains require a runtimeEncryptionSecret with a 'password' field runtimeEncryptionSecret: sample-domain1-runtime-encryption-secret # Secrets that are referenced by model yaml macros # (the model yaml in the optional configMap or in the image) #secrets: #- sample-domain1-datasource-secret Click here to expand the JRF domain resource YAML. # Copyright (c) 2020, Oracle Corporation and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # # This is an example of how to define a Domain resource. # # If you are using 3.0.0-rc1, then the version on the following line # should be `v8` not `v7`. 
apiVersion: \u0026quot;weblogic.oracle/v7\u0026quot; kind: Domain metadata: name: sample-domain1 namespace: sample-domain1-ns labels: weblogic.resourceVersion: domain-v2 weblogic.domainUID: sample-domain1 spec: # Set to 'FromModel' to indicate 'Model in Image'. domainHomeSourceType: FromModel # The WebLogic Domain Home, this must be a location within # the image for 'Model in Image' domains. domainHome: /u01/domains/sample-domain1 # The WebLogic Server Docker image that the Operator uses to start the domain image: \u0026quot;model-in-image:JRF-v1\u0026quot; # Defaults to \u0026quot;Always\u0026quot; if image tag (version) is ':latest' imagePullPolicy: \u0026quot;IfNotPresent\u0026quot; # Identify which Secret contains the credentials for pulling an image #imagePullSecrets: #- name: regsecret # Identify which Secret contains the WebLogic Admin credentials, # the secret must contain 'username' and 'password' fields. webLogicCredentialsSecret: name: sample-domain1-weblogic-credentials # Whether to include the WebLogic server stdout in the pod's stdout, default is true includeServerOutInPodLog: true # Whether to enable overriding your log file location, see also 'logHome' #logHomeEnabled: false # The location for domain log, server logs, server out, and Node Manager log files # see also 'logHomeEnabled', 'volumes', and 'volumeMounts'. #logHome: /shared/logs/sample-domain1 # Set which WebLogic servers the Operator will start # - \u0026quot;NEVER\u0026quot; will not start any server in the domain # - \u0026quot;ADMIN_ONLY\u0026quot; will start up only the administration server (no managed servers will be started) # - \u0026quot;IF_NEEDED\u0026quot; will start all non-clustered servers, including the administration server, and clustered servers up to their replica count. 
serverStartPolicy: \u0026quot;IF_NEEDED\u0026quot; # Settings for all server pods in the domain including the introspector job pod serverPod: # Optional new or overridden environment variables for the domain's pods # - This sample uses CUSTOM_DOMAIN_NAME in its image model file # to set the Weblogic domain name env: - name: CUSTOM_DOMAIN_NAME value: \u0026quot;domain1\u0026quot; - name: JAVA_OPTIONS value: \u0026quot;-Dweblogic.StdoutDebugEnabled=false\u0026quot; - name: USER_MEM_ARGS value: \u0026quot;-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom \u0026quot; # Optional volumes and mounts for the domain's pods. See also 'logHome'. #volumes: #- name: weblogic-domain-storage-volume # persistentVolumeClaim: # claimName: sample-domain1-weblogic-sample-pvc #volumeMounts: #- mountPath: /shared # name: weblogic-domain-storage-volume # The desired behavior for starting the domain's administration server. adminServer: # The serverStartState legal values are \u0026quot;RUNNING\u0026quot; or \u0026quot;ADMIN\u0026quot; # \u0026quot;RUNNING\u0026quot; means the listed server will be started up to \u0026quot;RUNNING\u0026quot; mode # \u0026quot;ADMIN\u0026quot; means the listed server will be start up to \u0026quot;ADMIN\u0026quot; mode serverStartState: \u0026quot;RUNNING\u0026quot; # Setup a Kubernetes node port for the administration server default channel #adminService: # channels: # - channelName: default # nodePort: 30701 # The number of managed servers to start for unlisted clusters replicas: 1 # The desired behavior for starting a specific cluster's member servers clusters: - clusterName: cluster-1 serverStartState: \u0026quot;RUNNING\u0026quot; replicas: 2 # Change the restartVersion to force the introspector job to rerun # and apply any new model configuration, to also force a subsequent # roll of your domain's WebLogic pods. 
restartVersion: '1' configuration: # Settings for domainHomeSourceType 'FromModel' model: # Valid model domain types are 'WLS', 'JRF', and 'RestrictedJRF', default is 'WLS' domainType: \u0026quot;JRF\u0026quot; # Optional configmap for additional models and variable files #configMap: sample-domain1-wdt-config-map # All 'FromModel' domains require a runtimeEncryptionSecret with a 'password' field runtimeEncryptionSecret: sample-domain1-runtime-encryption-secret # Secrets that are referenced by model yaml macros # (the model yaml in the optional configMap or in the image) secrets: #- sample-domain1-datasource-secret - sample-domain1-rcu-access # Increase the introspector job active timeout value for JRF use cases introspectorJobActiveDeadlineSeconds: 300 opss: # Name of secret with walletPassword for extracting the wallet, used for JRF domains walletPasswordSecret: sample-domain1-opss-wallet-password-secret # Name of secret with walletFile containing base64 encoded opss wallet, used for JRF domains #walletFileSecret: sample-domain1-opss-walletfile-secret Run the following command to create the domain custom resource:\n$ kubectl apply -f /tmp/mii-sample/domain-resources/WLS/mii-initial-d1-WLS-v1.yaml Note: If you are choosing not to use the predefined domain resource YAML file and instead created your own domain resource file earlier, then substitute your custom file name in the above command. You might recall that we suggested naming it /tmp/mii-sample/mii-initial.yaml.\n If you run kubectl get pods -n sample-domain1-ns --watch, then you should see the introspector job run and your WebLogic Server pods start. The output should look something like this:\n Click here to expand. 
$ kubectl get pods -n sample-domain1-ns --watch NAME READY STATUS RESTARTS AGE sample-domain1-introspect-domain-job-lqqj9 0/1 Pending 0 0s sample-domain1-introspect-domain-job-lqqj9 0/1 ContainerCreating 0 0s sample-domain1-introspect-domain-job-lqqj9 1/1 Running 0 1s sample-domain1-introspect-domain-job-lqqj9 0/1 Completed 0 65s sample-domain1-introspect-domain-job-lqqj9 0/1 Terminating 0 65s sample-domain1-admin-server 0/1 Pending 0 0s sample-domain1-admin-server 0/1 ContainerCreating 0 0s sample-domain1-admin-server 0/1 Running 0 1s sample-domain1-admin-server 1/1 Running 0 32s sample-domain1-managed-server1 0/1 Pending 0 0s sample-domain1-managed-server2 0/1 Pending 0 0s sample-domain1-managed-server1 0/1 ContainerCreating 0 0s sample-domain1-managed-server2 0/1 ContainerCreating 0 0s sample-domain1-managed-server1 0/1 Running 0 2s sample-domain1-managed-server2 0/1 Running 0 2s sample-domain1-managed-server1 1/1 Running 0 43s sample-domain1-managed-server2 1/1 Running 0 42s Alternatively, you can run /tmp/mii-sample/utils/wl-pod-wait.sh -p 3. This is a utility script that provides useful information about a domain\u0026rsquo;s pods and waits for them to reach a ready state, reach their target restartVersion, and reach their target image before exiting.\n Click here to expand the `wl-pod-wait.sh` usage. $ ./wl-pod-wait.sh -? Usage: wl-pod-wait.sh [-n mynamespace] [-d mydomainuid] \\ [-p expected_pod_count] \\ [-t timeout_secs] \\ [-q] Exits non-zero if 'timeout_secs' is reached before 'pod_count' is reached. Parameters: -d \u0026lt;domain_uid\u0026gt; : Defaults to 'sample-domain1'. -n \u0026lt;namespace\u0026gt; : Defaults to 'sample-domain1-ns'. pod_count \u0026gt; 0 : Wait until exactly 'pod_count' WebLogic server pods for a domain all (a) are ready, (b) have the same 'domainRestartVersion' label value as the current domain resource's 'spec.restartVersion, and (c) have the same image as the current domain resource's image. 
pod_count = 0 : Wait until there are no running WebLogic server pods for a domain. The default. -t \u0026lt;timeout\u0026gt; : Timeout in seconds. Defaults to '600'. -q : Quiet mode. Show only a count of wl pods that have reached the desired criteria. -? : This help. Click here to expand sample output from `wl-pod-wait.sh`. @@ [2020-04-30T13:50:42][seconds=0] Info: Waiting up to 600 seconds for exactly '3' WebLogic server pods to reach the following criteria: @@ [2020-04-30T13:50:42][seconds=0] Info: ready='true' @@ [2020-04-30T13:50:42][seconds=0] Info: image='model-in-image:WLS-v1' @@ [2020-04-30T13:50:42][seconds=0] Info: domainRestartVersion='1' @@ [2020-04-30T13:50:42][seconds=0] Info: namespace='sample-domain1-ns' @@ [2020-04-30T13:50:42][seconds=0] Info: domainUID='sample-domain1' @@ [2020-04-30T13:50:42][seconds=0] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:50:42][seconds=0] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----- ----- --------- 'sample-domain1-introspect-domain-job-rkdkg' '' '' '' 'Pending' @@ [2020-04-30T13:50:45][seconds=3] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:50:45][seconds=3] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----- ----- --------- 'sample-domain1-introspect-domain-job-rkdkg' '' '' '' 'Running' @@ [2020-04-30T13:51:50][seconds=68] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:51:50][seconds=68] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE ---- ------- ----- ----- ----- @@ [2020-04-30T13:51:59][seconds=77] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:51:59][seconds=77] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE ----------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'false' 'Pending' @@ [2020-04-30T13:52:02][seconds=80] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:52:02][seconds=80] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE ----------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'false' 'Running' @@ [2020-04-30T13:52:32][seconds=110] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:52:32][seconds=110] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'false' 'Pending' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'false' 'Pending' @@ [2020-04-30T13:52:34][seconds=112] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:52:34][seconds=112] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'false' 'Running' @@ [2020-04-30T13:53:14][seconds=152] Info: '3' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:53:14][seconds=152] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:53:14][seconds=152] Info: Success! If you see an error, then consult Debugging in the Model in Image user guide.\nInvoke the web application Now that all the initial use case resources have been deployed, you can invoke the sample web application through the Traefik ingress controller\u0026rsquo;s NodePort. Note: The web application will display a list of any data sources it finds, but we don\u0026rsquo;t expect it to find any because the model doesn\u0026rsquo;t contain any at this point.\nSend a web application request to the load balancer:\n$ curl -s -S -m 10 -H 'host: sample-domain1-cluster-cluster-1.mii-sample.org' \\ http://localhost:30305/myapp_war/index.jsp Or, if Traefik is unavailable and your Administration Server pod is running, you can use kubectl exec:\n$ kubectl exec -n sample-domain1-ns sample-domain1-admin-server -- bash -c \\ \u0026quot;curl -s -S -m 10 http://sample-domain1-cluster-cluster-1:8001/myapp_war/index.jsp\u0026quot; You should see output like the following:\n$ curl -s -S -m 10 -H 'host: sample-domain1-cluster-cluster-1.mii-sample.org' \\ http://localhost:30305/myapp_war/index.jsp \u0026lt;html\u0026gt;\u0026lt;body\u0026gt;\u0026lt;pre\u0026gt; ***************************************************************** Hello World! This is version 'v1' of the mii-sample JSP web-app. Welcome to WebLogic server 'managed-server2'! 
domain UID = 'sample-domain1' domain name = 'domain1' Found 1 local cluster runtime: Cluster 'cluster-1' Found 0 local data sources: ***************************************************************** \u0026lt;/pre\u0026gt;\u0026lt;/body\u0026gt;\u0026lt;/html\u0026gt; Note: If you\u0026rsquo;re running your curl commands on a remote machine, then substitute localhost with an external address suitable for contacting your Kubernetes cluster. A Kubernetes cluster address that often works can be obtained by using the address just after https:// in the KubeDNS line of the output from the kubectl cluster-info command.\nIf you want to continue to the next use case, then leave your domain running.\nUpdate1 use case This use case demonstrates dynamically adding a data source to your running domain. It demonstrates several features of WDT and Model in Image:\n The syntax used for updating a model is exactly the same syntax you use for creating the original model. A domain\u0026rsquo;s model can be updated dynamically by supplying a model update in a file in a Kubernetes ConfigMap. Model updates can be as simple as changing the value of a single attribute, or more complex, such as adding a JMS Server. For a detailed discussion of model updates, see Runtime Updates in the Model in Image user guide.\nThe operator does not support all possible dynamic model updates. For model update limitations, consult Runtime Updates in the Model in Image user docs, and carefully test any model update before attempting a dynamic update in production.\n Here are the steps:\n Ensure that you have a running domain.\nMake sure you have deployed the domain from the Initial use case.\n Create a data source model YAML file.\nCreate a WDT model snippet for a data source (or use the example provided). 
Make sure that its target is set to cluster-1, and that its initial capacity is set to 0.\nThe reason for the latter is to prevent the data source from causing a WebLogic Server startup failure if it can\u0026rsquo;t find the database, which would be likely to happen because we haven\u0026rsquo;t deployed one (unless you\u0026rsquo;re using the JRF path through the sample).\nHere\u0026rsquo;s an example data source model configuration that meets these criteria:\nresources: JDBCSystemResource: mynewdatasource: Target: 'cluster-1' JdbcResource: JDBCDataSourceParams: JNDIName: [ jdbc/mydatasource1, jdbc/mydatasource2 ] GlobalTransactionsProtocol: TwoPhaseCommit JDBCDriverParams: DriverName: oracle.jdbc.xa.client.OracleXADataSource URL: '@@SECRET:@@ENV:DOMAIN_UID@@-datasource-secret:url@@' PasswordEncrypted: '@@SECRET:@@ENV:DOMAIN_UID@@-datasource-secret:password@@' Properties: user: Value: 'sys as sysdba' oracle.net.CONNECT_TIMEOUT: Value: 5000 oracle.jdbc.ReadTimeout: Value: 30000 JDBCConnectionPoolParams: InitialCapacity: 0 MaxCapacity: 1 TestTableName: SQL ISVALID TestConnectionsOnReserve: true Place the above model snippet in a file named /tmp/mii-sample/mydatasource.yaml and then use it in the later step where we deploy the model ConfigMap, or alternatively, use the same data source that\u0026rsquo;s provided in /tmp/mii-sample/model-configmaps/datasource/model.20.datasource.yaml.\n Create the data source secret.\nThe data source references a new secret that needs to be created. 
Run the following commands to create the secret:\n$ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-datasource-secret \\ --from-literal=password=Oradoc_db1 \\ --from-literal=url=jdbc:oracle:thin:@oracle-db.default.svc.cluster.local:1521/devpdb.k8s $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-datasource-secret \\ weblogic.domainUID=sample-domain1 We name and label secrets using their associated domain UID for two reasons:\n To make it obvious which secret belongs to which domain. To make it easier to clean up a domain. Typical cleanup scripts use the weblogic.domainUID label as a convenience for finding all the resources associated with a domain. Create a ConfigMap with the WDT model that contains the data source definition.\nRun the following commands:\n$ kubectl -n sample-domain1-ns create configmap sample-domain1-wdt-config-map \\ --from-file=/tmp/mii-sample/model-configmaps/datasource $ kubectl -n sample-domain1-ns label configmap sample-domain1-wdt-config-map \\ weblogic.domainUID=sample-domain1 If you\u0026rsquo;ve created your own data source file, then substitute the file name in the --from-file= parameter (we suggested /tmp/mii-sample/mydatasource.yaml earlier). Note that the --from-file= parameter can reference a single file, in which case it puts the designated file in the ConfigMap, or it can reference a directory, in which case it populates the ConfigMap with all of the files in the designated directory. We name and label ConfigMap using their associated domain UID for two reasons:\n To make it obvious which ConfigMap belongs to which domain. To make it easier to clean up a domain. Typical cleanup scripts use the weblogic.domainUID label as a convenience for finding all resources associated with a domain. 
Update your domain resource to refer to the ConfigMap and secret.\n Option 1: Update your current domain resource file from the \u0026ldquo;Initial\u0026rdquo; use case.\n Add the secret to its spec.configuration.secrets stanza:\nspec: ... configuration: ... secrets: - sample-domain1-datasource-secret (Leave any existing secrets in place.)\n Change its spec.configuration.model.configMap to look like:\nspec: ... configuration: ... model: ... configMap: sample-domain1-wdt-config-map Apply your changed domain resource:\n$ kubectl apply -f your-domain-resource.yaml Option 2: Use the updated domain resource file that is supplied with the sample:\n$ kubectl apply -f /tmp/mii-sample/domain-resources/WLS/mii-update1-d1-WLS-v1-ds.yaml Restart (\u0026lsquo;roll\u0026rsquo;) the domain.\nNow that the data source is deployed in a ConfigMap and its secret is also deployed, and we have applied an updated domain resource with its spec.configuration.model.configMap and spec.configuration.secrets referencing the ConfigMap and secret, let\u0026rsquo;s tell the operator to roll the domain.\nWhen a model domain restarts, it will rerun its introspector job in order to regenerate its configuration, and it will also pass the configuration changes found by the introspector to each restarted server. One way to cause a running domain to restart is to change the domain\u0026rsquo;s spec.restartVersion. To do this:\n Option 1: Edit your domain custom resource.\n Call kubectl -n sample-domain1-ns edit domain sample-domain1. Edit the value of the spec.restartVersion field and save. The field is a string; typically, you use a number in this field and increment it with each restart. 
Option 2: Dynamically change your domain using kubectl patch.\n To get the current restartVersion call:\n$ kubectl -n sample-domain1-ns get domain sample-domain1 '-o=jsonpath={.spec.restartVersion}' Choose a new restart version that\u0026rsquo;s different from the current restart version.\n The field is a string; typically, you use a number in this field and increment it with each restart. Use kubectl patch to set the new value. For example, assuming the new restart version is 2:\n$ kubectl -n sample-domain1-ns patch domain sample-domain1 --type=json '-p=[{\u0026quot;op\u0026quot;: \u0026quot;replace\u0026quot;, \u0026quot;path\u0026quot;: \u0026quot;/spec/restartVersion\u0026quot;, \u0026quot;value\u0026quot;: \u0026quot;2\u0026quot; }]' Option 3: Use the sample helper script.\n Call /tmp/mii-sample/utils/patch-restart-version.sh -n sample-domain1-ns -d sample-domain1. This will perform the same kubectl get and kubectl patch commands as Option 2. Wait for the roll to complete.\nNow that you\u0026rsquo;ve started a domain roll, you\u0026rsquo;ll need to wait for it to complete if you want to verify that the data source was deployed.\n One way to do this is to call kubectl get pods -n sample-domain1-ns --watch and wait for the pods to cycle back to their ready state.\n Alternatively, you can run /tmp/mii-sample/utils/wl-pod-wait.sh -p 3. This is a utility script that provides useful information about a domain\u0026rsquo;s pods and waits for them to reach a ready state, reach their target restartVersion, and reach their target image before exiting.\n Click here to expand the `wl-pod-wait.sh` usage. $ ./wl-pod-wait.sh -? Usage: wl-pod-wait.sh [-n mynamespace] [-d mydomainuid] \\ [-p expected_pod_count] \\ [-t timeout_secs] \\ [-q] Exits non-zero if 'timeout_secs' is reached before 'pod_count' is reached. Parameters: -d \u0026lt;domain_uid\u0026gt; : Defaults to 'sample-domain1'. -n \u0026lt;namespace\u0026gt; : Defaults to 'sample-domain1-ns'. 
pod_count \u0026gt; 0 : Wait until exactly 'pod_count' WebLogic server pods for a domain all (a) are ready, (b) have the same 'domainRestartVersion' label value as the current domain resource's 'spec.restartVersion, and (c) have the same image as the current domain resource's image. pod_count = 0 : Wait until there are no running WebLogic server pods for a domain. The default. -t \u0026lt;timeout\u0026gt; : Timeout in seconds. Defaults to '600'. -q : Quiet mode. Show only a count of wl pods that have reached the desired criteria. -? : This help. Click here to expand sample output from `wl-pod-wait.sh` that shows a rolling domain. @@ [2020-04-30T13:53:19][seconds=0] Info: Waiting up to 600 seconds for exactly '3' WebLogic server pods to reach the following criteria: @@ [2020-04-30T13:53:19][seconds=0] Info: ready='true' @@ [2020-04-30T13:53:19][seconds=0] Info: image='model-in-image:WLS-v1' @@ [2020-04-30T13:53:19][seconds=0] Info: domainRestartVersion='2' @@ [2020-04-30T13:53:19][seconds=0] Info: namespace='sample-domain1-ns' @@ [2020-04-30T13:53:19][seconds=0] Info: domainUID='sample-domain1' @@ [2020-04-30T13:53:19][seconds=0] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:53:19][seconds=0] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-introspect-domain-job-wlkpr' '' '' '' 'Pending' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:53:20][seconds=1] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:53:20][seconds=1] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-introspect-domain-job-wlkpr' '' '' '' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:18][seconds=59] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:54:18][seconds=59] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----------------------- ------ ----------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-introspect-domain-job-wlkpr' '' '' '' 'Succeeded' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:19][seconds=60] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:54:19][seconds=60] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:31][seconds=72] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:54:31][seconds=72] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:40][seconds=81] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:54:40][seconds=81] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:52][seconds=93] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:54:52][seconds=93] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:58][seconds=99] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:54:58][seconds=99] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'false' 'Pending' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:00][seconds=101] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:00][seconds=101] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:12][seconds=113] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:12][seconds=113] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:24][seconds=125] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:55:24][seconds=125] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:33][seconds=134] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:33][seconds=134] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:34][seconds=135] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:34][seconds=135] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'false' 'Pending' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:40][seconds=141] Info: '1' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:55:40][seconds=141] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:44][seconds=145] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:44][seconds=145] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:56:25][seconds=186] Info: '2' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:56:25][seconds=186] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:56:26][seconds=187] Info: '2' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:56:26][seconds=187] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'false' 'Pending' @@ [2020-04-30T13:56:30][seconds=191] Info: '2' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:56:30][seconds=191] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:56:34][seconds=195] Info: '2' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:56:34][seconds=195] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '2' 'model-in-image:WLS-v1' 'false' 'Pending' @@ [2020-04-30T13:57:09][seconds=230] Info: '3' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:57:09][seconds=230] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '2' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:57:09][seconds=230] Info: Success! After your domain is running, you can call the sample web application to determine if the data source was deployed.\nSend a web application request to the ingress controller:\n$ curl -s -S -m 10 -H 'host: sample-domain1-cluster-cluster-1.mii-sample.org' \\ http://localhost:30305/myapp_war/index.jsp Or, if Traefik is unavailable and your Administration Server pod is running, you can run kubectl exec:\n$ kubectl exec -n sample-domain1-ns sample-domain1-admin-server -- bash -c \\ \u0026quot;curl -s -S -m 10 http://sample-domain1-cluster-cluster-1:8001/myapp_war/index.jsp\u0026quot; You should see something like the following:\n Click here to see the expected web application output. $ curl -s -S -m 10 -H 'host: sample-domain1-cluster-cluster-1.mii-sample.org' \\ http://localhost:30305/myapp_war/index.jsp \u0026lt;html\u0026gt;\u0026lt;body\u0026gt;\u0026lt;pre\u0026gt; ***************************************************************** Hello World! This is version 'v1' of the mii-sample JSP web-app. Welcome to WebLogic server 'managed-server1'! 
domain UID = 'sample-domain1' domain name = 'domain1' Found 1 local cluster runtime: Cluster 'cluster-1' Found 1 local data source: Datasource 'mynewdatasource': State='Running' ***************************************************************** \u0026lt;/pre\u0026gt;\u0026lt;/body\u0026gt;\u0026lt;/html\u0026gt; If you see an error, then consult Debugging in the Model in Image user guide.\nThis completes the sample scenarios.\nCleanup To remove the resources you have created in these samples:\n Delete the domain resources.\n$ /tmp/weblogic-kubernetes-operator/kubernetes/samples/scripts/delete-domain/delete-weblogic-domain-resources.sh -d sample-domain1 $ /tmp/weblogic-kubernetes-operator/kubernetes/samples/scripts/delete-domain/delete-weblogic-domain-resources.sh -d sample-domain2 This deletes the domain and any related resources that are labeled with the domain UID sample-domain1 and sample-domain2.\nIt leaves the namespace intact, the operator running, the load balancer running (if installed), and the database running (if installed).\n Note: When you delete a domain, the operator should detect your domain deletion and shut down its pods. Wait for these pods to exit before deleting the operator that monitors the sample-domain1-ns namespace. 
You can monitor this process using the command kubectl get pods -n sample-domain1-ns --watch (ctrl-c to exit).\n If you set up the Traefik ingress controller:\n$ helm delete --purge traefik-operator $ kubectl delete namespace traefik If you set up a database for JRF:\n$ /tmp/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-oracle-db-service/stop-db-service.sh Delete the operator and its namespace:\n$ helm delete --purge sample-weblogic-operator $ kubectl delete namespace sample-weblogic-operator-ns Delete the domain\u0026rsquo;s namespace:\n$ kubectl delete namespace sample-domain1-ns Delete the images you may have created in this sample:\n$ docker image rm model-in-image:WLS-v1 $ docker image rm model-in-image:WLS-v2 $ docker image rm model-in-image:JRF-v1 $ docker image rm model-in-image:JRF-v2 References For references to the relevant user documentation, see:\n Model in Image user documentation Oracle WebLogic Server Deploy Tooling Oracle WebLogic Image Tool "
+ "content": " This feature is supported only in 3.0.0-rc1.\n Contents Introduction Model in Image domain types (WLS, JRF, and Restricted JRF) Use cases Sample directory structure Prerequisites for all domain types Additional prerequisites for JRF domains Initial use case: An initial WebLogic domain Update1 use case: Dynamically adding a data source using a model ConfigMap Cleanup References Introduction This sample demonstrates deploying a Model in Image domain home source type. Unlike Domain in PV and Domain in Image, Model in Image eliminates the need to pre-create your WebLogic domain home prior to deploying your domain resource. Instead, Model in Image uses a WebLogic Deploy Tooling (WDT) model to specify your WebLogic configuration.\nWDT models are a convenient and simple alternative to WebLogic WLST configuration scripts and templates. They compactly define a WebLogic domain using YAML files and support including application archives in a ZIP file. The WDT model format is described in the open source, WebLogic Deploy Tooling GitHub project, and the required directory structure for a WDT archive is specifically discussed here.\nFor more information on Model in Image, see the Model in Image user guide. For a comparison of Model in Image to other domain home source types, see Choose a domain home source type.\nModel in Image domain types (WLS, JRF, and Restricted JRF) There are three types of domains supported by Model in Image: a standard WLS domain, an Oracle Fusion Middleware Infrastructure Java Required Files (JRF) domain, and a RestrictedJRF domain. 
This sample demonstrates the WLS and JRF types.\nThe JRF domain path through the sample includes additional steps required for JRF: deploying an infrastructure database, initializing the database using the Repository Creation Utility (RCU) tool, referencing the infrastructure database from the WebLogic configuration, setting an Oracle Platform Security Services (OPSS) wallet password, and exporting/importing an OPSS wallet file. JRF domains may be used by Oracle products that layer on top of WebLogic Server, such as SOA and OSB. Similarly, RestrictedJRF domains may be used by Oracle layered products, such as Oracle Communications products.\nUse cases This sample demonstrates two Model in Image use cases:\n Initial: An initial WebLogic domain with the following characteristics:\n Image model-in-image:WLS-v1 with: A WebLogic installation A WebLogic Deploy Tooling (WDT) installation A WDT archive with version v1 of an exploded Java EE web application A WDT model with: A WebLogic Administration Server A WebLogic cluster A reference to the web application Kubernetes Secrets: WebLogic credentials Required WDT runtime password A domain resource with: spec.domainHomeSourceType: FromModel spec.image: model-in-image:WLS-v1 References to the secrets Update1: Demonstrates updating the initial domain by dynamically adding a data source using a model ConfigMap:\n Image model-in-image:WLS-v1: Same image as Initial use case Kubernetes Secrets: Same as Initial use case plus secrets for data source credentials and URL Kubernetes ConfigMap with: A WDT model for a data source targeted to the cluster A domain resource with: Same as Initial use case plus: spec.model.configMap referencing the ConfigMap References to data source secrets Sample directory structure The sample contains the following files and directories:\n Location Description domain-resources JRF and WLS domain resources. archives Source code location for WebLogic Deploy Tooling application ZIP archives. 
model-images Staging for each model image\u0026rsquo;s WDT YAML, WDT properties, and WDT archive ZIP files. The directories in model-images are named for their respective images. model-configmaps Staging files for a model ConfigMap that configures a data source. ingresses Ingress resources. utils/wl-pod-wait.sh Utility for watching the pods in a domain reach their expected restartVersion, image name, and ready state. utils/patch-restart-version.sh Utility for updating a running domain spec.restartVersion field (which causes it to \u0026lsquo;re-introspect\u0026rsquo; and \u0026lsquo;roll\u0026rsquo;). utils/opss-wallet.sh Utility for exporting or importing a JRF domain OPSS wallet file. Prerequisites for all domain types Choose the type of domain you\u0026rsquo;re going to use throughout the sample, WLS or JRF.\n The first time you try this sample, we recommend that you choose WLS even if you\u0026rsquo;re familiar with JRF. This is because WLS is simpler and will more easily familiarize you with Model in Image concepts. We recommend choosing JRF only if you are already familiar with JRF, you have already tried the WLS path through this sample, and you have a definite use case where you need to use JRF. 
The JAVA_HOME environment variable must be set and must reference a valid JDK 8 or 11 installation.\n Get the operator source from the release/3.0.0-rc1 branch and put it in /tmp/weblogic-kubernetes-operator.\nFor example:\n$ cd /tmp $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git $ cd weblogic-kubernetes-operator $ git checkout release/3.0.0-rc1 Note: We will refer to the top directory of the operator source tree as /tmp/weblogic-kubernetes-operator; however, you can use a different location.\n For additional information about obtaining the operator source, see the Developer Guide Requirements.\n Copy the sample to a new directory; for example, use directory /tmp/mii-sample.\n$ mkdir /tmp/mii-sample $ cp -r /tmp/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-weblogic-domain/model-in-image/* /tmp/mii-sample Note: We will refer to this working copy of the sample as /tmp/mii-sample; however, you can use a different location. Make sure an operator is set up to manage namespace sample-domain1-ns. 
Also, make sure a Traefik ingress controller is managing the same namespace and listening on port 30305.\nFor example, follow the same steps as the Quick Start guide from the beginning through to the Prepare for a domain step.\nMake sure you stop when you complete the \u0026ldquo;Prepare for a domain\u0026rdquo; step and then resume following these instructions.\n Set up ingresses that will redirect HTTP from Traefik port 30305 to the clusters in this sample\u0026rsquo;s WebLogic domains.\n Option 1: To create the ingresses, use the following YAML to create a file called /tmp/mii-sample/ingresses/myingresses.yaml and then call kubectl apply -f /tmp/mii-sample/ingresses/myingresses.yaml:\napiVersion: extensions/v1beta1 kind: Ingress metadata: name: traefik-ingress-sample-domain1-admin-server namespace: sample-domain1-ns labels: weblogic.domainUID: sample-domain1 annotations: kubernetes.io/ingress.class: traefik spec: rules: - host: http: paths: - path: /console backend: serviceName: sample-domain1-admin-server servicePort: 7001 --- apiVersion: extensions/v1beta1 kind: Ingress metadata: name: traefik-ingress-sample-domain1-cluster-cluster-1 namespace: sample-domain1-ns labels: weblogic.domainUID: sample-domain1 annotations: kubernetes.io/ingress.class: traefik spec: rules: - host: sample-domain1-cluster-cluster-1.mii-sample.org http: paths: - path: backend: serviceName: sample-domain1-cluster-cluster-1 servicePort: 8001 --- apiVersion: extensions/v1beta1 kind: Ingress metadata: name: traefik-ingress-sample-domain1-cluster-cluster-2 namespace: sample-domain1-ns labels: weblogic.domainUID: sample-domain1 annotations: kubernetes.io/ingress.class: traefik spec: rules: - host: sample-domain1-cluster-cluster-2.mii-sample.org http: paths: - path: backend: serviceName: sample-domain1-cluster-cluster-2 servicePort: 8001 --- apiVersion: extensions/v1beta1 kind: Ingress metadata: name: traefik-ingress-sample-domain2-cluster-cluster-1 namespace: sample-domain1-ns labels: 
weblogic.domainUID: sample-domain2 annotations: kubernetes.io/ingress.class: traefik spec: rules: - host: sample-domain2-cluster-cluster-1.mii-sample.org http: paths: - path: backend: serviceName: sample-domain2-cluster-cluster-1 servicePort: 8001 Option 2: Run kubectl apply -f on each of the ingress YAML files that are already included in the sample source /tmp/mii-sample/ingresses directory:\n $ cd /tmp/mii-sample/ingresses $ kubectl apply -f traefik-ingress-sample-domain1-admin-server.yaml $ kubectl apply -f traefik-ingress-sample-domain1-cluster-cluster-1.yaml $ kubectl apply -f traefik-ingress-sample-domain1-cluster-cluster-2.yaml $ kubectl apply -f traefik-ingress-sample-domain2-cluster-cluster-1.yaml $ kubectl apply -f traefik-ingress-sample-domain2-cluster-cluster-2.yaml NOTE: We give each cluster ingress a different host name that is decorated using both its operator domain UID and its cluster name. This makes each cluster uniquely addressable even when cluster names are the same across different clusters. When using curl to access the WebLogic domain through the ingress, you will need to supply a host name header that matches the host names in the ingress.\n For more information on ingresses and load balancers, see Ingress.\n Obtain the WebLogic 12.2.1.4 image that is required to create the sample\u0026rsquo;s model images.\na. Use a browser to access Oracle Container Registry.\nb. Choose an image location: for JRF domains, select Middleware, then fmw-infrastructure; for WLS domains, select Middleware, then weblogic.\nc. Select Sign In and accept the license agreement.\nd. Use your terminal to log in to Docker locally: docker login container-registry.oracle.com.\ne. Later in this sample, when you run WebLogic Image Tool commands, the tool will use the image as a base image for creating model images. 
Specifically, the tool will implicitly call docker pull for one of the above licensed images as specified in the tool\u0026rsquo;s command line using the --fromImage parameter. For JRF, this sample specifies container-registry.oracle.com/middleware/fmw-infrastructure:12.2.1.4, and for WLS, the sample specifies container-registry.oracle.com/middleware/weblogic:12.2.1.4.\nIf you prefer, you can create your own base image and then substitute this image name in the WebLogic Image Tool --fromImage parameter throughout this sample. See Preparing a Base Image.\n Download the latest WebLogic Deploy Tooling and WebLogic Image Tool installer ZIP files to your /tmp/mii-sample/model-images directory.\nBoth WDT and WIT are required to create your Model in Image Docker images. Download the latest version of each tool\u0026rsquo;s installer ZIP file to the /tmp/mii-sample/model-images directory.\nFor example, visit the GitHub WebLogic Deploy Tooling Releases and WebLogic Image Tool Releases web pages to determine the latest release version for each, and then, assuming the version numbers are 1.9.3 and 1.8.4 respectively, call:\n$ curl -m 30 -fL https://github.com/oracle/weblogic-deploy-tooling/releases/download/release-1.9.3/weblogic-deploy.zip \\ -o /tmp/mii-sample/model-images/weblogic-deploy.zip $ curl -m 30 -fL https://github.com/oracle/weblogic-image-tool/releases/download/release-1.8.4/imagetool.zip \\ -o /tmp/mii-sample/model-images/imagetool.zip Set up the WebLogic Image Tool.\nRun the following commands:\n$ cd /tmp/mii-sample/model-images $ unzip imagetool.zip $ ./imagetool/bin/imagetool.sh cache addInstaller \\ --type wdt \\ --version latest \\ --path /tmp/mii-sample/model-images/weblogic-deploy.zip These steps will install WIT to the /tmp/mii-sample/model-images/imagetool directory, plus put a wdt_latest entry in the tool\u0026rsquo;s cache which points to the WDT ZIP installer. 
We will use WIT later in the sample for creating model images.\n Additional prerequisites for JRF domains NOTE: If you\u0026rsquo;re using a WLS domain type, skip this section and continue here.\n JRF Prerequisites Contents Introduction to JRF setups Set up and initialize an infrastructure database Increase introspection job timeout Important considerations for RCU model attributes, domain resource attributes, and secrets Introduction to JRF setups NOTE: The requirements in this section are in addition to Prerequisites for all domain types.\n A JRF domain requires an infrastructure database, initializing this database with RCU, and configuring your domain to access this database. All of these steps must occur before you create your domain.\nSet up and initialize an infrastructure database A JRF domain requires an infrastructure database and also requires initializing this database with a schema and a set of tables. The following example shows how to set up a database and use the RCU tool to create the infrastructure schema for a JRF domain. The database is set up with the following attributes:\n Attribute Value database Kubernetes namespace default database Kubernetes pod oracle-db database image container-registry.oracle.com/database/enterprise:12.2.0.1-slim database password Oradoc_db1 infrastructure schema prefix FMW1 infrastructure schema password Oradoc_db1 database URL oracle-db.default.svc.cluster.local:1521/devpdb.k8s Ensure that you have access to the database image, and then create a deployment using it:\n Use a browser to log in to https://container-registry.oracle.com, select database-\u0026gt;enterprise and accept the license agreement.\n Get the database image:\n In the local shell, docker login container-registry.oracle.com. In the local shell, docker pull container-registry.oracle.com/database/enterprise:12.2.0.1-slim. 
Use the sample script in /tmp/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-oracle-db-service to create an Oracle database running in the pod, oracle-db.\n$ cd /tmp/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-oracle-db-service $ start-db-service.sh This script will deploy a database in the default namespace with the connect string oracle-db.default.svc.cluster.local:1521/devpdb.k8s, and administration password Oradoc_db1.\nThis step is based on the steps documented in Run a Database.\nWARNING: The Oracle Database Docker images are supported only for non-production use. For more details, see My Oracle Support note: Oracle Support for Database Running on Docker (Doc ID 2216342.1).\n Use the sample script in /tmp/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-rcu-schema to create the RCU schema with the schema prefix FMW1.\nNote that this script assumes Oradoc_db1 is the DBA password, Oradoc_db1 is the schema password, and that the database URL is oracle-db.default.svc.cluster.local:1521/devpdb.k8s.\n$ cd /tmp/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-rcu-schema $ ./create-rcu-schema.sh -s FMW1 -i container-registry.oracle.com/middleware/fmw-infrastructure:12.2.1.4 NOTE: If you need to drop the repository, use this command:\n$ drop-rcu-schema.sh -s FMW1 Increase introspection job timeout The JRF domain home creation can take more time than the introspection job\u0026rsquo;s default timeout. You should increase the timeout for the introspection job. Use the configuration.introspectorJobActiveDeadlineSeconds in your domain resource to override the default with a value of at least 300 seconds (the default is 120 seconds). 
Note that the JRF versions of the domain resource files that are provided in /tmp/mii-sample/domain-resources already set this value.\nImportant considerations for RCU model attributes, domain resource attributes, and secrets To allow Model in Image to access the database and OPSS wallet, you must create an RCU access secret containing the database connect string, user name, and password that\u0026rsquo;s referenced from your model and an OPSS wallet password secret that\u0026rsquo;s referenced from your domain resource before deploying your domain. It\u0026rsquo;s also necessary to define an RCUDbInfo stanza in your model.\nThe sample includes examples of JRF models and domain resources in the /tmp/mii-sample/model-images and /tmp/mii-sample/domain-resources directories, and instructions in the following sections will describe setting up the RCU and OPSS secrets.\nWhen you follow the instructions later in this sample, avoid instructions that are WLS only, and substitute JRF for WLS in the corresponding model image tags and domain resource file names.\nFor example:\n JRF domain resources in this sample have an opss.walletPasswordSecret field that references a secret named sample-domain1-opss-wallet-password-secret, with password=welcome1.\n JRF image models in this sample have a domainInfo -\u0026gt; RCUDbInfo stanza that reference a sample-domain1-rcu-access secret with appropriate values for attributes rcu_prefix, rcu_schema_password, and rcu_db_conn_string for accessing the Oracle database that you deployed to the default namespace as one of the prerequisite steps.\n Important considerations for reusing or sharing OPSS tables We do not recommend that most users share OPSS tables. 
Extreme caution is required when sharing OPSS tables between domains.\n When you successfully deploy your JRF domain resource for the first time, the introspector job will initialize the OPSS tables for the domain using the domainInfo -\u0026gt; RCUDbInfo stanza in the WDT model plus the configuration.opss.walletPasswordSecret specified in the domain resource. The job will also create a new domain home. Finally, the operator will also capture an OPSS wallet file from the new domain\u0026rsquo;s local directory and place this file in a new Kubernetes ConfigMap.\nThere are scenarios when the domain needs to be recreated between updates, such as when WebLogic credentials are changed, security roles defined in the WDT model have been changed, or you want to share the same infrastructure tables with different domains. In these scenarios, the operator needs the walletPasswordSecret as well as the OPSS wallet file, together with the exact information in domainInfo -\u0026gt; RCUDbInfo so that the domain can be recreated and access the same set of tables. Without the wallet file and wallet password, you will not be able to recreate a domain accessing the same set of tables, therefore we strongly recommend that you back up the wallet file.\nTo recover a domain\u0026rsquo;s OPSS tables between domain restarts or to share an OPSS schema between different domains, it is necessary to extract this wallet file from the domain\u0026rsquo;s automatically deployed introspector ConfigMap and save the OPSS wallet password secret that was used for the original domain. 
The wallet password and wallet file are needed again when you recreate the domain or share the database with other domains.\nTo save the wallet file, assuming that your namespace is sample-domain1-ns and your domain UID is sample-domain1:\n $ kubectl -n sample-domain1-ns \\ get configmap sample-domain1-weblogic-domain-introspect-cm \\ -o jsonpath='{.data.ewallet\\.p12}' \\ \u0026gt; ./ewallet.p12 Alternatively, you can save the file using the sample\u0026rsquo;s wallet utility:\n $ /tmp/mii-sample/utils/opss-wallet.sh -n sample-domain1-ns -d sample-domain1 -wf ./ewallet.p12 # For help: /tmp/mii-sample/utils/opss-wallet.sh -? Important! Back up your wallet file to a safe location that can be retrieved later.\nTo reuse the wallet file in subsequent redeployments or to share the domain\u0026rsquo;s OPSS tables between different domains:\n Load the saved wallet file into a secret with a key named walletFile (again, assuming that your domain UID is sample-domain1 and your namespace is sample-domain1-ns): $ kubectl -n sample-domain1-ns create secret generic sample-domain1-opss-walletfile-secret \\ --from-file=walletFile=./ewallet.p12 $ kubectl -n sample-domain1-ns label secret sample-domain1-opss-walletfile-secret \\ weblogic.domainUID=sample-domain1 Alternatively, use the sample\u0026rsquo;s wallet utility:\n $ /tmp/mii-sample/utils/opss-wallet.sh -n sample-domain1-ns -d sample-domain1 -wf ./ewallet.p12 -ws sample-domain1-opss-walletfile-secret # For help: /tmp/mii-sample/utils/opss-wallet.sh -? 
Modify your domain resource JRF YAML files to provide the wallet file secret name, for example: configuration: opss: # Name of secret with walletPassword for extracting the wallet walletPasswordSecret: sample-domain1-opss-wallet-password-secret # Name of secret with walletFile containing base64 encoded opss wallet walletFileSecret: sample-domain1-opss-walletfile-secret Note: The sample JRF domain resource files included in /tmp/mii-sample/domain-resources already have the above YAML stanza.\n Initial use case Contents Overview Image creation Image creation - Introduction Understanding our first archive Staging a ZIP file of the archive Staging model files Creating the image with WIT Deploy resources Deploy resources - Introduction Secrets Domain resource Overview In this use case, we set up an initial WebLogic domain. This involves:\n A WDT archive ZIP file that contains your applications. A WDT model that describes your WebLogic configuration. A Docker image that contains your WDT model files and archive. Creating secrets for the domain. Creating a domain resource for the domain that references your secrets and image. After the domain resource is deployed, the WebLogic operator will start an \u0026lsquo;introspector job\u0026rsquo; that converts your models into a WebLogic configuration, and then the operator will pass this configuration to each WebLogic Server in the domain.\nPerform the steps in Prerequisites for all domain types before performing the steps in this use case.\nIf you are taking the JRF path through the sample, then substitute JRF for WLS in your image names and directory paths. 
Also note that the JRF-v1 model YAML differs from the WLS-v1 YAML file (it contains an additional domainInfo -\u0026gt; RCUDbInfo stanza).\n Image creation - Introduction The goal of the initial use case \u0026lsquo;image creation\u0026rsquo; is to demonstrate using the WebLogic Image Tool to create an image named model-in-image:WLS-v1 from files that we will stage to /tmp/mii-sample/model-images/model-in-image:WLS-v1/. The staged files will contain a web application in a WDT archive, and WDT model configuration for a WebLogic Administration Server called admin-server and a WebLogic cluster called cluster-1.\nOverall, a Model in Image image must contain a WebLogic installation and also a WebLogic Deploy Tooling installation in its /u01/wdt/weblogic-deploy directory. In addition, if you have WDT model archive files, then the image must also contain these files in its /u01/wdt/models directory. Finally, an image may optionally also contain your WDT model YAML and properties files in the same /u01/wdt/models directory. If you do not specify WDT model YAML in your /u01/wdt/models directory, then the model YAML must be supplied dynamically using a Kubernetes ConfigMap that is referenced by your domain resource spec.model.configMap attribute. We will provide an example of using a model ConfigMap later in this sample.\nLet\u0026rsquo;s walk through the steps for creating the image model-in-image:WLS-v1:\n Understanding our first archive Staging a ZIP file of the archive Staging model files Creating the image with WIT Understanding our first archive The sample includes a predefined archive directory in /tmp/mii-sample/archives/archive-v1 that we will use to create an archive ZIP file for the image.\nThe archive top directory, named wlsdeploy, contains a directory named applications, which includes an \u0026lsquo;exploded\u0026rsquo; sample JSP web application in the directory, myapp-v1. 
Three useful aspects to remember about WDT archives are:\n A model image can contain multiple WDT archives. WDT archives can contain multiple applications, libraries, and other components. WDT archives have a well defined directory structure, which always has wlsdeploy as the top directory. If you are interested in the web application source, click here to see the JSP code. \u0026lt;%-- Copyright (c) 2019, 2020, Oracle Corporation and/or its affiliates. --%\u0026gt; \u0026lt;%-- Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. --%\u0026gt; \u0026lt;%@ page import=\u0026quot;javax.naming.InitialContext\u0026quot; %\u0026gt; \u0026lt;%@ page import=\u0026quot;javax.management.*\u0026quot; %\u0026gt; \u0026lt;%@ page import=\u0026quot;java.io.*\u0026quot; %\u0026gt; \u0026lt;% InitialContext ic = null; try { ic = new InitialContext(); String srName=System.getProperty(\u0026quot;weblogic.Name\u0026quot;); String domainUID=System.getenv(\u0026quot;DOMAIN_UID\u0026quot;); String domainName=System.getenv(\u0026quot;CUSTOM_DOMAIN_NAME\u0026quot;); out.println(\u0026quot;\u0026lt;html\u0026gt;\u0026lt;body\u0026gt;\u0026lt;pre\u0026gt;\u0026quot;); out.println(\u0026quot;*****************************************************************\u0026quot;); out.println(); out.println(\u0026quot;Hello World! 
This is version 'v1' of the mii-sample JSP web-app.\u0026quot;); out.println(); out.println(\u0026quot;Welcome to WebLogic server '\u0026quot; + srName + \u0026quot;'!\u0026quot;); out.println(); out.println(\u0026quot; domain UID = '\u0026quot; + domainUID +\u0026quot;'\u0026quot;); out.println(\u0026quot; domain name = '\u0026quot; + domainName +\u0026quot;'\u0026quot;); out.println(); MBeanServer mbs = (MBeanServer)ic.lookup(\u0026quot;java:comp/env/jmx/runtime\u0026quot;); // display the current server's cluster name Set\u0026lt;ObjectInstance\u0026gt; clusterRuntimes = mbs.queryMBeans(new ObjectName(\u0026quot;*:Type=ClusterRuntime,*\u0026quot;), null); out.println(\u0026quot;Found \u0026quot; + clusterRuntimes.size() + \u0026quot; local cluster runtime\u0026quot; + (String)((clusterRuntimes.size()!=1)?\u0026quot;s:\u0026quot;:\u0026quot;:\u0026quot;)); for (ObjectInstance clusterRuntime : clusterRuntimes) { String cName = (String)mbs.getAttribute(clusterRuntime.getObjectName(), \u0026quot;Name\u0026quot;); out.println(\u0026quot; Cluster '\u0026quot; + cName + \u0026quot;'\u0026quot;); } out.println(); // display local data sources ObjectName jdbcRuntime = new ObjectName(\u0026quot;com.bea:ServerRuntime=\u0026quot; + srName + \u0026quot;,Name=\u0026quot; + srName + \u0026quot;,Type=JDBCServiceRuntime\u0026quot;); ObjectName[] dataSources = (ObjectName[])mbs.getAttribute(jdbcRuntime, \u0026quot;JDBCDataSourceRuntimeMBeans\u0026quot;); out.println(\u0026quot;Found \u0026quot; + dataSources.length + \u0026quot; local data source\u0026quot; + (String)((dataSources.length!=1)?\u0026quot;s:\u0026quot;:\u0026quot;:\u0026quot;)); for (ObjectName dataSource : dataSources) { String dsName = (String)mbs.getAttribute(dataSource, \u0026quot;Name\u0026quot;); String dsState = (String)mbs.getAttribute(dataSource, \u0026quot;State\u0026quot;); out.println(\u0026quot; Datasource '\u0026quot; + dsName + \u0026quot;': State='\u0026quot; + dsState +\u0026quot;'\u0026quot;); } 
out.println(); out.println(\u0026quot;*****************************************************************\u0026quot;); } catch (Throwable t) { t.printStackTrace(new PrintStream(response.getOutputStream())); } finally { out.println(\u0026quot;\u0026lt;/pre\u0026gt;\u0026lt;/body\u0026gt;\u0026lt;/html\u0026gt;\u0026quot;); if (ic != null) ic.close(); } %\u0026gt; The application displays important details about the WebLogic Server that it\u0026rsquo;s running on: namely its domain name, cluster name, and server name, as well as the names of any data sources that are targeted to the server. You can also see that application output reports that it\u0026rsquo;s at version v1; we will update this to v2 in a future use case to demonstrate upgrading the application.\nStaging a ZIP file of the archive When we create our image, we will use the files in staging directory /tmp/mii-sample/model-in-image__WLS-v1. In preparation, we need it to contain a ZIP file of the WDT application archive.\nRun the following commands to create your application archive ZIP file and put it in the expected directory:\n# Delete existing archive.zip in case we have an old leftover version $ rm -f /tmp/mii-sample/model-images/model-in-image__WLS-v1/archive.zip # Move to the directory which contains the source files for our archive $ cd /tmp/mii-sample/archives/archive-v1 # Zip the archive to the location we will later use when we run the WebLogic Image Tool $ zip -r /tmp/mii-sample/model-images/model-in-image__WLS-v1/archive.zip wlsdeploy Staging model files In this step, we explore the staged WDT model YAML file and properties in directory /tmp/mii-sample/model-in-image__WLS-v1. The model in this directory references the web application in our archive, configures a WebLogic Administration Server, and configures a WebLogic cluster. 
It consists of only two files, model.10.properties, a file with a single property, and, model.10.yaml, a YAML file with our WebLogic configuration model.10.yaml.\nCLUSTER_SIZE=5 Here is the WLS model.10.yaml:\ndomainInfo: AdminUserName: '@@SECRET:__weblogic-credentials__:username@@' AdminPassword: '@@SECRET:__weblogic-credentials__:password@@' ServerStartMode: 'prod' topology: Name: '@@ENV:CUSTOM_DOMAIN_NAME@@' AdminServerName: 'admin-server' Cluster: 'cluster-1': DynamicServers: ServerTemplate: 'cluster-1-template' ServerNamePrefix: 'managed-server' DynamicClusterSize: '@@PROP:CLUSTER_SIZE@@' MaxDynamicClusterSize: '@@PROP:CLUSTER_SIZE@@' MinDynamicClusterSize: '0' CalculatedListenPorts: false Server: 'admin-server': ListenPort: 7001 ServerTemplate: 'cluster-1-template': Cluster: 'cluster-1' ListenPort: 8001 appDeployments: Application: myapp: SourcePath: 'wlsdeploy/applications/myapp-v1' ModuleType: ear Target: 'cluster-1' Click here to expand the JRF `model.10.yaml`, and note the RCUDbInfo stanza and its references to a DOMAIN_UID-rcu-access secret. 
domainInfo: AdminUserName: '@@SECRET:__weblogic-credentials__:username@@' AdminPassword: '@@SECRET:__weblogic-credentials__:password@@' ServerStartMode: 'prod' RCUDbInfo: rcu_prefix: '@@SECRET:@@ENV:DOMAIN_UID@@-rcu-access:rcu_prefix@@' rcu_schema_password: '@@SECRET:@@ENV:DOMAIN_UID@@-rcu-access:rcu_schema_password@@' rcu_db_conn_string: '@@SECRET:@@ENV:DOMAIN_UID@@-rcu-access:rcu_db_conn_string@@' topology: AdminServerName: 'admin-server' Name: '@@ENV:CUSTOM_DOMAIN_NAME@@' Cluster: 'cluster-1': Server: 'admin-server': ListenPort: 7001 'managed-server1-c1-': Cluster: 'cluster-1' ListenPort: 8001 'managed-server2-c1-': Cluster: 'cluster-1' ListenPort: 8001 'managed-server3-c1-': Cluster: 'cluster-1' ListenPort: 8001 'managed-server4-c1-': Cluster: 'cluster-1' ListenPort: 8001 appDeployments: Application: myapp: SourcePath: 'wlsdeploy/applications/myapp-v1' ModuleType: ear Target: 'cluster-1' The model files:\n Define a WebLogic domain with:\n Cluster cluster-1 Administration Server admin-server A cluster-1 targeted ear application that\u0026rsquo;s located in the WDT archive ZIP file at wlsdeploy/applications/myapp-v1 Leverage macros to inject external values:\n The property file CLUSTER_SIZE property is referenced in the model YAML DynamicClusterSize and MaxDynamicClusterSize fields using a PROP macro. The model file domain name is injected using a custom environment variable named CUSTOM_DOMAIN_NAME using an ENV macro. We set this environment variable later in this sample using an env field in its domain resource. This conveniently provides a simple way to deploy multiple differently named domains using the same model image. The model file administrator user name and password are set using a weblogic-credentials secret macro reference to the WebLogic credential secret. This secret is in turn referenced using the weblogicCredentialsSecret field in the domain resource. 
The weblogic-credentials is a reserved name that always dereferences to the owning domain resource\u0026rsquo;s actual WebLogic credentials secret name. A Model in Image image can contain multiple properties files, archive ZIP files, and YAML files, but in this sample we use just one of each. For a full discussion of Model in Image\u0026rsquo;s model file naming conventions, file loading order, and macro syntax, see Model files in the Model in Image user documentation.\nCreating the image with WIT Note: If you are using JRF in this sample, substitute JRF for each occurrence of WLS in the imagetool command line below, plus substitute container-registry.oracle.com/middleware/fmw-infrastructure:12.2.1.4 for the --fromImage value.\n At this point, we have staged all of the files needed for image model-in-image:WLS-v1, they include:\n /tmp/mii-sample/model-images/weblogic-deploy.zip /tmp/mii-sample/model-images/model-in-image__WLS-v1/model.10.yaml /tmp/mii-sample/model-images/model-in-image__WLS-v1/model.10.properties /tmp/mii-sample/model-images/model-in-image__WLS-v1/archive.zip If you don\u0026rsquo;t see the weblogic-deploy.zip file, then it means that you missed a step in the prerequisites.\nNow let\u0026rsquo;s use the Image Tool to create an image named model-in-image:WLS-v1 that\u0026rsquo;s layered on a base WebLogic image. 
We\u0026rsquo;ve already set up this tool during the prerequisite steps at the beginning of this sample.\nRun the following commands to create the model image and verify that it worked:\n$ cd /tmp/mii-sample/model-images $ ./imagetool/bin/imagetool.sh update \\ --tag model-in-image:WLS-v1 \\ --fromImage container-registry.oracle.com/middleware/weblogic:12.2.1.4 \\ --wdtModel ./model-in-image__WLS-v1/model.10.yaml \\ --wdtVariables ./model-in-image__WLS-v1/model.10.properties \\ --wdtArchive ./model-in-image__WLS-v1/archive.zip \\ --wdtModelOnly \\ --wdtDomainType WLS If you don\u0026rsquo;t see the imagetool directory, then it means that you missed a step in the prerequisites.\nThis command runs the WebLogic Image Tool in its Model in Image mode, and does the following:\n Builds the final Docker image as a layer on the container-registry.oracle.com/middleware/weblogic:12.2.1.4 base image. Copies the WDT ZIP file that\u0026rsquo;s referenced in the WIT cache into the image. Note that we cached WDT in WIT using the keyword latest when we set up the cache during the sample prerequisites steps. This lets WIT implicitly assume it\u0026rsquo;s the desired WDT version and removes the need to pass a -wdtVersion flag. Copies the specified WDT model, properties, and application archives to image location /u01/wdt/models. When the command succeeds, it should end with output like:\n[INFO ] Build successful. Build time=36s. Image tag=model-in-image:WLS-v1 Also, if you run the docker images command, then you should see a Docker image named model-in-image:WLS-v1.\nDeploy resources - Introduction In this section we will deploy our new image to namespace sample-domain1-ns, including the following steps:\n Create a secret containing your WebLogic administrator user name and password. Create a secret containing your Model in Image runtime encryption password: All Model in Image domains must supply a runtime encryption secret with a password value. 
It is used to encrypt configuration that is passed around internally by the operator. The value must be kept private but can be arbitrary; you can optionally supply a different secret value every time you restart the domain. If your domain type is JRF, create secrets containing your RCU access URL, credentials, and prefix. Deploy a domain resource YAML file that references the new image. Wait for the domain\u0026rsquo;s pods to start and reach their ready state. Secrets First, create the secrets needed by both WLS and JRF type model domains. In this case, we have two secrets.\nRun the following kubectl commands to deploy the required secrets:\n$ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-weblogic-credentials \\ --from-literal=username=weblogic --from-literal=password=welcome1 $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-weblogic-credentials \\ weblogic.domainUID=sample-domain1 $ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-runtime-encryption-secret \\ --from-literal=password=my_runtime_password $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-runtime-encryption-secret \\ weblogic.domainUID=sample-domain1 Some important details about these secrets:\n The WebLogic credentials secret:\n It is required and must contain username and password fields. It must be referenced by the spec.weblogicCredentialsSecret field in your domain resource. It also must be referenced by macros in the domainInfo.AdminUserName and domainInfo.AdminPassword fields in your model YAML file. The Model WDT runtime secret:\n This is a special secret required by Model in Image. It must contain a password field. It must be referenced using the spec.model.runtimeEncryptionSecret attribute in its domain resource. It must remain the same for as long as the domain is deployed to Kubernetes, but can be changed between deployments. 
It is used to encrypt data as it\u0026rsquo;s internally passed using log files from the domain\u0026rsquo;s introspector job and on to its WebLogic Server pods. Deleting and recreating the secrets:\n We delete a secret before creating it, otherwise the create command will fail if the secret already exists. This allows us to change the secret when using the kubectl create secret command. We name and label secrets using their associated domain UID for two reasons:\n To make it obvious which secrets belong to which domains. To make it easier to clean up a domain. Typical cleanup scripts use the weblogic.domainUID label as a convenience for finding all resources associated with a domain. If you\u0026rsquo;re following the JRF path through the sample, then you also need to deploy the additional secret referenced by macros in the JRF model RCUDbInfo clause, plus an OPSS wallet password secret. For details about the uses of these secrets, see the Model in Image user documentation.\n Click here for the commands for deploying additional secrets for JRF. $ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-rcu-access \\ --from-literal=rcu_prefix=FMW1 \\ --from-literal=rcu_schema_password=Oradoc_db1 \\ --from-literal=rcu_db_conn_string=oracle-db.default.svc.cluster.local:1521/devpdb.k8s $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-rcu-access \\ weblogic.domainUID=sample-domain1 $ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-opss-wallet-password-secret \\ --from-literal=walletPassword=welcome1 $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-opss-wallet-password-secret \\ weblogic.domainUID=sample-domain1 Domain resource Now let\u0026rsquo;s create a domain resource. 
A domain resource is the key resource that tells the operator how to deploy a WebLogic domain.\nCopy the following to a file called /tmp/mii-sample/mii-initial.yaml or similar, or use the file /tmp/mii-sample/domain-resources/WLS/mii-initial-d1-WLS-v1.yaml that is included in the sample source.\n Click here to expand the WLS domain resource YAML. # # This is an example of how to define a Domain resource. # # If you are using 3.0.0-rc1, then the version on the following line # should be `v8` not `v7`. apiVersion: \u0026quot;weblogic.oracle/v8\u0026quot; kind: Domain metadata: name: sample-domain1 namespace: sample-domain1-ns labels: weblogic.resourceVersion: domain-v2 weblogic.domainUID: sample-domain1 spec: # For 3.0.0-rc1, set to 'FromModel' to indicate 'Model in Image'. domainHomeSourceType: FromModel # The WebLogic Domain Home, this must be a location within # the image for 'Model in Image' domains. domainHome: /u01/domains/sample-domain1 # The WebLogic Server Docker image that the Operator uses to start the domain image: \u0026quot;model-in-image:WLS-v1\u0026quot; # Defaults to \u0026quot;Always\u0026quot; if image tag (version) is ':latest' imagePullPolicy: \u0026quot;IfNotPresent\u0026quot; # Identify which Secret contains the credentials for pulling an image #imagePullSecrets: #- name: regsecret # Identify which Secret contains the WebLogic Admin credentials, # the secret must contain 'username' and 'password' fields. webLogicCredentialsSecret: name: sample-domain1-weblogic-credentials # Whether to include the WebLogic server stdout in the pod's stdout, default is true includeServerOutInPodLog: true # Whether to enable overriding your log file location, see also 'logHome' #logHomeEnabled: false # The location for domain log, server logs, server out, and Node Manager log files # see also 'logHomeEnabled', 'volumes', and 'volumeMounts'. 
#logHome: /shared/logs/sample-domain1 # Set which WebLogic servers the Operator will start # - \u0026quot;NEVER\u0026quot; will not start any server in the domain # - \u0026quot;ADMIN_ONLY\u0026quot; will start up only the administration server (no managed servers will be started) # - \u0026quot;IF_NEEDED\u0026quot; will start all non-clustered servers, including the administration server, and clustered servers up to their replica count. serverStartPolicy: \u0026quot;IF_NEEDED\u0026quot; # Settings for all server pods in the domain including the introspector job pod serverPod: # Optional new or overridden environment variables for the domain's pods # - This sample uses CUSTOM_DOMAIN_NAME in its image model file # to set the Weblogic domain name env: - name: CUSTOM_DOMAIN_NAME value: \u0026quot;domain1\u0026quot; - name: JAVA_OPTIONS value: \u0026quot;-Dweblogic.StdoutDebugEnabled=false\u0026quot; - name: USER_MEM_ARGS value: \u0026quot;-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom \u0026quot; # Optional volumes and mounts for the domain's pods. See also 'logHome'. #volumes: #- name: weblogic-domain-storage-volume # persistentVolumeClaim: # claimName: sample-domain1-weblogic-sample-pvc #volumeMounts: #- mountPath: /shared # name: weblogic-domain-storage-volume # The desired behavior for starting the domain's administration server. 
adminServer: # The serverStartState legal values are \u0026quot;RUNNING\u0026quot; or \u0026quot;ADMIN\u0026quot; # \u0026quot;RUNNING\u0026quot; means the listed server will be started up to \u0026quot;RUNNING\u0026quot; mode # \u0026quot;ADMIN\u0026quot; means the listed server will be start up to \u0026quot;ADMIN\u0026quot; mode serverStartState: \u0026quot;RUNNING\u0026quot; # Setup a Kubernetes node port for the administration server default channel #adminService: # channels: # - channelName: default # nodePort: 30701 # The number of managed servers to start for unlisted clusters replicas: 1 # The desired behavior for starting a specific cluster's member servers clusters: - clusterName: cluster-1 serverStartState: \u0026quot;RUNNING\u0026quot; replicas: 2 # Change the `restartVersion` to force the introspector job to rerun # and apply any new model configuration, to also force a subsequent # roll of your domain's WebLogic pods. restartVersion: '1' configuration: # Settings for domainHomeSourceType 'FromModel' model: # Valid model domain types are 'WLS', 'JRF', and 'RestrictedJRF', default is 'WLS' domainType: \u0026quot;WLS\u0026quot; # Optional configmap for additional models and variable files #configMap: sample-domain1-wdt-config-map # All 'FromModel' domains require a runtimeEncryptionSecret with a 'password' field runtimeEncryptionSecret: sample-domain1-runtime-encryption-secret # Secrets that are referenced by model yaml macros # (the model yaml in the optional configMap or in the image) #secrets: #- sample-domain1-datasource-secret Click here to expand the JRF domain resource YAML. # Copyright (c) 2020, Oracle Corporation and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl. # # This is an example of how to define a Domain resource. # # If you are using 3.0.0-rc1, then the version on the following line # should be `v8` not `v7`. 
apiVersion: \u0026quot;weblogic.oracle/v7\u0026quot; kind: Domain metadata: name: sample-domain1 namespace: sample-domain1-ns labels: weblogic.resourceVersion: domain-v2 weblogic.domainUID: sample-domain1 spec: # Set to 'FromModel' to indicate 'Model in Image'. domainHomeSourceType: FromModel # The WebLogic Domain Home, this must be a location within # the image for 'Model in Image' domains. domainHome: /u01/domains/sample-domain1 # The WebLogic Server Docker image that the Operator uses to start the domain image: \u0026quot;model-in-image:JRF-v1\u0026quot; # Defaults to \u0026quot;Always\u0026quot; if image tag (version) is ':latest' imagePullPolicy: \u0026quot;IfNotPresent\u0026quot; # Identify which Secret contains the credentials for pulling an image #imagePullSecrets: #- name: regsecret # Identify which Secret contains the WebLogic Admin credentials, # the secret must contain 'username' and 'password' fields. webLogicCredentialsSecret: name: sample-domain1-weblogic-credentials # Whether to include the WebLogic server stdout in the pod's stdout, default is true includeServerOutInPodLog: true # Whether to enable overriding your log file location, see also 'logHome' #logHomeEnabled: false # The location for domain log, server logs, server out, and Node Manager log files # see also 'logHomeEnabled', 'volumes', and 'volumeMounts'. #logHome: /shared/logs/sample-domain1 # Set which WebLogic servers the Operator will start # - \u0026quot;NEVER\u0026quot; will not start any server in the domain # - \u0026quot;ADMIN_ONLY\u0026quot; will start up only the administration server (no managed servers will be started) # - \u0026quot;IF_NEEDED\u0026quot; will start all non-clustered servers, including the administration server, and clustered servers up to their replica count. 
serverStartPolicy: \u0026quot;IF_NEEDED\u0026quot; # Settings for all server pods in the domain including the introspector job pod serverPod: # Optional new or overridden environment variables for the domain's pods # - This sample uses CUSTOM_DOMAIN_NAME in its image model file # to set the Weblogic domain name env: - name: CUSTOM_DOMAIN_NAME value: \u0026quot;domain1\u0026quot; - name: JAVA_OPTIONS value: \u0026quot;-Dweblogic.StdoutDebugEnabled=false\u0026quot; - name: USER_MEM_ARGS value: \u0026quot;-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom \u0026quot; # Optional volumes and mounts for the domain's pods. See also 'logHome'. #volumes: #- name: weblogic-domain-storage-volume # persistentVolumeClaim: # claimName: sample-domain1-weblogic-sample-pvc #volumeMounts: #- mountPath: /shared # name: weblogic-domain-storage-volume # The desired behavior for starting the domain's administration server. adminServer: # The serverStartState legal values are \u0026quot;RUNNING\u0026quot; or \u0026quot;ADMIN\u0026quot; # \u0026quot;RUNNING\u0026quot; means the listed server will be started up to \u0026quot;RUNNING\u0026quot; mode # \u0026quot;ADMIN\u0026quot; means the listed server will be start up to \u0026quot;ADMIN\u0026quot; mode serverStartState: \u0026quot;RUNNING\u0026quot; # Setup a Kubernetes node port for the administration server default channel #adminService: # channels: # - channelName: default # nodePort: 30701 # The number of managed servers to start for unlisted clusters replicas: 1 # The desired behavior for starting a specific cluster's member servers clusters: - clusterName: cluster-1 serverStartState: \u0026quot;RUNNING\u0026quot; replicas: 2 # Change the restartVersion to force the introspector job to rerun # and apply any new model configuration, to also force a subsequent # roll of your domain's WebLogic pods. 
restartVersion: '1' configuration: # Settings for domainHomeSourceType 'FromModel' model: # Valid model domain types are 'WLS', 'JRF', and 'RestrictedJRF', default is 'WLS' domainType: \u0026quot;JRF\u0026quot; # Optional configmap for additional models and variable files #configMap: sample-domain1-wdt-config-map # All 'FromModel' domains require a runtimeEncryptionSecret with a 'password' field runtimeEncryptionSecret: sample-domain1-runtime-encryption-secret # Secrets that are referenced by model yaml macros # (the model yaml in the optional configMap or in the image) secrets: #- sample-domain1-datasource-secret - sample-domain1-rcu-access # Increase the introspector job active timeout value for JRF use cases introspectorJobActiveDeadlineSeconds: 300 opss: # Name of secret with walletPassword for extracting the wallet, used for JRF domains walletPasswordSecret: sample-domain1-opss-wallet-password-secret # Name of secret with walletFile containing base64 encoded opss wallet, used for JRF domains #walletFileSecret: sample-domain1-opss-walletfile-secret Run the following command to create the domain custom resource:\n$ kubectl apply -f /tmp/mii-sample/domain-resources/WLS/mii-initial-d1-WLS-v1.yaml Note: If you are choosing not to use the predefined domain resource YAML file and instead created your own domain resource file earlier, then substitute your custom file name in the above command. You might recall that we suggested naming it /tmp/mii-sample/mii-initial.yaml.\n If you run kubectl get pods -n sample-domain1-ns --watch, then you should see the introspector job run and your WebLogic Server pods start. The output should look something like this:\n Click here to expand. 
$ kubectl get pods -n sample-domain1-ns --watch NAME READY STATUS RESTARTS AGE sample-domain1-introspect-domain-job-lqqj9 0/1 Pending 0 0s sample-domain1-introspect-domain-job-lqqj9 0/1 ContainerCreating 0 0s sample-domain1-introspect-domain-job-lqqj9 1/1 Running 0 1s sample-domain1-introspect-domain-job-lqqj9 0/1 Completed 0 65s sample-domain1-introspect-domain-job-lqqj9 0/1 Terminating 0 65s sample-domain1-admin-server 0/1 Pending 0 0s sample-domain1-admin-server 0/1 ContainerCreating 0 0s sample-domain1-admin-server 0/1 Running 0 1s sample-domain1-admin-server 1/1 Running 0 32s sample-domain1-managed-server1 0/1 Pending 0 0s sample-domain1-managed-server2 0/1 Pending 0 0s sample-domain1-managed-server1 0/1 ContainerCreating 0 0s sample-domain1-managed-server2 0/1 ContainerCreating 0 0s sample-domain1-managed-server1 0/1 Running 0 2s sample-domain1-managed-server2 0/1 Running 0 2s sample-domain1-managed-server1 1/1 Running 0 43s sample-domain1-managed-server2 1/1 Running 0 42s Alternatively, you can run /tmp/mii-sample/utils/wl-pod-wait.sh -p 3. This is a utility script that provides useful information about a domain\u0026rsquo;s pods and waits for them to reach a ready state, reach their target restartVersion, and reach their target image before exiting.\n Click here to expand the `wl-pod-wait.sh` usage. $ ./wl-pod-wait.sh -? Usage: wl-pod-wait.sh [-n mynamespace] [-d mydomainuid] \\ [-p expected_pod_count] \\ [-t timeout_secs] \\ [-q] Exits non-zero if 'timeout_secs' is reached before 'pod_count' is reached. Parameters: -d \u0026lt;domain_uid\u0026gt; : Defaults to 'sample-domain1'. -n \u0026lt;namespace\u0026gt; : Defaults to 'sample-domain1-ns'. pod_count \u0026gt; 0 : Wait until exactly 'pod_count' WebLogic server pods for a domain all (a) are ready, (b) have the same 'domainRestartVersion' label value as the current domain resource's 'spec.restartVersion, and (c) have the same image as the current domain resource's image. 
pod_count = 0 : Wait until there are no running WebLogic server pods for a domain. The default. -t \u0026lt;timeout\u0026gt; : Timeout in seconds. Defaults to '600'. -q : Quiet mode. Show only a count of wl pods that have reached the desired criteria. -? : This help. Click here to expand sample output from `wl-pod-wait.sh`. @@ [2020-04-30T13:50:42][seconds=0] Info: Waiting up to 600 seconds for exactly '3' WebLogic server pods to reach the following criteria: @@ [2020-04-30T13:50:42][seconds=0] Info: ready='true' @@ [2020-04-30T13:50:42][seconds=0] Info: image='model-in-image:WLS-v1' @@ [2020-04-30T13:50:42][seconds=0] Info: domainRestartVersion='1' @@ [2020-04-30T13:50:42][seconds=0] Info: namespace='sample-domain1-ns' @@ [2020-04-30T13:50:42][seconds=0] Info: domainUID='sample-domain1' @@ [2020-04-30T13:50:42][seconds=0] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:50:42][seconds=0] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----- ----- --------- 'sample-domain1-introspect-domain-job-rkdkg' '' '' '' 'Pending' @@ [2020-04-30T13:50:45][seconds=3] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:50:45][seconds=3] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----- ----- --------- 'sample-domain1-introspect-domain-job-rkdkg' '' '' '' 'Running' @@ [2020-04-30T13:51:50][seconds=68] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:51:50][seconds=68] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE ---- ------- ----- ----- ----- @@ [2020-04-30T13:51:59][seconds=77] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:51:59][seconds=77] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE ----------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'false' 'Pending' @@ [2020-04-30T13:52:02][seconds=80] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:52:02][seconds=80] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE ----------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'false' 'Running' @@ [2020-04-30T13:52:32][seconds=110] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:52:32][seconds=110] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'false' 'Pending' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'false' 'Pending' @@ [2020-04-30T13:52:34][seconds=112] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:52:34][seconds=112] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'false' 'Running' @@ [2020-04-30T13:53:14][seconds=152] Info: '3' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:53:14][seconds=152] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:53:14][seconds=152] Info: Success! If you see an error, then consult Debugging in the Model in Image user guide.\nInvoke the web application Now that all the initial use case resources have been deployed, you can invoke the sample web application through the Traefik ingress controller\u0026rsquo;s NodePort. Note: The web application will display a list of any data sources it finds, but we don\u0026rsquo;t expect it to find any because the model doesn\u0026rsquo;t contain any at this point.\nSend a web application request to the load balancer:\n$ curl -s -S -m 10 -H 'host: sample-domain1-cluster-cluster-1.mii-sample.org' \\ http://localhost:30305/myapp_war/index.jsp Or, if Traefik is unavailable and your Administration Server pod is running, you can use kubectl exec:\n$ kubectl exec -n sample-domain1-ns sample-domain1-admin-server -- bash -c \\ \u0026quot;curl -s -S -m 10 http://sample-domain1-cluster-cluster-1:8001/myapp_war/index.jsp\u0026quot; You should see output like the following:\n$ curl -s -S -m 10 -H 'host: sample-domain1-cluster-cluster-1.mii-sample.org' \\ http://localhost:30305/myapp_war/index.jsp \u0026lt;html\u0026gt;\u0026lt;body\u0026gt;\u0026lt;pre\u0026gt; ***************************************************************** Hello World! This is version 'v1' of the mii-sample JSP web-app. Welcome to WebLogic server 'managed-server2'! 
domain UID = 'sample-domain1' domain name = 'domain1' Found 1 local cluster runtime: Cluster 'cluster-1' Found 0 local data sources: ***************************************************************** \u0026lt;/pre\u0026gt;\u0026lt;/body\u0026gt;\u0026lt;/html\u0026gt; Note: If you\u0026rsquo;re running your curl commands on a remote machine, then substitute localhost with an external address suitable for contacting your Kubernetes cluster. A Kubernetes cluster address that often works can be obtained by using the address just after https:// in the KubeDNS line of the output from the kubectl cluster-info command.\nIf you want to continue to the next use case, then leave your domain running.\nUpdate1 use case This use case demonstrates dynamically adding a data source to your running domain. It demonstrates several features of WDT and Model in Image:\n The syntax used for updating a model is exactly the same syntax you use for creating the original model. A domain\u0026rsquo;s model can be updated dynamically by supplying a model update in a file in a Kubernetes ConfigMap. Model updates can be as simple as changing the value of a single attribute, or more complex, such as adding a JMS Server. For a detailed discussion of model updates, see Runtime Updates in the Model in Image user guide.\nThe operator does not support all possible dynamic model updates. For model update limitations, consult Runtime Updates in the Model in Image user docs, and carefully test any model update before attempting a dynamic update in production.\n Here are the steps:\n Ensure that you have a running domain.\nMake sure you have deployed the domain from the Initial use case.\n Create a data source model YAML file.\nCreate a WDT model snippet for a data source (or use the example provided). 
Make sure that its target is set to cluster-1, and that its initial capacity is set to 0.\nThe reason for the latter is to prevent the data source from causing a WebLogic Server startup failure if it can\u0026rsquo;t find the database, which would be likely to happen because we haven\u0026rsquo;t deployed one (unless you\u0026rsquo;re using the JRF path through the sample).\nHere\u0026rsquo;s an example data source model configuration that meets these criteria:\nresources: JDBCSystemResource: mynewdatasource: Target: 'cluster-1' JdbcResource: JDBCDataSourceParams: JNDIName: [ jdbc/mydatasource1, jdbc/mydatasource2 ] GlobalTransactionsProtocol: TwoPhaseCommit JDBCDriverParams: DriverName: oracle.jdbc.xa.client.OracleXADataSource URL: '@@SECRET:@@ENV:DOMAIN_UID@@-datasource-secret:url@@' PasswordEncrypted: '@@SECRET:@@ENV:DOMAIN_UID@@-datasource-secret:password@@' Properties: user: Value: 'sys as sysdba' oracle.net.CONNECT_TIMEOUT: Value: 5000 oracle.jdbc.ReadTimeout: Value: 30000 JDBCConnectionPoolParams: InitialCapacity: 0 MaxCapacity: 1 TestTableName: SQL ISVALID TestConnectionsOnReserve: true Place the above model snippet in a file named /tmp/mii-sample/mydatasource.yaml and then use it in the later step where we deploy the model ConfigMap, or alternatively, use the same data source that\u0026rsquo;s provided in /tmp/mii-sample/model-configmaps/datasource/model.20.datasource.yaml.\n Create the data source secret.\nThe data source references a new secret that needs to be created. 
Run the following commands to create the secret:\n$ kubectl -n sample-domain1-ns create secret generic \\ sample-domain1-datasource-secret \\ --from-literal=password=Oradoc_db1 \\ --from-literal=url=jdbc:oracle:thin:@oracle-db.default.svc.cluster.local:1521/devpdb.k8s $ kubectl -n sample-domain1-ns label secret \\ sample-domain1-datasource-secret \\ weblogic.domainUID=sample-domain1 We name and label secrets using their associated domain UID for two reasons:\n To make it obvious which secret belongs to which domains. To make it easier to clean up a domain. Typical cleanup scripts use the weblogic.domainUID label as a convenience for finding all the resources associated with a domain. Create a ConfigMap with the WDT model that contains the data source definition.\nRun the following commands:\n$ kubectl -n sample-domain1-ns create configmap sample-domain1-wdt-config-map \\ --from-file=/tmp/mii-sample/model-configmaps/datasource $ kubectl -n sample-domain1-ns label configmap sample-domain1-wdt-config-map \\ weblogic.domainUID=sample-domain1 If you\u0026rsquo;ve created your own data source file, then substitute the file name in the --from-file= parameter (we suggested /tmp/mii-sample/mydatasource.yaml earlier). Note that the -from-file= parameter can reference a single file, in which case it puts the designated file in the ConfigMap, or it can reference a directory, in which case it populates the ConfigMap with all of the files in the designated directory. We name and label ConfigMap using their associated domain UID for two reasons:\n To make it obvious which ConfigMap belong to which domains. To make it easier to cleanup a domain. Typical cleanup scripts use the weblogic.domainUID label as a convenience for finding all resources associated with a domain. 
Update your domain resource to refer to the ConfigMap and secret.\n Option 1: Update your current domain resource file from the \u0026ldquo;Initial\u0026rdquo; use case.\n Add the secret to its spec.configuration.secrets stanza:\nspec: ... configuration: ... secrets: - sample-domain1-datasource-secret (Leave any existing secrets in place.)\n Change its spec.configuration.model.configMap to look like:\nspec: ... configuration: ... model: ... configMap: sample-domain1-wdt-config-map Apply your changed domain resource:\n$ kubectl apply -f your-domain-resource.yaml Option 2: Use the updated domain resource file that is supplied with the sample:\n$ kubectl apply -f /tmp/miisample/domain-resources/mii-update1-d1-WLS-v1-ds.yaml Restart (\u0026lsquo;roll\u0026rsquo;) the domain.\nNow that the data source is deployed in a ConfigMap and its secret is also deployed, and we have applied an updated domain resource with its spec.configuration.model.configMap and spec.configuration.secrets referencing the ConfigMap and secret, let\u0026rsquo;s tell the operator to roll the domain.\nWhen a model domain restarts, it will rerun its introspector job in order to regenerate its configuration, and it will also pass the configuration changes found by the introspector to each restarted server. One way to cause a running domain to restart is to change the domain\u0026rsquo;s spec.restartVersion. To do this:\n Option 1: Edit your domain custom resource.\n Call kubectl -n sample-domain1-ns edit domain sample-domain1. Edit the value of the spec.restartVersion field and save. The field is a string; typically, you use a number in this field and increment it with each restart. 
Option 2: Dynamically change your domain using kubectl patch.\n To get the current restartVersion call:\n$ kubectl -n sample-domain1-ns get domain sample-domain1 '-o=jsonpath={.spec.restartVersion}' Choose a new restart version that\u0026rsquo;s different from the current restart version.\n The field is a string; typically, you use a number in this field and increment it with each restart. Use kubectl patch to set the new value. For example, assuming the new restart version is 2:\n$ kubectl -n sample-domain1-ns patch domain sample-domain1 --type=json '-p=[{\u0026quot;op\u0026quot;: \u0026quot;replace\u0026quot;, \u0026quot;path\u0026quot;: \u0026quot;/spec/restartVersion\u0026quot;, \u0026quot;value\u0026quot;: \u0026quot;2\u0026quot; }]' Option 3: Use the sample helper script.\n Call /tmp/mii-sample/utils/patch-restart-version.sh -n sample-domain1-ns -d sample-domain1. This will perform the same kubectl get and kubectl patch commands as Option 2. Wait for the roll to complete.\nNow that you\u0026rsquo;ve started a domain roll, you\u0026rsquo;ll need to wait for it to complete if you want to verify that the data source was deployed.\n One way to do this is to call kubectl get pods -n sample-domain1-ns --watch and wait for the pods to cycle back to their ready state.\n Alternatively, you can run /tmp/mii-sample/utils/wl-pod-wait.sh -p 3. This is a utility script that provides useful information about a domain\u0026rsquo;s pods and waits for them to reach a ready state, reach their target restartVersion, and reach their target image before exiting.\n Click here to expand the `wl-pod-wait.sh` usage. $ ./wl-pod-wait.sh -? Usage: wl-pod-wait.sh [-n mynamespace] [-d mydomainuid] \\ [-p expected_pod_count] \\ [-t timeout_secs] \\ [-q] Exits non-zero if 'timeout_secs' is reached before 'pod_count' is reached. Parameters: -d \u0026lt;domain_uid\u0026gt; : Defaults to 'sample-domain1'. -n \u0026lt;namespace\u0026gt; : Defaults to 'sample-domain1-ns'. 
pod_count \u0026gt; 0 : Wait until exactly 'pod_count' WebLogic server pods for a domain all (a) are ready, (b) have the same 'domainRestartVersion' label value as the current domain resource's 'spec.restartVersion, and (c) have the same image as the current domain resource's image. pod_count = 0 : Wait until there are no running WebLogic server pods for a domain. The default. -t \u0026lt;timeout\u0026gt; : Timeout in seconds. Defaults to '600'. -q : Quiet mode. Show only a count of wl pods that have reached the desired criteria. -? : This help. Click here to expand sample output from `wl-pod-wait.sh` that shows a rolling domain. @@ [2020-04-30T13:53:19][seconds=0] Info: Waiting up to 600 seconds for exactly '3' WebLogic server pods to reach the following criteria: @@ [2020-04-30T13:53:19][seconds=0] Info: ready='true' @@ [2020-04-30T13:53:19][seconds=0] Info: image='model-in-image:WLS-v1' @@ [2020-04-30T13:53:19][seconds=0] Info: domainRestartVersion='2' @@ [2020-04-30T13:53:19][seconds=0] Info: namespace='sample-domain1-ns' @@ [2020-04-30T13:53:19][seconds=0] Info: domainUID='sample-domain1' @@ [2020-04-30T13:53:19][seconds=0] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:53:19][seconds=0] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-introspect-domain-job-wlkpr' '' '' '' 'Pending' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:53:20][seconds=1] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:53:20][seconds=1] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-introspect-domain-job-wlkpr' '' '' '' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:18][seconds=59] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:54:18][seconds=59] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------------------- ------- ----------------------- ------ ----------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-introspect-domain-job-wlkpr' '' '' '' 'Succeeded' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:19][seconds=60] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:54:19][seconds=60] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:31][seconds=72] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:54:31][seconds=72] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '1' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:40][seconds=81] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:54:40][seconds=81] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:52][seconds=93] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:54:52][seconds=93] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:54:58][seconds=99] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:54:58][seconds=99] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'false' 'Pending' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:00][seconds=101] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:00][seconds=101] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:12][seconds=113] Info: '0' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:12][seconds=113] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:24][seconds=125] Info: '0' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:55:24][seconds=125] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:33][seconds=134] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:33][seconds=134] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:34][seconds=135] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:34][seconds=135] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '1' 'model-in-image:WLS-v1' 'false' 'Pending' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:40][seconds=141] Info: '1' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:55:40][seconds=141] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:55:44][seconds=145] Info: '1' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:55:44][seconds=145] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'false' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:56:25][seconds=186] Info: '2' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:56:25][seconds=186] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:56:26][seconds=187] Info: '2' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:56:26][seconds=187] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '1' 'model-in-image:WLS-v1' 'false' 'Pending' @@ [2020-04-30T13:56:30][seconds=191] Info: '2' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:56:30][seconds=191] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:56:34][seconds=195] Info: '2' WebLogic pods currently match all criteria, expecting '3'. @@ [2020-04-30T13:56:34][seconds=195] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------- --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '2' 'model-in-image:WLS-v1' 'false' 'Pending' @@ [2020-04-30T13:57:09][seconds=230] Info: '3' WebLogic pods currently match all criteria, expecting '3'. 
@@ [2020-04-30T13:57:09][seconds=230] Info: Introspector and WebLogic pods with same namespace and domain-uid: NAME VERSION IMAGE READY PHASE -------------------------------- ------- ----------------------- ------ --------- 'sample-domain1-admin-server' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server1' '2' 'model-in-image:WLS-v1' 'true' 'Running' 'sample-domain1-managed-server2' '2' 'model-in-image:WLS-v1' 'true' 'Running' @@ [2020-04-30T13:57:09][seconds=230] Info: Success! After your domain is running, you can call the sample web application to determine if the data source was deployed.\nSend a web application request to the ingress controller:\n$ curl -s -S -m 10 -H 'host: sample-domain1-cluster-cluster-1.mii-sample.org' \\ http://localhost:30305/myapp_war/index.jsp Or, if Traefik is unavailable and your Administration Server pod is running, you can run kubectl exec:\n$ kubectl exec -n sample-domain1-ns sample-domain1-admin-server -- bash -c \\ \u0026quot;curl -s -S -m 10 http://sample-domain1-cluster-cluster-1:8001/myapp_war/index.jsp\u0026quot; You should see something like the following:\n Click here to see the expected web application output. $ curl -s -S -m 10 -H 'host: sample-domain1-cluster-cluster-1.mii-sample.org' \\ http://localhost:30305/myapp_war/index.jsp \u0026lt;html\u0026gt;\u0026lt;body\u0026gt;\u0026lt;pre\u0026gt; ***************************************************************** Hello World! This is version 'v1' of the mii-sample JSP web-app. Welcome to WebLogic server 'managed-server1'! 
domain UID = 'sample-domain1' domain name = 'domain1' Found 1 local cluster runtime: Cluster 'cluster-1' Found 1 local data source: Datasource 'mynewdatasource': State='Running' ***************************************************************** \u0026lt;/pre\u0026gt;\u0026lt;/body\u0026gt;\u0026lt;/html\u0026gt; If you see an error, then consult Debugging in the Model in Image user guide.\nThis completes the sample scenarios.\nCleanup To remove the resources you have created in these samples:\n Delete the domain resources.\n$ /tmp/weblogic-kubernetes-operator/kubernetes/samples/scripts/delete-domain/delete-weblogic-domain-resources.sh -d sample-domain1 $ /tmp/weblogic-kubernetes-operator/kubernetes/samples/scripts/delete-domain/delete-weblogic-domain-resources.sh -d sample-domain2 This deletes the domain and any related resources that are labeled with the domain UID sample-domain1 and sample-domain2.\nIt leaves the namespace intact, the operator running, the load balancer running (if installed), and the database running (if installed).\n Note: When you delete a domain, the operator should detect your domain deletion and shut down its pods. Wait for these pods to exit before deleting the operator that monitors the sample-domain1-ns namespace. 
You can monitor this process using the command kubectl get pods -n sample-domain1-ns --watch (ctrl-c to exit).\n If you set up the Traefik ingress controller:\n$ helm delete --purge traefik-operator $ kubectl delete namespace traefik If you set up a database for JRF:\n$ /tmp/weblogic-kubernetes-operator/kubernetes/samples/scripts/create-oracle-db-service/stop-db-service.sh Delete the operator and its namespace:\n$ helm delete --purge sample-weblogic-operator $ kubectl delete namespace sample-weblogic-operator-ns Delete the domain\u0026rsquo;s namespace:\n$ kubectl delete namespace sample-domain1-ns Delete the images you may have created in this sample:\n$ docker image rm model-in-image:WLS-v1 $ docker image rm model-in-image:WLS-v2 $ docker image rm model-in-image:JRF-v1 $ docker image rm model-in-image:JRF-v2 References For references to the relevant user documentation, see:\n Model in Image user documentation Oracle WebLogic Server Deploy Tooling Oracle WebLogic Image Tool "
},
{
"uri": "/weblogic-kubernetes-operator/samples/simple/rest/",
diff --git a/docs/2.6.0/samples/simple/domains/model-in-image/index.html b/docs/2.6.0/samples/simple/domains/model-in-image/index.html
index 172388cffc1..5879a16a9cc 100644
--- a/docs/2.6.0/samples/simple/domains/model-in-image/index.html
+++ b/docs/2.6.0/samples/simple/domains/model-in-image/index.html
@@ -3092,7 +3092,7 @@
Prerequisites for all domain types
Download the latest WebLogic Deploy Tooling and WebLogic Image Tool installer ZIP files to your /tmp/mii-sample/model-images directory.
Both WDT and WIT are required to create your Model in Image Docker images. Download the latest version of each tool’s installer ZIP file to the /tmp/mii-sample/model-images directory.
A class which implements different behavior based on the strategy defined for finding domain namespaces.
+ Uses the Visitor pattern (see https://en.wikipedia.org/wiki/Visitor_pattern). Implementations should
+ either define all of the strategy-specific methods or at least one of them as well as the default selection.
Returns the enum constant of this type with the specified name.
+The string must match exactly an identifier used to declare an
+enum constant in this type. (Extraneous whitespace characters are
+not permitted.)
+
+
Parameters:
+
name - the name of the enum constant to be returned.
+
Returns:
+
the enum constant with the specified name
+
Throws:
+
java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
+
java.lang.NullPointerException - if the argument is null
Returns the enum constant of this type with the specified name.
+The string must match exactly an identifier used to declare an
+enum constant in this type. (Extraneous whitespace characters are
+not permitted.)
+
+
Parameters:
+
name - the name of the enum constant to be returned.
+
Returns:
+
the enum constant with the specified name
+
Throws:
+
java.lang.IllegalArgumentException - if this enum type has no constant with the specified name
+
java.lang.NullPointerException - if the argument is null
public class StuckPodProcessing
+extends java.lang.Object
+
Under certain circumstances, when a Kubernetes node goes down, it may mark its pods as terminating, but never
+ actually remove them. This code detects such cases, deletes the pods and triggers the necessary make-right flows.
MainTuning(int domainPresenceFailureRetrySeconds,
int domainPresenceFailureRetryMaxCount,
int domainPresenceRecheckIntervalSeconds,
-int targetNamespaceRecheckIntervalSeconds,
+int domainNamespaceRecheckIntervalSeconds,
int statusUpdateTimeoutSeconds,
int unchangedCountToDelayStatusRecheck,
+int stuckPodRecheckSeconds,
long initialShortDelay,
long eventualLongDelay)
public MainTuning(int domainPresenceFailureRetrySeconds,
int domainPresenceFailureRetryMaxCount,
int domainPresenceRecheckIntervalSeconds,
-int targetNamespaceRecheckIntervalSeconds,
+int domainNamespaceRecheckIntervalSeconds,
int statusUpdateTimeoutSeconds,
int unchangedCountToDelayStatusRecheck,
+int stuckPodRecheckSeconds,
long initialShortDelay,
long eventualLongDelay)
On a watch call: when specified, shows changes that occur after that particular version of a
+ resource. Defaults to changes from the beginning of history. On a list call: when specified,
+ requests values at least as recent as the specified value. Defaults to returning the result
+ from remote storage based on quorum-read flag; - if it's 0, then we simply return what we
+ currently have in cache, no guarantee; - if set to non zero, then the result is at least as
+ fresh as given version.
Kubernetes mounts ConfigMaps in the Pod's file-system as directories where the contained files
are named with the keys and the contents of the file are the values.