diff --git a/docs/_static/css/custom.css b/docs/_static/css/custom.css
new file mode 100644
index 00000000..d0464358
--- /dev/null
+++ b/docs/_static/css/custom.css
@@ -0,0 +1,6 @@
+img.boxed-img {
+ border: 1px solid #ddd;
+ border-radius: 4px;
+ padding: 5px;
+ margin-bottom: 20px;
+}
diff --git a/docs/authors.rst b/docs/authors.rst
deleted file mode 100644
index e122f914..00000000
--- a/docs/authors.rst
+++ /dev/null
@@ -1 +0,0 @@
-.. include:: ../AUTHORS.rst
diff --git a/docs/conf.py b/docs/conf.py
index e5e01adc..af5c5780 100755
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -15,8 +15,8 @@
import sys
import os
-import recommonmark
-from recommonmark import transform as md_transform
+# import recommonmark
+# from recommonmark import transform as md_transform
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
@@ -37,7 +37,8 @@
for module in ('cinderlib', 'google', 'google.protobuf', 'eventlet',
'kubernetes', 'grpc', 'concurrent', 'os_brick',
- 'os_brick.initiator', 'oslo_concurrency'):
+ 'os_brick.initiator', 'oslo_concurrency', 'oslo_log',
+ 'oslo_context'):
modulefaker.fake_module(module)
@@ -55,10 +56,6 @@
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
-source_parsers = {
- '.md': 'recommonmark.parser.CommonMarkParser',
-}
-
# The suffix of source filenames.
source_suffix = ['.rst', '.md']
@@ -69,8 +66,8 @@
master_doc = 'index'
# General information about the project.
-project = u'Ember CSI plugin'
-copyright = u"2018, Gorka Eguileor"
+project = u'Ember CSI'
+copyright = u"2018, Red Hat"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
@@ -145,12 +142,12 @@
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
-#html_logo = None
+html_logo = "img/logo.svg"
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
-#html_favicon = None
+html_favicon = "img/favicon.png"
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
@@ -158,6 +155,11 @@
# "default.css".
html_static_path = ['_static']
+html_css_files = [
+ 'css/custom.css',
+ 'custom.css',
+]
+
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
@@ -288,11 +290,21 @@
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
-github_doc_root = 'https://github.com/akrog/ember-csi/tree/master/docs/'
+rst_prolog = """
+.. |br| raw:: html
+
+   <br/>
+"""
+
+github_doc_root = 'https://github.com/embercsi/ember-csi/tree/master/docs/'
def setup(app):
- app.add_config_value('recommonmark_config', {
- 'url_resolver': lambda url: github_doc_root + url,
- 'auto_toc_tree_section': 'Contents',
- }, True)
- app.add_transform(md_transform.AutoStructify)
+    # Sphinx 1.6 has no html_css_files support (added in 1.8), so register
+    # the stylesheets manually.
+    for filename in html_css_files:
+        app.add_stylesheet(filename)
+
+# def setup(app):
+# app.add_config_value('recommonmark_config', {
+# 'url_resolver': lambda url: github_doc_root + url,
+# 'auto_toc_tree_section': 'Contents',
+# }, True)
+# app.add_transform(md_transform.AutoStructify)
diff --git a/docs/contributing.rst b/docs/contributing.rst
deleted file mode 100644
index e582053e..00000000
--- a/docs/contributing.rst
+++ /dev/null
@@ -1 +0,0 @@
-.. include:: ../CONTRIBUTING.rst
diff --git a/docs/ember.rst b/docs/ember.rst
new file mode 100644
index 00000000..a452cf3a
--- /dev/null
+++ b/docs/ember.rst
@@ -0,0 +1,111 @@
+Introduction
+============
+
+The Container Storage Interface (`CSI`_) is a standard for provisioning and using block and file storage systems in containerized workloads on Container Orchestration Systems (COs) like OpenShift.
+
+Using this interface, new storage systems can be exposed to COs without having to change the CO's code.
+
+Ember-CSI is an Open Source implementation of the `CSI`_ specification supporting storage solutions from multiple vendors by leveraging `cinderlib <https://docs.openstack.org/cinderlib/latest/>`_, a library that provides an abstraction layer over the storage drivers.
+
+
+Features
+--------
+
+Ember-CSI supports `CSI`_ versions 0.2, 0.3, 1.0, and 1.1 providing the following features:
+
+- Volume provisioning: file and block types
+- Volume cloning
+- Volume deletion
+- Snapshot creation
+- Create volume from a snapshot
+- Snapshots deletion
+- Listing volumes with pagination
+- Listing snapshots with pagination
+- Attaching/Detaching volumes
+- Multi pod attaching (block mode only)
+- Storage capacity reporting
+- Node probing
+
+Limitations
+-----------
+
+There are 2 types of volumes in OpenShift and Kubernetes, Block and File, and while both are supported by Ember-CSI, behind the scenes all its storage drivers work with block storage systems.
+
+To provide File volumes from block storage, Ember-CSI connects the volumes to the host, formats them, and presents them to the Orchestrator for the containerized workloads.
+
+Since File volumes are locally attached block volumes, they cannot be shared between containers, so the Shared Access (RWX) *Access Mode* is not supported for them.
+
+This limitation does not apply to Block volumes, which can be mounted on multiple hosts simultaneously; in that case it is the application's responsibility to orchestrate proper access to the disk.
+
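+For example, a claim that several pods can attach simultaneously must combine the ``ReadWriteMany`` access mode with ``volumeMode: Block`` (a minimal sketch using the default *Storage Class*):
+
+.. code-block:: yaml
+
+   apiVersion: v1
+   kind: PersistentVolumeClaim
+   metadata:
+     name: shared-block-pvc
+   spec:
+     accessModes:
+       - ReadWriteMany
+     volumeMode: Block
+     resources:
+       requests:
+         storage: 1Gi
+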
+Supported drivers
+-----------------
+
+Ember-CSI includes a good number of storage drivers, but due to limitations on hardware availability only a small number of them have been validated at one point or another. In alphabetical order, the validated drivers are:
+
+- HPE3PARFC
+- HPE3PARISCSI
+- KaminarioISCSI
+- LVMVolume
+- PowerMaxFC
+- PowerMaxISCSI
+- QnapISCSI
+- RBD
+- SolidFire
+- SynoISCSI
+- XtremIOFC
+- XtremIOISCSI
+
+The remaining drivers included in Ember-CSI have not been validated yet:
+
+- ACCESSIscsi
+- AS13000
+- FJDXFC
+- FJDXISCSI
+- FlashSystemFC
+- FlashSystemISCSI
+- GPFS
+- GPFSRemote
+- HPELeftHandISCSI
+- HPMSAFC
+- HPMSAISCSI
+- HedvigISCSI
+- HuaweiFC
+- HuaweiISCSI
+- IBMStorage
+- InStorageMCSFC
+- InStorageMCSISCSI
+- InfortrendCLIFC
+- InfortrendCLIISCSI
+- LenovoFC
+- LenovoISCSI
+- LinstorDrbd
+- LinstorIscsi
+- MStorageFC
+- MStorageISCSI
+- MacroSANFC
+- MacroSANISCSI
+- NetAppCmodeFibreChannel
+- NetAppCmodeISCSI
+- NexentaISCSI
+- PSSeriesISCSI
+- PureFC
+- PureISCSI
+- Quobyte
+- RSD
+- SCFC
+- SCISCSI
+- SPDK
+- Sheepdog
+- StorPool
+- StorwizeSVCFC
+- StorwizeSVCISCSI
+- Unity
+- VNX
+- VZStorage
+- VxFlexOS
+- WindowsISCSI
+- WindowsSmbfs
+- ZadaraVPSAISCS
+
+
+.. _CSI: https://github.com/container-storage-interface/spec
diff --git a/docs/history.md b/docs/history.md
deleted file mode 120000
index a5333ae4..00000000
--- a/docs/history.md
+++ /dev/null
@@ -1 +0,0 @@
-../HISTORY.md
\ No newline at end of file
diff --git a/docs/img/advanced-settings.png b/docs/img/advanced-settings.png
new file mode 100644
index 00000000..b6d6483f
Binary files /dev/null and b/docs/img/advanced-settings.png differ
diff --git a/docs/img/favicon.png b/docs/img/favicon.png
new file mode 100644
index 00000000..1d5f8080
Binary files /dev/null and b/docs/img/favicon.png differ
diff --git a/docs/img/install/01-operatorhub.png b/docs/img/install/01-operatorhub.png
new file mode 100644
index 00000000..1f423855
Binary files /dev/null and b/docs/img/install/01-operatorhub.png differ
diff --git a/docs/img/install/02-operatorhub-search-ember.png b/docs/img/install/02-operatorhub-search-ember.png
new file mode 100644
index 00000000..e746c2bb
Binary files /dev/null and b/docs/img/install/02-operatorhub-search-ember.png differ
diff --git a/docs/img/install/03-confirm-community.png b/docs/img/install/03-confirm-community.png
new file mode 100644
index 00000000..61793f1b
Binary files /dev/null and b/docs/img/install/03-confirm-community.png differ
diff --git a/docs/img/install/04-install-1.png b/docs/img/install/04-install-1.png
new file mode 100644
index 00000000..9d7bb34c
Binary files /dev/null and b/docs/img/install/04-install-1.png differ
diff --git a/docs/img/install/05-install-2.png b/docs/img/install/05-install-2.png
new file mode 100644
index 00000000..4ba3576d
Binary files /dev/null and b/docs/img/install/05-install-2.png differ
diff --git a/docs/img/install/06-installing.png b/docs/img/install/06-installing.png
new file mode 100644
index 00000000..64c9f5c5
Binary files /dev/null and b/docs/img/install/06-installing.png differ
diff --git a/docs/img/install/07-succeeded.png b/docs/img/install/07-succeeded.png
new file mode 100644
index 00000000..a24ba1d5
Binary files /dev/null and b/docs/img/install/07-succeeded.png differ
diff --git a/docs/img/install/08-create-storage-banckend.png b/docs/img/install/08-create-storage-banckend.png
new file mode 100644
index 00000000..edad8413
Binary files /dev/null and b/docs/img/install/08-create-storage-banckend.png differ
diff --git a/docs/img/install/09-name-and-driver.png b/docs/img/install/09-name-and-driver.png
new file mode 100644
index 00000000..bc0707bb
Binary files /dev/null and b/docs/img/install/09-name-and-driver.png differ
diff --git a/docs/img/install/10-create-backend.png b/docs/img/install/10-create-backend.png
new file mode 100644
index 00000000..78d62d15
Binary files /dev/null and b/docs/img/install/10-create-backend.png differ
diff --git a/docs/img/install/11-EmberStorageBackends.png b/docs/img/install/11-EmberStorageBackends.png
new file mode 100644
index 00000000..4447906c
Binary files /dev/null and b/docs/img/install/11-EmberStorageBackends.png differ
diff --git a/docs/img/install/12-StatefulSet.png b/docs/img/install/12-StatefulSet.png
new file mode 100644
index 00000000..aec9a2f8
Binary files /dev/null and b/docs/img/install/12-StatefulSet.png differ
diff --git a/docs/img/install/13-DaemonSet.png b/docs/img/install/13-DaemonSet.png
new file mode 100644
index 00000000..9ecd487b
Binary files /dev/null and b/docs/img/install/13-DaemonSet.png differ
diff --git a/docs/img/install/14-ReplicaSets.png b/docs/img/install/14-ReplicaSets.png
new file mode 100644
index 00000000..a5444c6c
Binary files /dev/null and b/docs/img/install/14-ReplicaSets.png differ
diff --git a/docs/img/install/15-StorageClass.png b/docs/img/install/15-StorageClass.png
new file mode 100644
index 00000000..38378c1a
Binary files /dev/null and b/docs/img/install/15-StorageClass.png differ
diff --git a/docs/img/install/16-edit-annotations.png b/docs/img/install/16-edit-annotations.png
new file mode 100644
index 00000000..273f91de
Binary files /dev/null and b/docs/img/install/16-edit-annotations.png differ
diff --git a/docs/img/install/17-default-sc.png b/docs/img/install/17-default-sc.png
new file mode 100644
index 00000000..def67a5e
Binary files /dev/null and b/docs/img/install/17-default-sc.png differ
diff --git a/docs/img/logo.svg b/docs/img/logo.svg
new file mode 100644
index 00000000..94510c5f
--- /dev/null
+++ b/docs/img/logo.svg
@@ -0,0 +1,70 @@
+
+
\ No newline at end of file
diff --git a/docs/img/troubleshoot/01-debug-logs.png b/docs/img/troubleshoot/01-debug-logs.png
new file mode 100644
index 00000000..349e497a
Binary files /dev/null and b/docs/img/troubleshoot/01-debug-logs.png differ
diff --git a/docs/img/usage/01-PVCs.png b/docs/img/usage/01-PVCs.png
new file mode 100644
index 00000000..eb62211f
Binary files /dev/null and b/docs/img/usage/01-PVCs.png differ
diff --git a/docs/img/usage/02-create-pvc.png b/docs/img/usage/02-create-pvc.png
new file mode 100644
index 00000000..1bffc5b0
Binary files /dev/null and b/docs/img/usage/02-create-pvc.png differ
diff --git a/docs/img/usage/03-expand-action.png b/docs/img/usage/03-expand-action.png
new file mode 100644
index 00000000..ec639057
Binary files /dev/null and b/docs/img/usage/03-expand-action.png differ
diff --git a/docs/img/usage/04-expand-size.png b/docs/img/usage/04-expand-size.png
new file mode 100644
index 00000000..a4dcb4ca
Binary files /dev/null and b/docs/img/usage/04-expand-size.png differ
diff --git a/docs/img/usage/05-delete-volume.png b/docs/img/usage/05-delete-volume.png
new file mode 100644
index 00000000..c5a71799
Binary files /dev/null and b/docs/img/usage/05-delete-volume.png differ
diff --git a/docs/img/usage/06-delete-confirmation.png b/docs/img/usage/06-delete-confirmation.png
new file mode 100644
index 00000000..2fcb0da7
Binary files /dev/null and b/docs/img/usage/06-delete-confirmation.png differ
diff --git a/docs/index.rst b/docs/index.rst
index 1b637e4d..fdfb7ff0 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,21 +1,16 @@
-Welcome to Ember CSI plugin's documentation!
-============================================
+Ember CSI
+=========
-Contents:
+Welcome to the Ember-CSI documentation!
+
+Ember-CSI is a plugin to provision and use block and file storage in containerized workloads on Kubernetes and OpenShift.
+
+The documentation is organized into the following sections:
.. toctree::
:maxdepth: 2
- readme
+ ember
installation
usage
- contributing
- authors
- history
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
+ troubleshooting
diff --git a/docs/installation.rst b/docs/installation.rst
index b39265cf..e66c26d9 100644
--- a/docs/installation.rst
+++ b/docs/installation.rst
@@ -1,51 +1,148 @@
-.. highlight:: shell
-
-============
Installation
============
+Requirements
+------------
-Stable release
---------------
+Ember-CSI has the following requirements:
-To install Ember CSI plugin, run this command in your terminal:
+- Operating system: A Red Hat Linux distribution (support for other distributions is possible but not currently provided by the project):
+  - CentOS 7 and CentOS 8
+  - RHEL 7 and RHEL 8
+  - Fedora
-.. code-block:: console
+- Container Orchestrator: Both Kubernetes and OpenShift are supported:
+  - OpenShift: Recommended version 4.4 or newer. Supported from version 3.10 onward.
+  - Kubernetes: Recommended version 1.17 or newer. Supported from version 1.11 onward.
- $ pip install ember_csi
+- Storage solution: Access and credentials to a supported storage solution (see the list of supported drivers in the Introduction).
-This is the preferred method to install Ember CSI plugin, as it will always install the most recent stable release.
+- Network: Network connections must be set up appropriately.
+  - Controller nodes: Must have access to the storage management interface. Some drivers also require access to the storage data network.
+  - Worker nodes: Must have access to the storage data network.
-If you don't have `pip`_ installed, this `Python installation guide`_ can guide
-you through the process.
+- Services: Depending on the driver and its configuration, we may need additional services running on the worker nodes and, for some drivers, on the controller nodes as well.
-.. _pip: https://pip.pypa.io
-.. _Python installation guide: http://docs.python-guide.org/en/latest/starting/installation/
+  - iSCSI: For iSCSI drivers the iSCSI initiator daemon ``iscsid``, provided by the ``iscsi-initiator-utils`` package, must be configured and running. It can run on baremetal or in a container, as long as the appropriate directories from the host are mounted.
+  - Multipathing: When enabling multipathing on iSCSI and FC drivers we'll also need ``multipathd``, provided by the ``device-mapper-multipath`` package, configured and running. It can run on baremetal or in a container, as long as the appropriate directories from the host are mounted. A minimal setup sketch follows this list.
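+
+For example, a minimal baremetal setup on a RHEL-family host could look like this (a sketch; ``mpathconf`` is provided by ``device-mapper-multipath``):
+
+.. code-block:: shell
+
+   $ # Start and enable the iSCSI initiator daemon
+   $ sudo systemctl enable --now iscsid
+   $ # Create a default multipath configuration and start multipathd
+   $ sudo mpathconf --enable --with_multipathd y
+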
-From sources
-------------
-The sources for Ember CSI plugin can be downloaded from the `Github repo`_.
+OpenShift
+---------
+
+To eliminate configuration and deployment complexities and the errors that come with them, the recommended mechanism to deploy Ember-CSI is using its operator, which makes installing Ember-CSI a simple and intuitive process.
+
+After logging in to the OpenShift console as an administrator, we go to the OperatorHub:
+
+.. image:: img/install/01-operatorhub.png
+ :class: boxed-img
+ :scale: 75 %
+ :align: center
+
+Then we search for the Ember-CSI operator and click on it:
+
+.. image:: img/install/02-operatorhub-search-ember.png
+ :class: boxed-img
+ :align: center
+
+If we are installing the Community Operator we'll be required to confirm that we understand the implications. We click ``Continue``:
+
+.. image:: img/install/03-confirm-community.png
+ :class: boxed-img
+ :align: center
+
+And we are presented with the Ember-CSI Operator page, where we click ``Install``:
+
+.. image:: img/install/04-install-1.png
+ :class: boxed-img
+ :align: center
+
+And then ``Install`` again:
+
+.. image:: img/install/05-install-2.png
+ :class: boxed-img
+ :align: center
+
+This will trigger the download and execution of the operator container image. It will take a few seconds, and in the meantime we'll see that the installation is in progress, possibly with a couple of odd transient entries at the beginning:
+
+.. image:: img/install/06-installing.png
+ :class: boxed-img
+ :align: center
+
+Once the operator reaches the ``Succeeded`` status we click on it:
+
+.. image:: img/install/07-succeeded.png
+ :class: boxed-img
+ :align: center
+
+Inside the Ember-CSI operator page we create a new ``Storage Backend`` instance:
+
+.. image:: img/install/08-create-storage-banckend.png
+ :class: boxed-img
+ :align: center
+
+Backends can be configured using YAML, but this is a cumbersome process usually reserved for automated systems such as CI, so the Ember-CSI team recommends using the form interface when doing things manually, which is the default on OpenShift 4.5.
+
+In the form we should change the *name* field from *default* to a unique and meaningful name that identifies this backend. Then we go to the *Driver* dropdown and select the driver for our storage backend. After selecting the appropriate driver, the relevant configuration options for it will be displayed.
+
+.. image:: img/install/09-name-and-driver.png
+ :class: boxed-img
+ :align: center
+
+After setting the configuration options we click *Create* at the bottom of the page:
+
+.. image:: img/install/10-create-backend.png
+ :class: boxed-img
+ :align: center
+
+And a new *EmberStorageBackend* entity will be created. Don't wait for the *Status* to change, since it won't:
+
+.. image:: img/install/11-EmberStorageBackends.png
+ :class: boxed-img
+ :scale: 75 %
+ :align: center
+
+
+You can verify that the deployment is complete by going to the *Stateful Sets*, *Daemon Sets*, and *Replica Sets* pages in the *Workloads* section and checking that the deployed pods are running:
+
+.. image:: img/install/12-StatefulSet.png
+ :class: boxed-img
+ :scale: 75 %
+ :align: center
-You can either clone the public repository:
+.. image:: img/install/13-DaemonSet.png
+ :class: boxed-img
+ :scale: 75 %
+ :align: center
-.. code-block:: console
+.. image:: img/install/14-ReplicaSets.png
+ :class: boxed-img
+ :scale: 75 %
+ :align: center
- $ git clone git://github.com/akrog/ember_csi
+You can also check that a new *Storage Class* has been created in *Storage* > *Storage Classes*. The name of the new class will be *example.ember-csi.io*, where *example* is the name you gave to the *Storage Backend* in the form:
-Or download the `tarball`_:
+.. image:: img/install/15-StorageClass.png
+ :class: boxed-img
+ :scale: 75 %
+ :align: center
-.. code-block:: console
+We can set this *Storage Class* as the default class by going to its actions and selecting *Edit Annotations*:
- $ curl -OL https://github.com/akrog/ember_csi/tarball/master
+.. image:: img/install/16-edit-annotations.png
+ :class: boxed-img
+ :scale: 75 %
+ :align: center
-Once you have a copy of the source, you can install it with:
+And then adding the key ``storageclass.kubernetes.io/is-default-class`` with the value ``true``:
-.. code-block:: console
+.. image:: img/install/17-default-sc.png
+ :class: boxed-img
+ :align: center
- $ python setup.py install
+.. warning:: If you already have a default *Storage Class* and want to change it to this one, you'll need to modify the current default by removing the annotation or setting it to ``false``.
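+
+The same can be done from the command line by patching the *Storage Class* (assuming it is named *example.ember-csi.io*):
+
+.. code-block:: shell
+
+   $ oc patch storageclass example.ember-csi.io -p \
+       '{"metadata": {"annotations": {"storageclass.kubernetes.io/is-default-class": "true"}}}'
+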
+If we have configured everything correctly, we'll now be able to use our storage solution in OpenShift through the new ``StorageClass`` created by the operator. The :doc:`usage` section has information on how to use the new Storage Backend.
-.. _Github repo: https://github.com/akrog/ember_csi
-.. _tarball: https://github.com/akrog/ember_csi/tarball/master
+If you see problems in the new *Stateful*, *Daemon*, or *Replica Sets*, please refer to the :doc:`troubleshooting` guide for details on how to resolve installation issues.
diff --git a/docs/readme.md b/docs/readme.md
deleted file mode 120000
index 32d46ee8..00000000
--- a/docs/readme.md
+++ /dev/null
@@ -1 +0,0 @@
-../README.md
\ No newline at end of file
diff --git a/docs/troubleshooting.rst b/docs/troubleshooting.rst
new file mode 100644
index 00000000..c1794794
--- /dev/null
+++ b/docs/troubleshooting.rst
@@ -0,0 +1,200 @@
+Troubleshooting
+===============
+
+The main tools used to investigate issues between Ember-CSI and the Orchestrator are the OpenShift/Kubernetes status information and the logs.
+
+Ember-CSI runs 2 types of services: the controller and the node. The controller takes care of management operations (create, delete, map/unmap, snapshots, etc.), while the node service takes care of the local attach and detach on the hosts running the pods.
+
+These services follow the CSI specification, exposing all their operations through a gRPC interface that needs to be translated into OpenShift/Kubernetes objects. The sidecars present in the Ember-CSI pods are responsible for the translation.
+
+
+Status
+------
+
+The first thing we need to do when we encounter an issue is make sure that all the containers in the Ember-CSI pods, the driver container and the sidecars, are running and that their restart counts are not increasing.
+
+Instead of looking at all the pods in our deployment, we can use the fact that the operator adds the ``embercsi_cr`` label to filter for the pods of a specific backend:
+
+.. code-block:: shell
+
+ $ # On OpenShift
+   $ oc get pod -n <namespace> -l embercsi_cr=<backend-name> -o wide
+
+ $ # On Kubernetes
+   $ kubectl get pod -n <namespace> -l embercsi_cr=<backend-name> -o wide
+
+Or the pods for all the Ember-CSI backends:
+
+.. code-block:: shell
+
+   $ oc get pod -n <namespace> -l embercsi_cr -o wide
+
+When using an iSCSI or FC backend, if we encounter issues on the following operations, we need to make sure that the system daemons required for the connections are running and not reporting errors:
+
+- Creating a volume from a source (volume or snapshot): on some drivers this is not a backend-assisted operation, so the resources in the backend need to be accessed from the controller node.
+
+- Creating or destroying a pod that uses an Ember-CSI volume.
+
+If we are running the daemons as systemd services on baremetal, we can check them by running:
+
+.. code-block:: shell
+
+ $ systemctl status iscsid multipathd
+ $ sudo journalctl -u iscsid -u multipathd
+
+On the other hand, if we are running the daemons in the foreground inside containers, we'll have to check the status and logs of those containers themselves.
+
+Logs
+----
+
+One of the most versatile tools to debug issues in general is the logs, and Ember-CSI is no different.
+
+The logs we'll have to check will depend on the operations that are failing:
+
+- If the failing operation is creating/deleting a volume or creating/deleting a snapshot, we should look into the Ember-CSI controller pod's logs, primarily the driver container's.
+
+- Creating/destroying a pod that uses a volume is one of the most complex operations, requiring the controller pod, the node pod, and the kubelet, so we'll have to look into all their logs (an example of how to get them follows this list).
+
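+For example, for a backend named *example* we could fetch the logs like this (a sketch; ``<driver-container>`` is one of the names returned by the first command):
+
+.. code-block:: shell
+
+   $ # List the containers in the controller pod (driver + sidecars)
+   $ oc get pod example-controller-0 -o jsonpath='{.spec.containers[*].name}'
+   $ # Check the logs of a specific container, e.g. the driver's
+   $ oc logs example-controller-0 -c <driver-container>
+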
+By default Ember-CSI logs at the *INFO* level; this can only be changed to *DEBUG* when creating the Storage Backend, in the *Advanced Settings* section:
+
+.. image:: img/advanced-settings.png
+ :class: boxed-img
+ :scale: 75 %
+ :align: center
+
+By checking the *Debug logs* checkbox:
+
+.. image:: img/troubleshoot/01-debug-logs.png
+ :class: boxed-img
+ :align: center
+
+
+CSC
+---
+
+When debugging issues on complex flows, it's very convenient to be able to test the individual tasks that form the flows. For that purpose the Ember-CSI team has created containers with the ``csc`` tool for each version of the CSI spec.
+
+The ``csc`` tool allows us to execute specific CSI operations directly against an Ember-CSI service.
+
+For example, we could run a create volume operation completely bypassing the Orchestrator. This way we could focus on the Ember-CSI code itself and its interactions with the storage solution, removing interactions with other elements such as the OpenShift/Kubernetes scheduler and the sidecars.
+
+Neither Kubernetes nor OpenShift allows adding containers to a running Pod, but there is an `Alpha feature called Ephemeral Containers <https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers/>`_ designed for debugging purposes that can do it.
+
+We need to have the ``EphemeralContainers`` feature gate enabled in our Orchestrator, specifically on the API server, Scheduler, and Kubelet: ``--feature-gates=EphemeralContainers=true``.
+
+If it's enabled, we can add an Ephemeral Container with the ``csc`` command to our running pod.
+
+For the following steps we'll assume we have used the name ``example`` as our Backend name.
+
+First we check the CSI spec version that Ember-CSI is using:
+
+.. code-block:: shell
+
+ $ oc describe pod example-controller-0|grep X_CSI_SPEC_VERSION
+ X_CSI_SPEC_VERSION: 1.0
+
+Now that we know we are running CSI v1.0, we know the ``csc`` container we want to use: *embercsi/csc:v1.0.0*.
+
+With that we can write the ``csc.json`` file to add the Ephemeral Container:
+
+.. code-block:: json
+
+   {
+     "apiVersion": "v1",
+     "kind": "EphemeralContainers",
+     "metadata": {
+       "name": "example-controller-0"
+     },
+     "ephemeralContainers": [
+       {
+         "command": ["tail"],
+         "args": ["-f", "/dev/null"],
+         "image": "embercsi/csc:v1.0.0",
+         "imagePullPolicy": "IfNotPresent",
+         "name": "csc",
+         "stdin": true,
+         "tty": true,
+         "terminationMessagePolicy": "File",
+         "env": [
+           {
+             "name": "CSI_ENDPOINT",
+             "value": "unix:///csi-data/csi.sock"
+           }
+         ],
+         "volumeMounts": [
+           {
+             "mountPath": "/csi-data",
+             "mountPropagation": "HostToContainer",
+             "name": "socket-dir"
+           }
+         ]
+       }
+     ]
+   }
+
+And, assuming we don't have any other Ephemeral Containers, we add it by replacing the current value:
+
+.. code-block:: shell
+
+ $ oc replace --raw /api/v1/namespaces/default/pods/example-controller-0/ephemeralcontainers -f csc.json
+
+If we don't want to create a file, we can do a one-liner by using ``echo`` and piping it to the ``oc replace`` command, setting the file contents to *stdin* with ``-f -``.
+
+Now that we have added the Ephemeral Container, we can confirm it is running by looking at the description of the controller pod, going to the ``Ephemeral Containers`` section, and checking the ``State``:
+
+.. code-block:: shell
+
+   $ oc describe pod example-controller-0
+
+   ...
+
+   Ephemeral Containers:
+     csc:
+       Container ID:   docker://e52d25a53af77a6f660d171504aa9dc6c2c3d405a9af20451054fadba969c84a
+       Image:          embercsi/csc:v1.0.0
+       Image ID:       docker-pullable://embercsi/csc@sha256:5433e0042725398b9398be1b73d43cc96c77893cf4b77cafca77001fa533cd29
+       Port:           <none>
+       Host Port:      <none>
+       Command:
+         sh
+       State:          Running
+         Started:      Thu, 13 Aug 2020 14:18:23 +0000
+       Ready:          False
+       Restart Count:  0
+       Environment:
+         CSI_ENDPOINT:  unix:///csi-data/csi.sock
+       Mounts:
+         /csi-data from socket-dir (rw)
+
+Once the ``csc`` container is running, we can execute ``csc`` commands by attaching to its shell. For example, to see the help:
+
+.. code-block:: shell
+
+   $ oc attach -it example-controller-0 -c csc
+   If you don't see a command prompt, try pressing enter.
+   / # csc
+   NAME
+       csc -- a command line container storage interface (CSI) client
+
+   SYNOPSIS
+       csc [flags] CMD
+
+   AVAILABLE COMMANDS
+       controller
+       identity
+       node
+
+   Use "csc -h,--help" for more information
+
+.. warning:: Just like with normal containers, once you add an Ephemeral Container to a pod you cannot remove it, so be sure to detach from the container and not ``exit`` the shell, or the container will no longer be running and you won't be able to use it (you cannot run ``exec`` on an Ephemeral Container).
+
+.. note:: To detach from the ``csc`` container shell you must type the escape sequence *Ctrl+P* followed by *Ctrl+Q*.
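+
+Once attached we can issue individual CSI calls. For example (a sketch; the exact subcommands depend on the ``csc`` version):
+
+.. code-block:: shell
+
+   / # csc identity plugin-info
+   / # csc controller list-volumes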
+
+
+CRDs
+----
+
+Ember-CSI uses the OpenShift/Kubernetes etcd service to store metadata of its resources in the form of CRDs. The existing CRDs are:
+
+- Volume: Stores each volume's status as well as the information necessary to locate them in the storage solution.
+- Snapshot: Stores the information necessary to locate each snapshot in the storage solution.
+- Connection: Stores the connection information needed for a node to connect to a volume.
+- KeyValue: Stores the connector information needed to map the volumes to the nodes on the storage solution.
+
+These CRDs are just JSON dictionaries with all the information Ember-CSI needs to operate, and in some cases it can be useful to examine them to see internal information.
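+
+These resources can be examined like any other OpenShift/Kubernetes object. For example (a sketch; the exact resource names can be taken from the CRD list):
+
+.. code-block:: shell
+
+   $ # Find the CRDs registered by Ember-CSI
+   $ oc get crd | grep ember-csi
+   $ # Dump one of the resources to look at its metadata
+   $ oc get <crd-name> -o yaml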
diff --git a/docs/usage.rst b/docs/usage.rst
index 9bb726a1..c449d42c 100644
--- a/docs/usage.rst
+++ b/docs/usage.rst
@@ -1,7 +1,259 @@
-=====
Usage
=====
-To use Ember CSI plugin in a project::
+Now that we have completed the :doc:`installation` of Ember-CSI, we can manage our Storage Backend in our Container Orchestrator.
+
+In this section examples will be provided both for the OpenShift Web Console and for the command line in the form of YAML manifests.
+
+The same YAML manifests work on Kubernetes and OpenShift; the only difference is the command to invoke: ``kubectl`` for Kubernetes and ``oc`` for OpenShift:
+
+.. code-block:: shell
+
+ $ # On OpenShift
+ $ oc apply -f manifest.yaml
+
+ $ # On Kubernetes
+ $ kubectl apply -f manifest.yaml
+
+
+.. note:: In all the examples we'll assume we created the *Storage Backend* with the default name *example* using the Operator, and that the automatically created *Storage Class* is *example.ember-csi.io*.
+
+
+Volume creation
+---------------
+
+We can create 2 types of volumes, Block and File, and both are supported by Ember-CSI, but the OpenShift form doesn't allow specifying the type on creation, so it always defaults to File.
+
+To create a volume we go to *Storage* > *Persistent Volume Claims* and click on *Create Persistent Volume Claim*.
+
+.. image:: img/usage/01-PVCs.png
+ :class: boxed-img
+ :scale: 75 %
+ :align: center
+
+On the next page we must select the *Storage Class* created by the operator, give the *PVC* a unique name, and select the *Access Mode* and the size.
+
+.. image:: img/usage/02-create-pvc.png
+ :class: boxed-img
+ :align: center
+
+.. warning:: Ember-CSI only supports the Shared Access (RWX) *Access Mode* for Block volumes.
+
+.. note:: OpenShift doesn't support selecting the type of volume we want to create, so we'll have to use YAML if we want to create a Block volume.
+
+To select the type of volume in our YAML we use the ``volumeMode`` parameter. Acceptable values are ``Block`` and ``Filesystem``, the default being ``Filesystem``.
+
+Example of a *PVC* manifest using this default:
+
+.. code-block:: yaml
+
+   apiVersion: v1
+   kind: PersistentVolumeClaim
+   metadata:
+     name: my-pvc
+   spec:
+     accessModes:
+       - ReadWriteOnce
+     resources:
+       requests:
+         storage: 1Gi
+     storageClassName: example.ember-csi.io
+
+
+Similar *PVC* example for a Block volume:
+
+.. code-block:: yaml
+
+   apiVersion: v1
+   kind: PersistentVolumeClaim
+   metadata:
+     name: my-block-pvc
+   spec:
+     accessModes:
+       - ReadWriteOnce
+     volumeMode: Block
+     resources:
+       requests:
+         storage: 3Gi
+     storageClassName: example.ember-csi.io
+
+
+Using volumes
+-------------
+
+Using a dynamically created *PVC* is as easy as adding a ``persistentVolumeClaim`` parameter with the ``claimName`` in the ``volumes`` section of our manifest:
+
+.. code-block:: yaml
+
+   kind: Pod
+   apiVersion: v1
+   metadata:
+     name: my-app
+   spec:
+     containers:
+       - name: my-frontend
+         image: busybox
+         volumeMounts:
+           - mountPath: "/data"
+             name: my-csi-volume
+         command: [ "sleep", "1000000" ]
+
+     volumes:
+       - name: my-csi-volume
+         persistentVolumeClaim:
+           claimName: my-pvc
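+
+For a Block *PVC* the container uses ``volumeDevices`` with a ``devicePath`` instead of ``volumeMounts``. A sketch reusing the ``my-block-pvc`` claim from above (the device path inside the container is arbitrary):
+
+.. code-block:: yaml
+
+   kind: Pod
+   apiVersion: v1
+   metadata:
+     name: my-block-app
+   spec:
+     containers:
+       - name: my-frontend
+         image: busybox
+         volumeDevices:
+           - devicePath: /dev/ember0
+             name: my-csi-block-volume
+         command: [ "sleep", "1000000" ]
+
+     volumes:
+       - name: my-csi-block-volume
+         persistentVolumeClaim:
+           claimName: my-block-pvc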
+
+
+Expanding Volumes
+-----------------
+
+We can expand already created volumes to get more space without losing the existing data. The operation is called *expanding* and it's very straightforward: in the Web Console we just go to the *PVC*'s actions and select *Expand PVC*.
+
+.. image:: img/usage/03-expand-action.png
+ :class: boxed-img
+ :scale: 75 %
+ :align: center
+
+Then we write the new size, which must be greater than or equal to the existing size, and click on *Expand*.
+
+.. image:: img/usage/04-expand-size.png
+ :class: boxed-img
+ :align: center
+
+When using the command line and a YAML manifest, we just need to modify the original contents with the new ``storage`` size. It's important to use ``apply``, not ``create``, on the command line:
+
+.. code-block:: yaml
+
+   apiVersion: v1
+   kind: PersistentVolumeClaim
+   metadata:
+     name: csi-pvc
+   spec:
+     accessModes:
+       - ReadWriteOnce
+     resources:
+       requests:
+         storage: 2Gi
+     storageClassName: example.ember-csi.io
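+
+Alternatively we can patch the claim directly, without a manifest (a sketch for the *csi-pvc* claim above):
+
+.. code-block:: shell
+
+   $ oc patch pvc csi-pvc -p '{"spec": {"resources": {"requests": {"storage": "2Gi"}}}}'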
+
+
+Volume cloning
+--------------
+
+Volume cloning is the process of creating a new volume with the same contents as the source volume.
+
+The new volume must be greater than or equal in size to the original one, and the source volume must be specified in the ``dataSource`` parameter, which is not yet available in the OpenShift Web Console, so we'll have to use YAML:
+
+.. code-block:: yaml
+
+   apiVersion: v1
+   kind: PersistentVolumeClaim
+   metadata:
+     name: cloned-vol
+   spec:
+     storageClassName: example.ember-csi.io
+     volumeMode: Block
+     dataSource:
+       name: my-block-pvc
+       kind: PersistentVolumeClaim
+     accessModes:
+       - ReadWriteOnce
+     resources:
+       requests:
+         storage: 5Gi
+
+
+Snapshot creation
+-----------------
+
+During the deployment phase the Operator also creates a *VolumeSnapshotClass* for our Storage Backend with the same name as the *StorageClass* so we can easily create snapshots.
+
+.. note:: As of OpenShift 4.5 the Web Console doesn't have support for snapshots, but the necessary code is being merged in master, so it will most likely be available in OpenShift 4.6.
+
+We'll have to use a YAML manifest with the ``source`` parameter defining the volume we want to snapshot:
+
+.. code-block:: yaml
+
+   apiVersion: snapshot.storage.k8s.io/v1beta1
+   kind: VolumeSnapshot
+   metadata:
+     name: my-snapshot
+   spec:
+     volumeSnapshotClassName: example.ember-csi.io
+     source:
+       persistentVolumeClaimName: my-block-pvc
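+
+Once created, we can check from the command line that the snapshot is ready to be used as a source (``readyToUse`` is part of the ``v1beta1`` snapshot status):
+
+.. code-block:: shell
+
+   $ oc get volumesnapshot my-snapshot -o jsonpath='{.status.readyToUse}'
+   true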
+
+
+Restoring a snapshot
+--------------------
+
+To restore an already created snapshot we'll have to create a new volume and use our snapshot as its source.
+
+The new volume must be greater than or equal in size to the snapshot.
+
+.. code-block:: yaml
+
+   apiVersion: v1
+   kind: PersistentVolumeClaim
+   metadata:
+     name: restored-snapshot
+   spec:
+     storageClassName: example.ember-csi.io
+     dataSource:
+       name: my-snapshot
+       kind: VolumeSnapshot
+       apiGroup: snapshot.storage.k8s.io
+     accessModes:
+       - ReadWriteOnce
+     resources:
+       requests:
+         storage: 3Gi
+
+
+Volume deletion
+---------------
+
+The *Storage Class* created by the operator is defined with a ``Delete`` ``ReclaimPolicy``, which means that once we delete the *PVC* the dynamically created *PV* will be deleted.
+
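+We can confirm the policy from the command line (a sketch):
+
+.. code-block:: shell
+
+   $ oc get storageclass example.ember-csi.io -o jsonpath='{.reclaimPolicy}'
+   Delete
+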
+We can delete a volume in the OpenShift Web Console by going to *Storage* > *Persistent Volume Claims*, looking for the *PVC* we want to delete, and selecting *Delete Persistent Volume Claim* from its actions:
+
+.. image:: img/usage/05-delete-volume.png
+ :class: boxed-img
+ :scale: 75 %
+ :align: center
+
+Deletion requires confirmation, so we'll have to click *Delete*:
+
+.. image:: img/usage/06-delete-confirmation.png
+ :class: boxed-img
+ :align: center
+
+Deleting the *PVC* from the command line can be done using the name:
+
+.. code-block:: shell
+
+ $ oc delete pvc my-block-pvc
+
+Or with the manifest we used to create it:
+
+.. code-block:: shell
+
+ $ oc delete -f manifest.yaml
+
+
+Snapshot deletion
+-----------------
+
+.. note:: As of OpenShift 4.5 the Web Console doesn't have support for snapshots, but the necessary code is being merged in master, so it will most likely be available in OpenShift 4.6.
+
+Deleting a *VolumeSnapshot* from the command line can be done using the name:
+
+.. code-block:: shell
+
+   $ oc delete volumesnapshot my-snapshot
+
+Or with the manifest we used to create it:
+
+.. code-block:: shell
- import ember_csi
+ $ oc delete -f manifest.yaml
diff --git a/requirements_docs.txt b/requirements_docs.txt
index 1653f202..5139094a 100644
--- a/requirements_docs.txt
+++ b/requirements_docs.txt
@@ -1,5 +1,4 @@
Sphinx==1.6.5
-recommonmark==0.4.0
git+https://github.com/akrog/modulefaker.git#egg=modulefaker
git+https://github.com/akrog/cindermock.git
git+https://github.com/akrog/nosbrickmock.git