diff --git a/Gopkg.lock b/Gopkg.lock index 5c557f4da6d..76f97ce2568 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -2,7 +2,7 @@ [[projects]] - digest = "1:5db04cb8ff8f56d4967716740797dbf00db609debb9ae4651b78b0822506e0f9" + digest = "1:aa65b4877ac225076b4362885e9122fdf6a8728f735749c24f1aeabcad9bdaba" name = "cloud.google.com/go" packages = [ "compute/metadata", @@ -27,7 +27,7 @@ revision = "7d2e70ef918f16bd6455529af38304d6d025c952" [[projects]] - digest = "1:ab0cb2c48ec5cbf953be2e7aa06d5b51cc77588cf55c4485f9f783c80f8ffff6" + digest = "1:ad9087a17c31657678fb860baae5d38c8d50b4f35abc510e4d043f8eb1b65139" name = "github.com/aws/aws-sdk-go" packages = [ "aws", @@ -100,7 +100,7 @@ version = "v1.3.0" [[projects]] - digest = "1:9f5e5fe15b95106e3b79c9d3cd9467a580b28ab44b2ccc07f617076d736ebdc2" + digest = "1:4189ee6a3844f555124d9d2656fe7af02fca961c2a9bad9074789df13a0c62e0" name = "github.com/docker/distribution" packages = [ "digestset", @@ -110,7 +110,7 @@ revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" [[projects]] - digest = "1:91b813714fca7fef5d9af1a078c345ed60383d0618af554595ab5d24620c8c18" + digest = "1:0cd7330848fe619c5d94cbc9653b7f4578717be10cf778b8ba80ce263b722972" name = "github.com/docker/docker" packages = [ "api/types", @@ -145,7 +145,7 @@ revision = "9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1" [[projects]] - digest = "1:de392926dbc8400489e966f069da47b500499aadb9a08bb17208fe2089a18f15" + digest = "1:58be7025fd84632dfbb8a398f931b5bdbbecc0390e4385df4ae56775487a0f87" name = "github.com/docker/spdystream" packages = [ ".", @@ -177,7 +177,7 @@ version = "v1.38.0" [[projects]] - digest = "1:744234c8d9c52b53c99cbba9e9b7bcb2ac9b3a02ab6c9d3d034c9db32ef11999" + digest = "1:a72f907ee592d4bf50da0453fb572a8987f6ffbcb403d55dfc975cd32eecbcc4" name = "github.com/go-ozzo/ozzo-validation" packages = [ ".", @@ -187,7 +187,7 @@ revision = "2c68ddd4ffc17941d8e940cb1264f68e1d8f0394" [[projects]] - digest = "1:834165718f3784080169bdd9dcb6f7cb164597849de5fb724c8eaa1f4cc083ec" + digest = "1:f83d740263b44fdeef3e1bce6147b5d7283fcad1a693d39639be33993ecf3db1" name = "github.com/gogo/protobuf" packages = [ "proto", @@ -212,7 +212,7 @@ revision = "02826c3e79038b59d737d3b1c0a1d937f71a4433" [[projects]] - digest = "1:cb22af0ed7c72d495d8be1106233ee553898950f15fd3f5404406d44c2e86888" + digest = "1:17fe264ee908afc795734e8c4e63db2accabaf57326dbf21763a7d6b86096260" name = "github.com/golang/protobuf" packages = [ "proto", @@ -248,7 +248,7 @@ revision = "44d81051d367757e1c7c6a5a86423ece9afcf63c" [[projects]] - digest = "1:1d6cd25e67e73ad2532c382470a6f89454deb95a34b5c3f925b89b69ad886c9f" + digest = "1:75eb87381d25cc75212f52358df9c3a2719584eaa9685cd510ce28699122f39d" name = "github.com/googleapis/gnostic" packages = [ "OpenAPIv2", @@ -259,7 +259,7 @@ revision = "0c5108395e2debce0d731cf0287ddf7242066aba" [[projects]] - digest = "1:70879989e4da569b5c58a027c0d56c3eb605d340723d21122e8d14386da2fd74" + digest = "1:0845aeb1fb067e1a252eecff35fda2cb497b9f4adaf302b9616fb980e21e2781" name = "github.com/gophercloud/gophercloud" packages = [ ".", @@ -296,7 +296,7 @@ revision = "6e5b7d64ea59ae3b8fdf0456e73e6c7638200e31" [[projects]] - digest = "1:2b7718265ba4d3a1935b9da9907fd30cb3d605fa2260845932b3728efa8f1c84" + digest = "1:878f0defa9b853f9acfaf4a162ba450a89d0050eff084f9fe7f5bd15948f172a" name = "github.com/gregjones/httpcache" packages = [ ".", @@ -324,7 +324,7 @@ revision = "a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4" [[projects]] - digest = "1:3925cc1c63e14def2e5a701f36d0e4b8339feac7cabf12294041d37178ce43c7" + digest = 
"1:ab3c9cd5ada38a60ddcaa2902229d35fbab0d6355b2be1f3dd48e6051192c47b" name = "github.com/hashicorp/hcl" packages = [ ".", @@ -341,7 +341,7 @@ revision = "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1" [[projects]] - digest = "1:d68188a2ae82faa38dfe5d384cb3cdc4d3221c6c64db667787aae81a1b3b681d" + digest = "1:3059c552a2921316909413c8a614610c55f90473206a2e0d1d32cf761b3165ef" name = "github.com/heketi/heketi" packages = [ "client/api/go-client", @@ -398,7 +398,7 @@ revision = "9577782540c1398b710ddae1b86268ba03a19b0c" [[projects]] - digest = "1:a6c16c54ee4b0fa8f03e4a109b0b2f7dd930f7f5b9328967a4da319ae22541ea" + digest = "1:4007d130ea960ece2c12269732e7d6002b8db1e1c5276be236e18440350e2e52" name = "github.com/magiconair/properties" packages = ["."] pruneopts = "UT" @@ -511,7 +511,7 @@ version = "v1.0.0" [[projects]] - digest = "1:2c9480e8463760b33c8139014be88b25b7e89b83f51d0cf2aa62437026dea95f" + digest = "1:5ea1368c1be4bf21c05cffe69e2341c0ee354ce5c7ed45291d8abd6ceea1ddeb" name = "github.com/prometheus/client_golang" packages = [ "prometheus", @@ -521,14 +521,14 @@ revision = "e7e903064f5e9eb5da98208bae10b475d4db0f8c" [[projects]] - digest = "1:807ecadc353783b201253e72be17f5f30dc8030078b05b1554d518b4f47ee8be" + digest = "1:9fe8945a11a9f588a9d306b4741cad634da9015a704271b9506810e2cc77fa17" name = "github.com/prometheus/client_model" packages = ["go"] pruneopts = "UT" revision = "fa8ad6fec33561be4280a8f0514318c79d7f6cb6" [[projects]] - digest = "1:665a846d608ec63ad8f889bef206425d12351eb3522da3a65ed2446dcd01a674" + digest = "1:0d5f8e2195ad2beef202367f3217c4a7981582d96ccf4876b9aa2c5c9c9b3510" name = "github.com/prometheus/common" packages = [ "expfmt", @@ -539,7 +539,7 @@ revision = "13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207" [[projects]] - digest = "1:5af1b18de552be69fccfa8d869fe4ab95ebb0de1f4fe47dd414d49b8b3d20880" + digest = "1:c78edab144d03422b52cd34d5fa4ffc9a59fef90b3afdcf2efc4dd333479f243" name = "github.com/prometheus/procfs" packages = [ ".", @@ -549,7 +549,7 @@ revision = "65c1f6f8f0fc1e2185eb9863a3bc751496404259" [[projects]] - digest = "1:8bf0ce9071708a0c554d39847f966e185be4d52e18643c956034dc7714f2d5e2" + digest = "1:c10331981912057a246099d926a3f9513cbac59c17c4a05919b7177edb1bed46" name = "github.com/spf13/afero" packages = [ ".", @@ -602,7 +602,7 @@ revision = "1a9d0bb9f541897e62256577b352fdbc1fb4fd94" [[projects]] - digest = "1:66afc5e76ffe653e47c986b655704d7a181e6cf1604d11d155141e9c3da03bdb" + digest = "1:1c1cae4005bff90221db40a495c58a2f166a08171d043c048e9c29065a5f46ba" name = "github.com/stretchr/testify" packages = [ "assert", @@ -621,7 +621,7 @@ [[projects]] branch = "master" - digest = "1:008e57973cf6d5f2484d53c021c4253b09d60798655653147eea45ad4b9d5c3b" + digest = "1:014ee502e4880ffba2a0103cefa6af78cf8767ce08bc29376e6b86cdcaa2ec06" name = "golang.org/x/crypto" packages = [ "curve25519", @@ -637,7 +637,7 @@ revision = "a2144134853fc9a27a7b1e3eb4f19f1a76df13c9" [[projects]] - digest = "1:1647a97fbcd142cb9323b5bc3cbde4103889b6d11ace8aadddc125a239e764d2" + digest = "1:804381b0d2453fbeedf313b7bf74057e142c9cc7e10f21e6927a49911b249bc4" name = "golang.org/x/net" packages = [ "bpf", @@ -656,7 +656,7 @@ revision = "1c05540f6879653db88113bc4a2b70aec4bd491f" [[projects]] - digest = "1:fe78cdb5774130710255adf3beaf9258c5bca264fd12b7d48b9c7f70ea6bb169" + digest = "1:ad764db92ed977f803ff0f59a7a957bf65cc4e8ae9dfd08228e1f54ea40392e0" name = "golang.org/x/oauth2" packages = [ ".", @@ -669,7 +669,7 @@ revision = "a6bd8cefa1811bd24b86f8902872e4e8225f74c4" [[projects]] - digest = 
"1:72d6244a51be9611f08994aca19677fcc31676b3e7b742c37e129e6ece4ad8fc" + digest = "1:3364d01296ce7eeca363e3d530ae63a2092d6f8efb85fb3d101e8f6d7de83452" name = "golang.org/x/sys" packages = [ "unix", @@ -679,7 +679,7 @@ revision = "1b2967e3c290b7c545b3db0deeda16e9be4f98a2" [[projects]] - digest = "1:11e5ba605f499e37162cc8ca25fb16d6ff66fbfdbdc43c92d6f1351cde975413" + digest = "1:97337ef8cb438f9e3a99ea91a300e916ed9a96fbf3ad50f9a020d30ea9f8692f" name = "golang.org/x/text" packages = [ "internal/gen", @@ -703,7 +703,7 @@ revision = "f51c12702a4d776e4c1fa9b0fabab841babae631" [[projects]] - digest = "1:89f06d65e6e3c5dc879f71f401a85219f642b84b285e847e7012d5c0a49ec275" + digest = "1:ce8cd932ad36eabc590baaa771c4c930be91a863f51bd681c6d422e919cd4ad2" name = "google.golang.org/api" packages = [ "compute/v1", @@ -716,7 +716,7 @@ revision = "8e296ef260056b6323d10727db40512dac6d92d5" [[projects]] - digest = "1:10eab6a94bbd813a6f162c1a89676b62b2d6c214190d1529ae2ebbde3c9e24b9" + digest = "1:c8907869850adaa8bd7631887948d0684f3787d0912f1c01ab72581a6c34432e" name = "google.golang.org/appengine" packages = [ ".", @@ -735,7 +735,7 @@ version = "v1.1.0" [[projects]] - digest = "1:44b4e800d04d4a2d36323db0cd5de805ba682c37c1166d33198cdc2eeed0b622" + digest = "1:1be698e0ef75f3dc0295692f0af314c0a9a12f9b93f51237c3f58f5356727131" name = "gopkg.in/gcfg.v1" packages = [ ".", @@ -756,7 +756,7 @@ version = "v0.9.0" [[projects]] - digest = "1:2f80f2b7950ffcbf46a20a827237711ef8be126f5ad3740e01d0c73174ea733c" + digest = "1:c45031ba03b85fc3b219c46b540996b793d1c5244ae4d7046314b8d09526c2a5" name = "gopkg.in/square/go-jose.v2" packages = [ ".", @@ -784,7 +784,7 @@ revision = "670d4cfef0544295bc27a114dbac37980d83185a" [[projects]] - digest = "1:ac38d38fc6250739eaea79a76df5d93892717bc2b55c8798178e726dae8472b2" + digest = "1:74142cd2275f77547c35ac51514108d9798a09aa0cf377a5c1084718ef7aa225" name = "k8s.io/api" packages = [ "admissionregistration/v1alpha1", @@ -822,7 +822,7 @@ version = "kubernetes-1.11.0" [[projects]] - digest = "1:f17e9f84fa864558e9d1abaaad8ef19a414153be9c9da2e4f557cb4f6791556f" + digest = "1:2ec605974e0610f8103b2624b09f810e3ba70676f81736ee2c785d550353104f" name = "k8s.io/apiextensions-apiserver" packages = [ "pkg/apis/apiextensions", @@ -837,7 +837,7 @@ version = "kubernetes-1.11.0" [[projects]] - digest = "1:be7c5f7a8c3a5e10678ad5ed6cff27a14ef7ca80a0f48b2b0b52cd3c92cba7aa" + digest = "1:8080f8bf0c388a270092d0ffc30c89433b3db6d9bba30aae597beec4efad9f92" name = "k8s.io/apimachinery" packages = [ "pkg/api/equality", @@ -895,7 +895,7 @@ version = "kubernetes-1.11.0" [[projects]] - digest = "1:0f0f106eb7444f0230e40a80fed648ab3736e28f6cf6e2160e15ba4d70bebcf1" + digest = "1:8f0ee1987d58aff277516da8ef32f09b66ad214c2c8a409c7cb1a9210fcc4376" name = "k8s.io/apiserver" packages = [ "pkg/authentication/authenticator", @@ -909,7 +909,7 @@ version = "kubernetes-1.11.0" [[projects]] - digest = "1:704e0debcc0c0e4d950482745bc3ce8c1b80c6623687b20c83f68e99368f225d" + digest = "1:73bf55116682b1956d611fd41f5a595226dacfb1ef4d30b010b5562029bf6d7a" name = "k8s.io/client-go" packages = [ "discovery", @@ -1058,6 +1058,8 @@ "tools/clientcmd/api", "tools/clientcmd/api/latest", "tools/clientcmd/api/v1", + "tools/leaderelection", + "tools/leaderelection/resourcelock", "tools/metrics", "tools/pager", "tools/record", @@ -1096,7 +1098,7 @@ revision = "56fd3e93ab8a6a61473b33fc687a54c6a7f28421" [[projects]] - digest = "1:7c3b56e8de04a4f10c1d1e665d61bd08bb2f07bd1cbb31ab855dae9961b5a74b" + digest = 
"1:8323cd541cc796d0eb4f8f7403f742defa59a2c8ebd01015300f8298d02ce7d6" name = "k8s.io/kubernetes" packages = [ "pkg/api/legacyscheme", @@ -1267,6 +1269,8 @@ "k8s.io/client-go/tools/cache", "k8s.io/client-go/tools/cache/testing", "k8s.io/client-go/tools/clientcmd", + "k8s.io/client-go/tools/leaderelection", + "k8s.io/client-go/tools/leaderelection/resourcelock", "k8s.io/client-go/tools/record", "k8s.io/client-go/tools/reference", "k8s.io/client-go/tools/remotecommand", diff --git a/aws/efs/README.md b/aws/efs/README.md index 4105bc754bb..9a48ca6b20a 100644 --- a/aws/efs/README.md +++ b/aws/efs/README.md @@ -141,24 +141,15 @@ If your cluster has RBAC enabled or you are running OpenShift you must authorize #### RBAC ```console -$ kubectl create -f deploy/auth/serviceaccount.yaml -serviceaccount "efs-provisioner" created -$ kubectl create -f deploy/auth/clusterrole.yaml -clusterrole "efs-provisioner-runner" created -$ kubectl create -f deploy/auth/clusterrolebinding.yaml -clusterrolebinding "run-efs-provisioner" created -$ kubectl patch deployment efs-provisioner -p '{"spec":{"template":{"spec":{"serviceAccount":"efs-provisioner"}}}}' +$ kubectl create -f deploy/rbac.yaml ``` #### OpenShift ```console -$ oc create -f deploy/auth/serviceaccount.yaml -serviceaccount "efs-provisioner" created -$ oc create -f deploy/auth/openshift-clusterrole.yaml +$ oc create -f deploy/openshift-clusterrole.yaml clusterrole "efs-provisioner-runner" created $ oadm policy add-scc-to-user hostmount-anyuid system:serviceaccount:default:efs-provisioner $ oadm policy add-cluster-role-to-user efs-provisioner-runner system:serviceaccount:default:efs-provisioner -$ oc patch deployment efs-provisioner -p '{"spec":{"template":{"spec":{"serviceAccount":"efs-provisioner"}}}}' ``` ### SELinux If SELinux is enforcing on the node where the provisioner runs, you must enable writing from a pod to a remote NFS server (EFS in this case) on the node by running: diff --git a/aws/efs/deploy/auth/clusterrolebinding.yaml b/aws/efs/deploy/auth/clusterrolebinding.yaml deleted file mode 100644 index 8b570f906ea..00000000000 --- a/aws/efs/deploy/auth/clusterrolebinding.yaml +++ /dev/null @@ -1,12 +0,0 @@ -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: run-efs-provisioner -subjects: - - kind: ServiceAccount - name: efs-provisioner - namespace: default -roleRef: - kind: ClusterRole - name: efs-provisioner-runner - apiGroup: rbac.authorization.k8s.io diff --git a/aws/efs/deploy/auth/serviceaccount.yaml b/aws/efs/deploy/auth/serviceaccount.yaml deleted file mode 100644 index 643e164c694..00000000000 --- a/aws/efs/deploy/auth/serviceaccount.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: efs-provisioner diff --git a/aws/efs/deploy/deployment.yaml b/aws/efs/deploy/deployment.yaml index 3670fca6c5f..15e433eae7b 100644 --- a/aws/efs/deploy/deployment.yaml +++ b/aws/efs/deploy/deployment.yaml @@ -1,3 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: efs-provisioner +--- kind: Deployment apiVersion: extensions/v1beta1 metadata: @@ -11,6 +16,7 @@ spec: labels: app: efs-provisioner spec: + serviceAccount: efs-provisioner containers: - name: efs-provisioner image: quay.io/external_storage/efs-provisioner:latest diff --git a/aws/efs/deploy/auth/openshift-clusterrole.yaml b/aws/efs/deploy/openshift-clusterrole.yaml similarity index 100% rename from aws/efs/deploy/auth/openshift-clusterrole.yaml rename to aws/efs/deploy/openshift-clusterrole.yaml diff --git 
a/aws/efs/deploy/pod.yaml b/aws/efs/deploy/pod.yaml index 80138310aa2..50d80612a3d 100644 --- a/aws/efs/deploy/pod.yaml +++ b/aws/efs/deploy/pod.yaml @@ -1,8 +1,14 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: efs-provisioner +--- kind: Pod apiVersion: v1 metadata: name: efs-provisioner spec: + serviceAccount: efs-provisioner containers: - name: efs-provisioner image: quay.io/external_storage/efs-provisioner:latest diff --git a/aws/efs/deploy/auth/clusterrole.yaml b/aws/efs/deploy/rbac.yaml similarity index 51% rename from aws/efs/deploy/auth/clusterrole.yaml rename to aws/efs/deploy/rbac.yaml index 08343ebac22..98d497bd660 100644 --- a/aws/efs/deploy/auth/clusterrole.yaml +++ b/aws/efs/deploy/rbac.yaml @@ -14,4 +14,20 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: run-efs-provisioner +subjects: + - kind: ServiceAccount + name: efs-provisioner + namespace: default +roleRef: + kind: ClusterRole + name: efs-provisioner-runner + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/ceph/cephfs/deploy/rbac/clusterrole.yaml b/ceph/cephfs/deploy/rbac/clusterrole.yaml index 494441f8712..06f3de04d66 100644 --- a/ceph/cephfs/deploy/rbac/clusterrole.yaml +++ b/ceph/cephfs/deploy/rbac/clusterrole.yaml @@ -15,4 +15,7 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] diff --git a/ceph/rbd/deploy/rbac/clusterrole.yaml b/ceph/rbd/deploy/rbac/clusterrole.yaml index bd31c326698..f9e86817aa7 100644 --- a/ceph/rbd/deploy/rbac/clusterrole.yaml +++ b/ceph/rbd/deploy/rbac/clusterrole.yaml @@ -14,7 +14,10 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] - apiGroups: [""] resources: ["services"] resourceNames: ["kube-dns"] diff --git a/digitalocean/manifests/rbac/clusterrole.yaml b/digitalocean/manifests/rbac/clusterrole.yaml index a6e588bd3fe..92ac05fc59b 100644 --- a/digitalocean/manifests/rbac/clusterrole.yaml +++ b/digitalocean/manifests/rbac/clusterrole.yaml @@ -15,4 +15,7 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] diff --git a/flex/deploy/manifests/rbac.yaml b/flex/deploy/manifests/rbac.yaml index f14a7f139b9..60436703ef8 100644 --- a/flex/deploy/manifests/rbac.yaml +++ b/flex/deploy/manifests/rbac.yaml @@ -14,7 +14,10 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] --- diff --git 
a/gluster/block/deploy/clusterrole.yaml b/gluster/block/deploy/clusterrole.yaml index 4f9c1f1cc18..4d22fa77f12 100644 --- a/gluster/block/deploy/clusterrole.yaml +++ b/gluster/block/deploy/clusterrole.yaml @@ -14,7 +14,10 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] - apiGroups: [""] resources: ["services"] verbs: ["get"] diff --git a/gluster/block/deploy/openshift/openshift-clusterrole.yaml b/gluster/block/deploy/openshift/openshift-clusterrole.yaml index 6ee0c04885c..ffe57717790 100644 --- a/gluster/block/deploy/openshift/openshift-clusterrole.yaml +++ b/gluster/block/deploy/openshift/openshift-clusterrole.yaml @@ -14,7 +14,10 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] - apiGroups: [""] resources: ["services"] verbs: ["get"] diff --git a/gluster/file/deploy/clusterrole.yaml b/gluster/file/deploy/clusterrole.yaml index 1b4c4a6d21a..5004794b1da 100644 --- a/gluster/file/deploy/clusterrole.yaml +++ b/gluster/file/deploy/clusterrole.yaml @@ -14,13 +14,13 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] - apiGroups: [""] resources: ["services"] verbs: ["get", "create", "delete"] - apiGroups: [""] resources: ["secrets"] verbs: ["get"] - - apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "create", "delete"] diff --git a/gluster/file/deploy/openshift/openshift-clusterrole.yaml b/gluster/file/deploy/openshift/openshift-clusterrole.yaml index da47e7b00ea..12b01d69b66 100644 --- a/gluster/file/deploy/openshift/openshift-clusterrole.yaml +++ b/gluster/file/deploy/openshift/openshift-clusterrole.yaml @@ -14,13 +14,13 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] - apiGroups: [""] resources: ["services"] verbs: ["get", "create", "delete"] - apiGroups: [""] resources: ["secrets"] verbs: ["get"] - - apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "create", "delete"] diff --git a/iscsi/targetd/ansible/roles/deploy-provisioner/templates/iscsi-auth.yaml b/iscsi/targetd/ansible/roles/deploy-provisioner/templates/iscsi-auth.yaml index ff050a632f5..669f61043ca 100644 --- a/iscsi/targetd/ansible/roles/deploy-provisioner/templates/iscsi-auth.yaml +++ b/iscsi/targetd/ansible/roles/deploy-provisioner/templates/iscsi-auth.yaml @@ -14,7 +14,10 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] --- kind: ClusterRoleBinding apiVersion: v1 diff --git a/iscsi/targetd/cmd/start.go 
b/iscsi/targetd/cmd/start.go index 2d078100411..925da057f43 100644 --- a/iscsi/targetd/cmd/start.go +++ b/iscsi/targetd/cmd/start.go @@ -87,7 +87,6 @@ var startcontrollerCmd = &cobra.Command{ controller.FailedDeleteThreshold(viper.GetInt("fail-retry-threshold")) controller.LeaseDuration(viper.GetDuration("lease-period")) controller.RenewDeadline(viper.GetDuration("renew-deadline")) - controller.TermLimit(viper.GetDuration("term-limit")) controller.RetryPeriod(viper.GetDuration("retry-period")) log.Debugln("iscsi controller created, running forever...") pc.Run(wait.NeverStop) @@ -110,8 +109,6 @@ func init() { viper.BindPFlag("renew-deadline", startcontrollerCmd.Flags().Lookup("renew-deadline")) startcontrollerCmd.Flags().Duration("retry-period", controller.DefaultRetryPeriod, "RetryPeriod is the duration the LeaderElector clients should wait between tries of actions") viper.BindPFlag("retry-period", startcontrollerCmd.Flags().Lookup("retry-period")) - startcontrollerCmd.Flags().Duration("term-limit", controller.DefaultTermLimit, "TermLimit is the maximum duration that a leader may remain the leader to complete the task before it must give up its leadership. 0 for forever or indefinite.") - viper.BindPFlag("term-limit", startcontrollerCmd.Flags().Lookup("term-limit")) startcontrollerCmd.Flags().String("targetd-scheme", "http", "scheme of the targetd connection, can be http or https") viper.BindPFlag("targetd-scheme", startcontrollerCmd.Flags().Lookup("targetd-scheme")) startcontrollerCmd.Flags().String("targetd-username", "admin", "username for the targetd connection") diff --git a/iscsi/targetd/kubernetes/iscsi-provisioner-d.yaml b/iscsi/targetd/kubernetes/iscsi-provisioner-d.yaml index 6a5fc74eecc..babcafc0d8d 100644 --- a/iscsi/targetd/kubernetes/iscsi-provisioner-d.yaml +++ b/iscsi/targetd/kubernetes/iscsi-provisioner-d.yaml @@ -14,7 +14,10 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 diff --git a/iscsi/targetd/openshift/iscsi-auth.yaml b/iscsi/targetd/openshift/iscsi-auth.yaml index ff050a632f5..669f61043ca 100644 --- a/iscsi/targetd/openshift/iscsi-auth.yaml +++ b/iscsi/targetd/openshift/iscsi-auth.yaml @@ -14,7 +14,10 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] --- kind: ClusterRoleBinding apiVersion: v1 diff --git a/lib/controller/controller.go b/lib/controller/controller.go index aec1b60a059..56ff40187ac 100644 --- a/lib/controller/controller.go +++ b/lib/controller/controller.go @@ -20,7 +20,7 @@ import ( "fmt" "net" "net/http" - "os/exec" + "os" "strconv" "strings" "sync" @@ -28,8 +28,6 @@ import ( "github.com/golang/glog" "github.com/kubernetes-incubator/external-storage/lib/controller/metrics" - "github.com/kubernetes-incubator/external-storage/lib/leaderelection" - rl "github.com/kubernetes-incubator/external-storage/lib/leaderelection/resourcelock" "github.com/kubernetes-incubator/external-storage/lib/util" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -39,7 +37,6 @@ 
import ( storagebeta "k8s.io/api/storage/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/wait" @@ -48,6 +45,8 @@ import ( "k8s.io/client-go/kubernetes/scheme" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/leaderelection" + "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/client-go/tools/record" ref "k8s.io/client-go/tools/reference" "k8s.io/client-go/util/workqueue" @@ -101,9 +100,6 @@ type ProvisionController struct { // * 1.6: storage classes enter GA kubeVersion *utilversion.Version - // TODO remove this - claimSource cache.ListerWatcher - claimInformer cache.SharedInformer claims cache.Store claimController cache.Controller @@ -121,7 +117,8 @@ type ProvisionController struct { // across restarts. Useful only for debugging, for seeing the source of // events. controller.provisioner may have its own, different notion of // identity which may/may not persist across restarts - identity types.UID + id string + component string eventRecorder record.EventRecorder resyncPeriod time.Duration @@ -141,14 +138,8 @@ type ProvisionController struct { // The path of metrics endpoint path. metricsPath string - // Parameters of leaderelection.LeaderElectionConfig. Leader election is for - // when multiple controllers are running: they race to lock (lead) every PVC - // so that only one calls Provision for it (saving API calls, CPU cycles...) - leaseDuration, renewDeadline, retryPeriod, termLimit time.Duration - // Map of claim UID to LeaderElector: for checking if this controller - // is the leader of a given claim - leaderElectors map[types.UID]*leaderelection.LeaderElector - leaderElectorsMutex *sync.Mutex + // Parameters of leaderelection.LeaderElectionConfig. + leaseDuration, renewDeadline, retryPeriod time.Duration hasRun bool hasRunLock *sync.Mutex @@ -175,8 +166,6 @@ const ( DefaultRenewDeadline = 10 * time.Second // DefaultRetryPeriod is used when option function RetryPeriod is omitted DefaultRetryPeriod = 2 * time.Second - // DefaultTermLimit is used when option function TermLimit is omitted - DefaultTermLimit = 30 * time.Second // DefaultMetricsPort is used when option function MetricsPort is omitted DefaultMetricsPort = 0 // DefaultMetricsAddress is used when option function MetricsAddress is omitted @@ -310,19 +299,6 @@ func RetryPeriod(retryPeriod time.Duration) func(*ProvisionController) error { } } -// TermLimit is the maximum duration that a leader may remain the leader -// to complete the task before it must give up its leadership. 0 for forever -// or indefinite. Defaults to 30 seconds. -func TermLimit(termLimit time.Duration) func(*ProvisionController) error { - return func(c *ProvisionController) error { - if c.HasRun() { - return errRuntime - } - c.termLimit = termLimit - return nil - } -} - // ClaimsInformer sets the informer to use for accessing PersistentVolumeClaims. // Defaults to using a private (non-shared) informer. 
 func ClaimsInformer(informer cache.SharedInformer) func(*ProvisionController) error {
@@ -410,26 +386,27 @@ func NewProvisionController(
 	kubeVersion string,
 	options ...func(*ProvisionController) error,
 ) *ProvisionController {
-	identity := uuid.NewUUID()
+	id, err := os.Hostname()
+	if err != nil {
+		glog.Fatalf("error getting hostname: %v", err)
+	}
+	// add a uniquifier so that two processes on the same host don't accidentally both become active
+	id = id + "_" + string(uuid.NewUUID())
+	component := provisionerName + "_" + id
 	v1.AddToScheme(scheme.Scheme)
 	broadcaster := record.NewBroadcaster()
 	broadcaster.StartLogging(glog.Infof)
 	broadcaster.StartRecordingToSink(&corev1.EventSinkImpl{Interface: client.CoreV1().Events(v1.NamespaceAll)})
-	var eventRecorder record.EventRecorder
-	out, err := exec.Command("hostname").Output()
-	if err != nil {
-		eventRecorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: fmt.Sprintf("%s %s", provisionerName, string(identity))})
-	} else {
-		eventRecorder = broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: fmt.Sprintf("%s %s %s", provisionerName, strings.TrimSpace(string(out)), string(identity))})
-	}
+	eventRecorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: component})
 	controller := &ProvisionController{
 		client: client,
 		provisionerName: provisionerName,
 		provisioner: provisioner,
 		kubeVersion: utilversion.MustParseSemantic(kubeVersion),
-		identity: identity,
+		id: id,
+		component: component,
 		eventRecorder: eventRecorder,
 		resyncPeriod: DefaultResyncPeriod,
 		exponentialBackOffOnError: DefaultExponentialBackOffOnError,
@@ -441,12 +418,9 @@ func NewProvisionController(
 		leaseDuration: DefaultLeaseDuration,
 		renewDeadline: DefaultRenewDeadline,
 		retryPeriod: DefaultRetryPeriod,
-		termLimit: DefaultTermLimit,
 		metricsPort: DefaultMetricsPort,
 		metricsAddress: DefaultMetricsAddress,
 		metricsPath: DefaultMetricsPath,
-		leaderElectors: make(map[types.UID]*leaderelection.LeaderElector),
-		leaderElectorsMutex: &sync.Mutex{},
 		hasRun: false,
 		hasRunLock: &sync.Mutex{},
 	}
@@ -479,7 +453,6 @@ func NewProvisionController(
 			return client.CoreV1().PersistentVolumeClaims(v1.NamespaceAll).Watch(options)
 		},
 	}
-	controller.claimSource = claimSource
 	claimHandler := cache.ResourceEventHandlerFuncs{
 		AddFunc:    func(obj interface{}) { controller.enqueueWork(controller.claimQueue, obj) },
@@ -618,46 +591,87 @@ func (ctrl *ProvisionController) forgetWork(queue workqueue.RateLimitingInterfac
 // Run starts all of this controller's control loops
 func (ctrl *ProvisionController) Run(stopCh <-chan struct{}) {
-	glog.Infof("Starting provisioner controller %s!", string(ctrl.identity))
-	defer utilruntime.HandleCrash()
-	defer ctrl.claimQueue.ShutDown()
-	defer ctrl.volumeQueue.ShutDown()
-	ctrl.hasRunLock.Lock()
-	ctrl.hasRun = true
-	ctrl.hasRunLock.Unlock()
-	if ctrl.metricsPort > 0 {
-		prometheus.MustRegister([]prometheus.Collector{
-			metrics.PersistentVolumeClaimProvisionTotal,
-			metrics.PersistentVolumeClaimProvisionFailedTotal,
-			metrics.PersistentVolumeClaimProvisionDurationSeconds,
-			metrics.PersistentVolumeDeleteTotal,
-			metrics.PersistentVolumeDeleteFailedTotal,
-			metrics.PersistentVolumeDeleteDurationSeconds,
-		}...)
-		http.Handle(ctrl.metricsPath, promhttp.Handler())
-		address := net.JoinHostPort(ctrl.metricsAddress, strconv.FormatInt(int64(ctrl.metricsPort), 10))
-		glog.Infof("Starting metrics server at %s\n", address)
-		go wait.Forever(func() {
-			err := http.ListenAndServe(address, nil)
-			if err != nil {
-				glog.Errorf("Failed to listen on %s: %v", address, err)
-			}
-		}, 5*time.Second)
-	}
+	run := func(stopCh <-chan struct{}) {
+		glog.Infof("Starting provisioner controller %s!", ctrl.component)
+		defer utilruntime.HandleCrash()
+		defer ctrl.claimQueue.ShutDown()
+		defer ctrl.volumeQueue.ShutDown()
+
+		ctrl.hasRunLock.Lock()
+		ctrl.hasRun = true
+		ctrl.hasRunLock.Unlock()
+		if ctrl.metricsPort > 0 {
+			prometheus.MustRegister([]prometheus.Collector{
+				metrics.PersistentVolumeClaimProvisionTotal,
+				metrics.PersistentVolumeClaimProvisionFailedTotal,
+				metrics.PersistentVolumeClaimProvisionDurationSeconds,
+				metrics.PersistentVolumeDeleteTotal,
+				metrics.PersistentVolumeDeleteFailedTotal,
+				metrics.PersistentVolumeDeleteDurationSeconds,
+			}...)
+			http.Handle(ctrl.metricsPath, promhttp.Handler())
+			address := net.JoinHostPort(ctrl.metricsAddress, strconv.FormatInt(int64(ctrl.metricsPort), 10))
+			glog.Infof("Starting metrics server at %s\n", address)
+			go wait.Forever(func() {
+				err := http.ListenAndServe(address, nil)
+				if err != nil {
+					glog.Errorf("Failed to listen on %s: %v", address, err)
+				}
+			}, 5*time.Second)
+		}
+
+		// If a SharedInformer has been passed in, this controller should not
+		// call Run again
+		if ctrl.claimInformer == nil {
+			go ctrl.claimController.Run(stopCh)
+		}
+		if ctrl.volumeInformer == nil {
+			go ctrl.volumeController.Run(stopCh)
+		}
+		if ctrl.classInformer == nil {
+			go ctrl.classController.Run(stopCh)
+		}
+
+		if !cache.WaitForCacheSync(stopCh, ctrl.claimController.HasSynced, ctrl.volumeController.HasSynced, ctrl.classController.HasSynced) {
+			return
+		}
+
+		for i := 0; i < ctrl.threadiness; i++ {
+			go wait.Until(ctrl.runClaimWorker, time.Second, stopCh)
+			go wait.Until(ctrl.runVolumeWorker, time.Second, stopCh)
+		}
-	go ctrl.claimController.Run(stopCh)
-	go ctrl.volumeController.Run(stopCh)
-	go ctrl.classController.Run(stopCh)
+		glog.Infof("Started provisioner controller %s!", ctrl.component)
-	for i := 0; i < ctrl.threadiness; i++ {
-		go wait.Until(ctrl.runClaimWorker, time.Second, stopCh)
-		go wait.Until(ctrl.runVolumeWorker, time.Second, stopCh)
+		<-stopCh
 	}
-	glog.Infof("Started provisioner controller %s!", string(ctrl.identity))
+	rl, err := resourcelock.New("endpoints",
+		"kube-system",
+		strings.Replace(ctrl.provisionerName, "/", "-", -1),
+		ctrl.client.CoreV1(),
+		resourcelock.ResourceLockConfig{
+			Identity: ctrl.id,
+			EventRecorder: ctrl.eventRecorder,
+		})
+	if err != nil {
+		glog.Fatalf("error creating lock: %v", err)
+	}
-	<-stopCh
+	leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
+		Lock: rl,
+		LeaseDuration: ctrl.leaseDuration,
+		RenewDeadline: ctrl.renewDeadline,
+		RetryPeriod: ctrl.retryPeriod,
+		Callbacks: leaderelection.LeaderCallbacks{
+			OnStartedLeading: run,
+			OnStoppedLeading: func() {
+				glog.Fatalf("leaderelection lost")
+			},
+		},
+	})
+	panic("unreachable")
 }
 func (ctrl *ProvisionController) runClaimWorker() {
@@ -789,16 +803,9 @@ func (ctrl *ProvisionController) syncClaim(obj interface{}) error {
 	}
 	if ctrl.shouldProvision(claim) {
-		ctrl.leaderElectorsMutex.Lock()
-		le, ok := ctrl.leaderElectors[claim.UID]
-		ctrl.leaderElectorsMutex.Unlock()
-		if ok && le.IsLeader() {
-			startTime := time.Now()
-			err := ctrl.provisionClaimOperation(claim)
- ctrl.updateProvisionStats(claim, err, startTime) - return err - } - err := ctrl.lockProvisionClaimOperation(claim) + startTime := time.Now() + err := ctrl.provisionClaimOperation(claim) + ctrl.updateProvisionStats(claim, err, startTime) return err } return nil @@ -820,20 +827,6 @@ func (ctrl *ProvisionController) syncVolume(obj interface{}) error { return nil } -// removeRecord returns a claim with its leader election record annotation and -// ResourceVersion set blank -func (ctrl *ProvisionController) removeRecord(claim *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) { - claimClone := claim.DeepCopy() - if claimClone.Annotations == nil { - claimClone.Annotations = make(map[string]string) - } - claimClone.Annotations[rl.LeaderElectionRecordAnnotationKey] = "" - - claimClone.ResourceVersion = "" - - return claimClone, nil -} - // shouldProvision returns whether a claim should have a volume provisioned for // it, i.e. whether a Provision is "desired" func (ctrl *ProvisionController) shouldProvision(claim *v1.PersistentVolumeClaim) bool { @@ -913,68 +906,6 @@ func (ctrl *ProvisionController) canProvision(claim *v1.PersistentVolumeClaim) e return nil } -// lockProvisionClaimOperation wraps provisionClaimOperation. In case other -// controllers are serving the same claims, to prevent them all from creating -// volumes for a claim & racing to submit their PV, each controller creates a -// LeaderElector to instead race for the leadership (lock), where only the -// leader is tasked with provisioning & may try to do so. Returns error, which -// indicates whether provisioning should be retried (requeue the claim) or not -func (ctrl *ProvisionController) lockProvisionClaimOperation(claim *v1.PersistentVolumeClaim) error { - rl := rl.ProvisionPVCLock{ - PVCMeta: claim.ObjectMeta, - Client: ctrl.client, - LockConfig: rl.Config{ - Identity: string(ctrl.identity), - EventRecorder: ctrl.eventRecorder, - }, - } - var provisionErr error - le, err := leaderelection.NewLeaderElector(leaderelection.Config{ - Lock: &rl, - LeaseDuration: ctrl.leaseDuration, - RenewDeadline: ctrl.renewDeadline, - RetryPeriod: ctrl.retryPeriod, - TermLimit: ctrl.termLimit, - Callbacks: leaderelection.LeaderCallbacks{ - OnStartedLeading: func(_ <-chan struct{}) { - startTime := time.Now() - provisionErr = ctrl.provisionClaimOperation(claim) - ctrl.updateProvisionStats(claim, provisionErr, startTime) - }, - OnStoppedLeading: func() { - }, - }, - }) - if err != nil { - glog.Errorf("Error creating LeaderElector, can't provision for claim %q: %v", claimToClaimKey(claim), err) - return err - } - - ctrl.leaderElectorsMutex.Lock() - ctrl.leaderElectors[claim.UID] = le - ctrl.leaderElectorsMutex.Unlock() - - // To determine when to stop trying to acquire/renew the lock, watch for - // provisioning success/failure. 
(The leader could get the result of its - // operation but it has to watch anyway) - stopCh := make(chan struct{}) - successCh, err := ctrl.watchProvisioning(claim, stopCh) - if err != nil { - glog.Errorf("Error watching for provisioning success, can't provision for claim %q: %v", claimToClaimKey(claim), err) - return err - } - - le.Run(successCh) - - close(stopCh) - - ctrl.leaderElectorsMutex.Lock() - delete(ctrl.leaderElectors, claim.UID) - ctrl.leaderElectorsMutex.Unlock() - - return provisionErr -} - func (ctrl *ProvisionController) updateProvisionStats(claim *v1.PersistentVolumeClaim, err error, startTime time.Time) { class := "" if claim.Spec.StorageClassName != nil { @@ -1009,9 +940,13 @@ func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVol // A previous doProvisionClaim may just have finished while we were waiting for // the locks. Check that PV (with deterministic name) hasn't been provisioned // yet. + namespace := claim.GetNamespace() pvName := ctrl.getProvisionedVolumeNameForClaim(claim) - volume, err := ctrl.client.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{}) - if err == nil && volume != nil { + _, exists, err := ctrl.volumes.GetByKey(fmt.Sprintf("%s/%s", namespace, pvName)) + if err != nil { + glog.Errorf("Error getting claim %q's volume: %v", claimToClaimKey(claim), err) + return nil + } else if exists { // Volume has been already provisioned, nothing to do. glog.V(4).Infof("provisionClaimOperation [%s]: volume already exists, skipping", claimToClaimKey(claim)) return nil @@ -1093,7 +1028,7 @@ func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVol ctrl.eventRecorder.Event(claim, v1.EventTypeNormal, "Provisioning", fmt.Sprintf("External provisioner is provisioning volume for claim %q", claimToClaimKey(claim))) - volume, err = ctrl.provisioner.Provision(options) + volume, err := ctrl.provisioner.Provision(options) if err != nil { if ierr, ok := err.(*IgnoredError); ok { // Provision ignored, do nothing and hope another provisioner will provision it. @@ -1166,159 +1101,6 @@ func (ctrl *ProvisionController) provisionClaimOperation(claim *v1.PersistentVol return nil } -// watchProvisioning returns a channel to which it sends the results of all -// provisioning attempts for the given claim. The PVC being modified to no -// longer need provisioning is considered a success. 
-func (ctrl *ProvisionController) watchProvisioning(claim *v1.PersistentVolumeClaim, stopChannel chan struct{}) (<-chan bool, error) { - stopWatchPVC := make(chan struct{}) - pvcCh, err := ctrl.watchPVC(claim, stopWatchPVC) - if err != nil { - glog.Infof("cannot start watcher for PVC %s/%s: %v", claim.Namespace, claim.Name, err) - return nil, err - } - - successCh := make(chan bool, 0) - - go func() { - defer close(stopWatchPVC) - defer close(successCh) - - for { - select { - case _ = <-stopChannel: - return - - case event := <-pvcCh: - switch event.Object.(type) { - case *v1.PersistentVolumeClaim: - // PVC changed - claim := event.Object.(*v1.PersistentVolumeClaim) - glog.V(4).Infof("claim update received: %s %s/%s %s", event.Type, claim.Namespace, claim.Name, claim.Status.Phase) - switch event.Type { - case watch.Added, watch.Modified: - if claim.Spec.VolumeName != "" { - successCh <- true - } else if !ctrl.shouldProvision(claim) { - glog.Infof("claim %s/%s was modified to not ask for this provisioner", claim.Namespace, claim.Name) - successCh <- true - } - - case watch.Deleted: - glog.Infof("claim %s/%s was deleted", claim.Namespace, claim.Name) - successCh <- true - - case watch.Error: - glog.Infof("claim %s/%s watcher failed", claim.Namespace, claim.Name) - successCh <- true - default: - } - case *v1.Event: - // Event received - claimEvent := event.Object.(*v1.Event) - glog.V(4).Infof("claim event received: %s %s/%s %s/%s %s", event.Type, claimEvent.Namespace, claimEvent.Name, claimEvent.InvolvedObject.Namespace, claimEvent.InvolvedObject.Name, claimEvent.Reason) - if claimEvent.Reason == "ProvisioningSucceeded" { - successCh <- true - } else if claimEvent.Reason == "ProvisioningFailed" { - successCh <- false - } - } - } - } - }() - - return successCh, nil -} - -// watchPVC returns a watch on the given PVC and ProvisioningFailed & -// ProvisioningSucceeded events involving it -func (ctrl *ProvisionController) watchPVC(claim *v1.PersistentVolumeClaim, stopChannel chan struct{}) (<-chan watch.Event, error) { - options := metav1.ListOptions{ - FieldSelector: "metadata.name=" + claim.Name, - Watch: true, - ResourceVersion: claim.ResourceVersion, - } - - pvcWatch, err := ctrl.claimSource.Watch(options) - if err != nil { - return nil, err - } - - failWatch, err := ctrl.getPVCEventWatch(claim, v1.EventTypeWarning, "ProvisioningFailed") - if err != nil { - pvcWatch.Stop() - return nil, err - } - - successWatch, err := ctrl.getPVCEventWatch(claim, v1.EventTypeNormal, "ProvisioningSucceeded") - if err != nil { - failWatch.Stop() - pvcWatch.Stop() - return nil, err - } - - eventCh := make(chan watch.Event, 0) - - go func() { - defer successWatch.Stop() - defer failWatch.Stop() - defer pvcWatch.Stop() - defer close(eventCh) - - for { - select { - case _ = <-stopChannel: - return - - case pvcEvent, ok := <-pvcWatch.ResultChan(): - if !ok { - return - } - eventCh <- pvcEvent - - case failEvent, ok := <-failWatch.ResultChan(): - if !ok { - return - } - eventCh <- failEvent - - case successEvent, ok := <-successWatch.ResultChan(): - if !ok { - return - } - eventCh <- successEvent - } - } - }() - - return eventCh, nil -} - -// getPVCEventWatch returns a watch on the given PVC for the given event from -// this point forward. 
-func (ctrl *ProvisionController) getPVCEventWatch(claim *v1.PersistentVolumeClaim, eventType, reason string) (watch.Interface, error) {
-	claimKind := "PersistentVolumeClaim"
-	claimUID := string(claim.UID)
-	fieldSelector := ctrl.client.CoreV1().Events(claim.Namespace).GetFieldSelector(&claim.Name, &claim.Namespace, &claimKind, &claimUID).String() + ",type=" + eventType + ",reason=" + reason
-
-	list, err := ctrl.client.CoreV1().Events(claim.Namespace).List(metav1.ListOptions{
-		FieldSelector: fieldSelector,
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	resourceVersion := ""
-	if len(list.Items) >= 1 {
-		resourceVersion = list.Items[len(list.Items)-1].ResourceVersion
-	}
-
-	return ctrl.client.CoreV1().Events(claim.Namespace).Watch(metav1.ListOptions{
-		FieldSelector: fieldSelector,
-		Watch: true,
-		ResourceVersion: resourceVersion,
-	})
-}
-
 // deleteVolumeOperation attempts to delete the volume backing the given
 // volume. Returns error, which indicates whether deletion should be retried
 // (requeue the volume) or not
@@ -1329,10 +1111,21 @@ func (ctrl *ProvisionController) deleteVolumeOperation(volume *v1.PersistentVolu
 	// Our check does not have to be as sophisticated as PV controller's, we can
 	// trust that the PV controller has set the PV to Released/Failed and it's
 	// ours to delete
-	newVolume, err := ctrl.client.CoreV1().PersistentVolumes().Get(volume.Name, metav1.GetOptions{})
+	newVolumeObject, exists, err := ctrl.volumes.GetByKey(volume.Name)
 	if err != nil {
+		glog.Errorf("error getting persistentvolume by name %s: %v, skipping", volume.Name, err)
+		return nil
+	} else if !exists {
+		glog.Infof("persistentvolume %s does not exist, skipping", volume.Name)
+		return nil
+	}
+
+	newVolume, ok := newVolumeObject.(*v1.PersistentVolume)
+	if !ok {
+		glog.Errorf("error getting persistentvolume %s/%s, skipping", volume.Namespace, volume.Name)
 		return nil
 	}
+
 	if !ctrl.shouldDelete(newVolume) {
 		glog.Infof("volume %q no longer needs deletion, skipping", volume.Name)
 		return nil
diff --git a/lib/controller/controller_test.go b/lib/controller/controller_test.go
index 98dd071c341..30f34861e03 100644
--- a/lib/controller/controller_test.go
+++ b/lib/controller/controller_test.go
@@ -20,8 +20,6 @@ import (
 	"errors"
 	"fmt"
 	"reflect"
-	"strconv"
-	"sync"
 	"testing"
 	"time"
@@ -33,15 +31,12 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/kubernetes/scheme"
-	fakev1core "k8s.io/client-go/kubernetes/typed/core/v1/fake"
 	testclient "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/cache"
-	fcache "k8s.io/client-go/tools/cache/testing"
 	ref "k8s.io/client-go/tools/reference"
 	utilversion "k8s.io/kubernetes/pkg/util/version"
 )
@@ -338,78 +333,6 @@ func TestTopologyParams(t *testing.T) {
 	}
 }
-func TestMultipleControllers(t *testing.T) {
-	tests := []struct {
-		name string
-		provisionerName string
-		numControllers int
-		numClaims int
-		expectedCalls int
-	}{
-		{
-			name: "call provision exactly once",
-			provisionerName: "foo.bar/baz",
-			numControllers: 5,
-			numClaims: 1,
-			expectedCalls: 1,
-		},
-	}
-	for _, test := range tests {
-		client := fake.NewSimpleClientset()
-
-		// Create a reactor to reject Updates if object has already been modified,
-		// like etcd.
- claimSource := fcache.NewFakePVCControllerSource() - reactor := claimReactor{ - fake: &fakev1core.FakeCoreV1{Fake: &client.Fake}, - claims: make(map[string]*v1.PersistentVolumeClaim), - lock: sync.Mutex{}, - claimSource: claimSource, - } - reactor.claims["claim-1"] = newClaim("claim-1", "uid-1-1", "class-1", test.provisionerName, "", nil) - client.PrependReactor("update", "persistentvolumeclaims", reactor.React) - client.PrependReactor("get", "persistentvolumeclaims", reactor.React) - - // Create a fake watch so each controller can get ProvisioningSucceeded - fakeWatch := watch.NewFakeWithChanSize(test.numControllers, false) - client.PrependWatchReactor("events", testclient.DefaultWatchReactor(fakeWatch, nil)) - client.PrependReactor("create", "events", func(action testclient.Action) (bool, runtime.Object, error) { - obj := action.(testclient.CreateAction).GetObject() - for i := 0; i < test.numControllers; i++ { - fakeWatch.Add(obj) - } - return true, obj, nil - }) - - provisioner := newTestProvisioner() - ctrls := make([]*ProvisionController, test.numControllers) - stopChs := make([]chan struct{}, test.numControllers) - for i := 0; i < test.numControllers; i++ { - ctrls[i] = NewProvisionController(client, test.provisionerName, provisioner, defaultServerVersion, CreateProvisionedPVInterval(10*time.Millisecond)) - ctrls[i].claimSource = claimSource - ctrls[i].claims.Add(newClaim("claim-1", "uid-1-1", "class-1", test.provisionerName, "", nil)) - ctrls[i].classes.Add(newBetaStorageClass("class-1", test.provisionerName)) - stopChs[i] = make(chan struct{}) - } - - for i := 0; i < test.numControllers; i++ { - go ctrls[i].syncClaim(newClaim("claim-1", "uid-1-1", "class-1", test.provisionerName, "", nil)) - } - - // Sleep for 3 election retry periods - time.Sleep(3 * ctrls[0].retryPeriod) - - if test.expectedCalls != len(provisioner.provisionCalls) { - t.Logf("test case: %s", test.name) - t.Errorf("expected provision calls:\n %v\n but got:\n %v\n", test.expectedCalls, len(provisioner.provisionCalls)) - } - - for _, stopCh := range stopChs { - close(stopCh) - } - } -} - func TestShouldProvision(t *testing.T) { tests := []struct { name string @@ -770,8 +693,7 @@ func newTestProvisionController( CreateProvisionedPVInterval(10*time.Millisecond), LeaseDuration(2*resyncPeriod), RenewDeadline(resyncPeriod), - RetryPeriod(resyncPeriod/2), - TermLimit(2*resyncPeriod)) + RetryPeriod(resyncPeriod/2)) return ctrl } @@ -803,7 +725,6 @@ func newTestProvisionControllerSharedInformers( LeaseDuration(2*resyncPeriod), RenewDeadline(resyncPeriod), RetryPeriod(resyncPeriod/2), - TermLimit(2*resyncPeriod), ClaimsInformer(claimInformer), VolumesInformer(volumeInformer), ClassesInformer(classInformer)) @@ -1110,48 +1031,3 @@ func (i *ignoredProvisioner) Provision(options VolumeOptions) (*v1.PersistentVol func (i *ignoredProvisioner) Delete(volume *v1.PersistentVolume) error { return nil } - -type claimReactor struct { - fake *fakev1core.FakeCoreV1 - claims map[string]*v1.PersistentVolumeClaim - lock sync.Mutex - claimSource *fcache.FakePVCControllerSource -} - -func (r *claimReactor) React(action testclient.Action) (handled bool, ret runtime.Object, err error) { - r.lock.Lock() - defer r.lock.Unlock() - switch { - case action.Matches("update", "persistentvolumeclaims"): - obj := action.(testclient.UpdateAction).GetObject() - - claim := obj.(*v1.PersistentVolumeClaim) - - // Check and bump object version - storedClaim, found := r.claims[claim.Name] - if found { - storedVer, _ := 
strconv.Atoi(storedClaim.ResourceVersion) - requestedVer, _ := strconv.Atoi(claim.ResourceVersion) - if storedVer != requestedVer { - return true, obj, errors.New("VersionError") - } - claim.ResourceVersion = strconv.Itoa(storedVer + 1) - } else { - return true, nil, fmt.Errorf("Cannot update claim %s: claim not found", claim.Name) - } - - r.claims[claim.Name] = claim - r.claimSource.Modify(claim) - return true, claim, nil - case action.Matches("get", "persistentvolumeclaims"): - name := action.(testclient.GetAction).GetName() - claim, found := r.claims[name] - if found { - claimClone := claim.DeepCopy() - return true, claimClone, nil - } - return true, nil, fmt.Errorf("Cannot find claim %s", name) - } - - return false, nil, nil -} diff --git a/lib/leaderelection/resourcelock/provisionpvclock.go b/lib/leaderelection/resourcelock/provisionpvclock.go deleted file mode 100644 index ee516266ed1..00000000000 --- a/lib/leaderelection/resourcelock/provisionpvclock.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package resourcelock - -import ( - "encoding/json" - "errors" - "fmt" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" -) - -// ProvisionPVCLock is a lock on an existing PVC to provision a PV for -type ProvisionPVCLock struct { - // PVCMeta should contain a Name and a Namespace of a PVC - // object that the LeaderElector will attempt to lead. - PVCMeta metav1.ObjectMeta - Client clientset.Interface - LockConfig Config - p *v1.PersistentVolumeClaim -} - -// Get returns the LeaderElectionRecord -func (pl *ProvisionPVCLock) Get() (*LeaderElectionRecord, error) { - var record LeaderElectionRecord - var err error - pl.p, err = pl.Client.Core().PersistentVolumeClaims(pl.PVCMeta.Namespace).Get(pl.PVCMeta.Name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - // TODO there should be a way to give up if the pvc is already bound...we are doing a Get regardless - if pl.p.Annotations == nil { - pl.p.Annotations = make(map[string]string) - } - if recordBytes, found := pl.p.Annotations[LeaderElectionRecordAnnotationKey]; found { - if err := json.Unmarshal([]byte(recordBytes), &record); err != nil { - return nil, err - } - } - return &record, nil -} - -// Create is not allowed, the PVC should already exist -func (pl *ProvisionPVCLock) Create(ler LeaderElectionRecord) error { - return errors.New("create not allowed, PVC should already exist") -} - -// Update will update and existing annotation on a given resource. 
-func (pl *ProvisionPVCLock) Update(ler LeaderElectionRecord) error { - if pl.p == nil { - return errors.New("PVC not initialized, call get first") - } - recordBytes, err := json.Marshal(ler) - if err != nil { - return err - } - pl.p.Annotations[LeaderElectionRecordAnnotationKey] = string(recordBytes) - pl.p, err = pl.Client.Core().PersistentVolumeClaims(pl.PVCMeta.Namespace).Update(pl.p) - return err -} - -// RecordEvent in leader election while adding meta-data -func (pl *ProvisionPVCLock) RecordEvent(s string) { - // events := fmt.Sprintf("%v %v", pl.LockConfig.Identity, s) - // pl.LockConfig.EventRecorder.Eventf(&v1.PersistentVolumeClaim{ObjectMeta: pl.p.ObjectMeta}, v1.EventTypeNormal, "LeaderElection", events) -} - -// Describe is used to convert details on current resource lock -// into a string -func (pl *ProvisionPVCLock) Describe() string { - return fmt.Sprintf("to provision for pvc %v/%v", pl.PVCMeta.Namespace, pl.PVCMeta.Name) -} - -// Identity returns the Identity of the lock -func (pl *ProvisionPVCLock) Identity() string { - return pl.LockConfig.Identity -} diff --git a/nfs-client/deploy/auth/clusterrole.yaml b/nfs-client/deploy/auth/clusterrole.yaml index 0c29a3c045e..0ecb088bd50 100644 --- a/nfs-client/deploy/auth/clusterrole.yaml +++ b/nfs-client/deploy/auth/clusterrole.yaml @@ -14,4 +14,7 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] \ No newline at end of file diff --git a/nfs-client/deploy/auth/openshift-clusterrole.yaml b/nfs-client/deploy/auth/openshift-clusterrole.yaml index beabc8f0f6b..2f50f5b2d38 100644 --- a/nfs-client/deploy/auth/openshift-clusterrole.yaml +++ b/nfs-client/deploy/auth/openshift-clusterrole.yaml @@ -14,4 +14,7 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] diff --git a/nfs/README.md b/nfs/README.md index 1c0cd88b426..e95f4a67707 100644 --- a/nfs/README.md +++ b/nfs/README.md @@ -2,10 +2,10 @@ [![Docker Repository on Quay](https://quay.io/repository/kubernetes_incubator/nfs-provisioner/status "Docker Repository on Quay")](https://quay.io/repository/kubernetes_incubator/nfs-provisioner) ``` -quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9 +quay.io/kubernetes_incubator/nfs-provisioner ``` -nfs-provisioner is an out-of-tree dynamic provisioner for Kubernetes 1.4. You can use it to quickly & easily deploy shared storage that works almost anywhere. Or it can help you write your own out-of-tree dynamic provisioner by serving as an example implementation of the requirements detailed in [the proposal](https://github.com/kubernetes/kubernetes/pull/30285). Go [here](./docs/demo) for a demo of how to use it and [here](../docs/demo/hostpath-provisioner) for an example of how to write your own. +nfs-provisioner is an out-of-tree dynamic provisioner for Kubernetes 1.4+. You can use it to quickly & easily deploy shared storage that works almost anywhere. Or it can help you write your own out-of-tree dynamic provisioner by serving as an example implementation of the requirements detailed in [the proposal](https://github.com/kubernetes/kubernetes/pull/30285). 
Go [here](./docs/demo) for a demo of how to use it and [here](../docs/demo/hostpath-provisioner) for an example of how to write your own. It works just like in-tree dynamic provisioners: a `StorageClass` object can specify an instance of nfs-provisioner to be its `provisioner` like it specifies in-tree provisioners such as GCE or AWS. Then, the instance of nfs-provisioner will watch for `PersistentVolumeClaims` that ask for the `StorageClass` and automatically create NFS-backed `PersistentVolumes` for them. For more information on how dynamic provisioning works, see [the docs](http://kubernetes.io/docs/user-guide/persistent-volumes/) or [this blog post](http://blog.kubernetes.io/2016/10/dynamic-provisioning-and-storage-in-kubernetes.html). @@ -62,16 +62,11 @@ Deleting the `PersistentVolumeClaim` will cause the provisioner to delete the `P Deleting the provisioner deployment will cause any outstanding `PersistentVolumes` to become unusable for as long as the provisioner is gone. ## Running -Go [here](./docs/demo) for a demo of how to run nfs-provisioner. You may also/instead want to read the (dryer but more detailed) following docs. - -To authorize nfs-provisioner on a Kubernetes cluster (only if you have RBAC and/or PSP enabled or are running OpenShift) see [Authorization](docs/authorization.md). To deploy nfs-provisioner on a Kubernetes cluster see [Deployment](docs/deployment.md). To use nfs-provisioner once it is deployed see [Usage](docs/usage.md). -For information on running multiple instances of nfs-provisioner see [Running Multiple Provisioners](docs/multiple.md). - ## [Changelog](CHANGELOG.md) Releases done here in external-storage will not have corresponding git tags (external-storage's git tags are reserved for versioning the library), so to keep track of releases check this README, the [changelog](CHANGELOG.md), or [Quay](https://quay.io/repository/kubernetes_incubator/nfs-provisioner) diff --git a/nfs/deploy/kubernetes/auth/clusterrolebinding.yaml b/nfs/deploy/kubernetes/auth/clusterrolebinding.yaml deleted file mode 100644 index 37410aff8bf..00000000000 --- a/nfs/deploy/kubernetes/auth/clusterrolebinding.yaml +++ /dev/null @@ -1,14 +0,0 @@ -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: run-nfs-provisioner - namespace: default -subjects: - - kind: ServiceAccount - name: nfs-provisioner - namespace: default -# update namespace above to your namespace in order to make this work -roleRef: - kind: ClusterRole - name: nfs-provisioner-runner - apiGroup: rbac.authorization.k8s.io diff --git a/nfs/deploy/kubernetes/auth/daemonset-sa.yaml b/nfs/deploy/kubernetes/auth/daemonset-sa.yaml deleted file mode 100644 index 6c16195998c..00000000000 --- a/nfs/deploy/kubernetes/auth/daemonset-sa.yaml +++ /dev/null @@ -1,51 +0,0 @@ -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: nfs-provisioner -spec: - template: - metadata: - labels: - app: nfs-provisioner - spec: - serviceAccount: nfs-provisioner - nodeSelector: - app: nfs-provisioner - containers: - - name: nfs-provisioner - image: quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9 - ports: - - name: nfs - containerPort: 2049 - hostPort: 2049 - - name: mountd - containerPort: 20048 - - name: rpcbind - containerPort: 111 - - name: rpcbind-udp - containerPort: 111 - protocol: UDP - securityContext: - capabilities: - add: - - DAC_READ_SEARCH - - SYS_RESOURCE - args: - - "-provisioner=example.com/nfs" - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: 
NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: export-volume - mountPath: /export - volumes: - - name: export-volume - hostPath: - path: /srv diff --git a/nfs/deploy/kubernetes/auth/deployment-sa.yaml b/nfs/deploy/kubernetes/auth/deployment-sa.yaml deleted file mode 100644 index 07efa30e4e0..00000000000 --- a/nfs/deploy/kubernetes/auth/deployment-sa.yaml +++ /dev/null @@ -1,73 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: nfs-provisioner - labels: - app: nfs-provisioner -spec: - ports: - - name: nfs - port: 2049 - - name: mountd - port: 20048 - - name: rpcbind - port: 111 - - name: rpcbind-udp - port: 111 - protocol: UDP - selector: - app: nfs-provisioner ---- -kind: Deployment -apiVersion: extensions/v1beta1 -metadata: - name: nfs-provisioner -spec: - replicas: 1 - strategy: - type: Recreate - template: - metadata: - labels: - app: nfs-provisioner - spec: - serviceAccount: nfs-provisioner - containers: - - name: nfs-provisioner - image: quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9 - ports: - - name: nfs - containerPort: 2049 - - name: mountd - containerPort: 20048 - - name: rpcbind - containerPort: 111 - - name: rpcbind-udp - containerPort: 111 - protocol: UDP - securityContext: - capabilities: - add: - - DAC_READ_SEARCH - - SYS_RESOURCE - args: - - "-provisioner=example.com/nfs" - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: SERVICE_NAME - value: nfs-provisioner - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: export-volume - mountPath: /export - volumes: - - name: export-volume - hostPath: - path: /srv diff --git a/nfs/deploy/kubernetes/auth/openshift-clusterrole.yaml b/nfs/deploy/kubernetes/auth/openshift-clusterrole.yaml deleted file mode 100644 index 89b9d63487b..00000000000 --- a/nfs/deploy/kubernetes/auth/openshift-clusterrole.yaml +++ /dev/null @@ -1,20 +0,0 @@ -kind: ClusterRole -apiVersion: v1 -metadata: - name: nfs-provisioner-runner -rules: - - apiGroups: [""] - resources: ["persistentvolumes"] - verbs: ["get", "list", "watch", "create", "delete"] - - apiGroups: [""] - resources: ["persistentvolumeclaims"] - verbs: ["get", "list", "watch", "update"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get", "list", "watch"] - - apiGroups: [""] - resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] - - apiGroups: [""] - resources: ["services", "endpoints"] - verbs: ["get"] diff --git a/nfs/deploy/kubernetes/auth/openshift-scc.yaml b/nfs/deploy/kubernetes/auth/openshift-scc.yaml deleted file mode 100644 index 3692892107e..00000000000 --- a/nfs/deploy/kubernetes/auth/openshift-scc.yaml +++ /dev/null @@ -1,35 +0,0 @@ -allowHostDirVolumePlugin: true -allowHostIPC: false -allowHostNetwork: false -allowHostPID: false -allowHostPorts: false -allowPrivilegedContainer: false -allowedCapabilities: -- DAC_READ_SEARCH -- SYS_RESOURCE -apiVersion: v1 -defaultAddCapabilities: null -fsGroup: - type: MustRunAs -kind: SecurityContextConstraints -metadata: - annotations: null - name: nfs-provisioner -priority: null -readOnlyRootFilesystem: false -requiredDropCapabilities: -- KILL -- MKNOD -- SYS_CHROOT -runAsUser: - type: RunAsAny -seLinuxContext: - type: MustRunAs -supplementalGroups: - type: RunAsAny -volumes: -- configMap -- downwardAPI -- emptyDir -- persistentVolumeClaim -- secret diff --git 
a/nfs/deploy/kubernetes/auth/pod-sa.yaml b/nfs/deploy/kubernetes/auth/pod-sa.yaml deleted file mode 100644 index 5b72949bde0..00000000000 --- a/nfs/deploy/kubernetes/auth/pod-sa.yaml +++ /dev/null @@ -1,33 +0,0 @@ -kind: Pod -apiVersion: v1 -metadata: - name: nfs-provisioner -spec: - serviceAccount: nfs-provisioner - containers: - - name: nfs-provisioner - image: quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9 - ports: - - name: nfs - containerPort: 2049 - - name: mountd - containerPort: 20048 - - name: rpcbind - containerPort: 111 - - name: rpcbind-udp - containerPort: 111 - protocol: UDP - securityContext: - capabilities: - add: - - DAC_READ_SEARCH - - SYS_RESOURCE - args: - - "-provisioner=example.com/nfs" - - "-grace-period=0" - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - imagePullPolicy: "IfNotPresent" diff --git a/nfs/deploy/kubernetes/auth/serviceaccount.yaml b/nfs/deploy/kubernetes/auth/serviceaccount.yaml deleted file mode 100644 index d76b2c713fa..00000000000 --- a/nfs/deploy/kubernetes/auth/serviceaccount.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: nfs-provisioner diff --git a/nfs/deploy/kubernetes/auth/statefulset-sa.yaml b/nfs/deploy/kubernetes/auth/statefulset-sa.yaml deleted file mode 100644 index c6bea4c79a0..00000000000 --- a/nfs/deploy/kubernetes/auth/statefulset-sa.yaml +++ /dev/null @@ -1,75 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: nfs-provisioner - labels: - app: nfs-provisioner -spec: - ports: - - name: nfs - port: 2049 - - name: mountd - port: 20048 - - name: rpcbind - port: 111 - - name: rpcbind-udp - port: 111 - protocol: UDP - selector: - app: nfs-provisioner ---- -kind: StatefulSet -apiVersion: apps/v1beta1 -metadata: - name: nfs-provisioner -spec: - serviceName: "nfs-provisioner" - replicas: 1 - template: - metadata: - labels: - app: nfs-provisioner - annotations: - pod.alpha.kubernetes.io/initialized: "true" - spec: - serviceAccount: nfs-provisioner - terminationGracePeriodSeconds: 0 - containers: - - name: nfs-provisioner - image: quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9 - ports: - - name: nfs - containerPort: 2049 - - name: mountd - containerPort: 20048 - - name: rpcbind - containerPort: 111 - - name: rpcbind-udp - containerPort: 111 - protocol: UDP - securityContext: - capabilities: - add: - - DAC_READ_SEARCH - - SYS_RESOURCE - args: - - "-provisioner=example.com/nfs" - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: SERVICE_NAME - value: nfs-provisioner - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: export-volume - mountPath: /export - volumes: - - name: export-volume - hostPath: - path: /srv diff --git a/nfs/deploy/kubernetes/daemonset.yaml b/nfs/deploy/kubernetes/daemonset.yaml deleted file mode 100644 index d80f052ef69..00000000000 --- a/nfs/deploy/kubernetes/daemonset.yaml +++ /dev/null @@ -1,50 +0,0 @@ -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: nfs-provisioner -spec: - template: - metadata: - labels: - app: nfs-provisioner - spec: - nodeSelector: - app: nfs-provisioner - containers: - - name: nfs-provisioner - image: quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9 - ports: - - name: nfs - containerPort: 2049 - hostPort: 2049 - - name: mountd - containerPort: 20048 - - name: rpcbind - containerPort: 111 - - name: rpcbind-udp - containerPort: 111 - protocol: UDP - securityContext: - 
capabilities: - add: - - DAC_READ_SEARCH - - SYS_RESOURCE - args: - - "-provisioner=example.com/nfs" - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: export-volume - mountPath: /export - volumes: - - name: export-volume - hostPath: - path: /srv diff --git a/nfs/deploy/kubernetes/deployment.yaml b/nfs/deploy/kubernetes/deployment.yaml index fee735c2620..977f062125b 100644 --- a/nfs/deploy/kubernetes/deployment.yaml +++ b/nfs/deploy/kubernetes/deployment.yaml @@ -1,3 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nfs-provisioner +--- kind: Service apiVersion: v1 metadata: @@ -31,9 +36,10 @@ spec: labels: app: nfs-provisioner spec: + serviceAccount: nfs-provisioner containers: - name: nfs-provisioner - image: quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9 + image: quay.io/kubernetes_incubator/nfs-provisioner:latest ports: - name: nfs containerPort: 2049 diff --git a/nfs/deploy/kubernetes/pod.yaml b/nfs/deploy/kubernetes/pod.yaml index a7c3d8d09f3..78fa0e756e1 100644 --- a/nfs/deploy/kubernetes/pod.yaml +++ b/nfs/deploy/kubernetes/pod.yaml @@ -1,11 +1,17 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nfs-provisioner +--- kind: Pod apiVersion: v1 metadata: name: nfs-provisioner spec: + serviceAccount: nfs-provisioner containers: - name: nfs-provisioner - image: quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9 + image: quay.io/kubernetes_incubator/nfs-provisioner:latest ports: - name: nfs containerPort: 2049 diff --git a/nfs/deploy/kubernetes/auth/psp.yaml b/nfs/deploy/kubernetes/psp.yaml similarity index 100% rename from nfs/deploy/kubernetes/auth/psp.yaml rename to nfs/deploy/kubernetes/psp.yaml diff --git a/nfs/deploy/kubernetes/auth/clusterrole.yaml b/nfs/deploy/kubernetes/rbac.yaml similarity index 53% rename from nfs/deploy/kubernetes/auth/clusterrole.yaml rename to nfs/deploy/kubernetes/rbac.yaml index 4aec8b6a882..8ae4657a3ca 100644 --- a/nfs/deploy/kubernetes/auth/clusterrole.yaml +++ b/nfs/deploy/kubernetes/rbac.yaml @@ -14,11 +14,29 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] + verbs: ["create", "update", "patch"] - apiGroups: [""] - resources: ["services", "endpoints"] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] + - apiGroups: [""] + resources: ["services"] verbs: ["get"] - apiGroups: ["extensions"] resources: ["podsecuritypolicies"] resourceNames: ["nfs-provisioner"] verbs: ["use"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: run-nfs-provisioner + namespace: default +subjects: + - kind: ServiceAccount + name: nfs-provisioner + namespace: default +# update namespace above to your namespace in order to make this work +roleRef: + kind: ClusterRole + name: nfs-provisioner-runner + apiGroup: rbac.authorization.k8s.io diff --git a/nfs/deploy/kubernetes/statefulset.yaml b/nfs/deploy/kubernetes/statefulset.yaml index 0cea5f1b9c0..dfdaa5f8efd 100644 --- a/nfs/deploy/kubernetes/statefulset.yaml +++ b/nfs/deploy/kubernetes/statefulset.yaml @@ -1,3 +1,8 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: nfs-provisioner +--- kind: Service apiVersion: v1 metadata: @@ -32,10 +37,11 @@ spec: annotations: pod.alpha.kubernetes.io/initialized: "true" spec: + serviceAccount: nfs-provisioner 
terminationGracePeriodSeconds: 0 containers: - name: nfs-provisioner - image: quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9 + image: quay.io/kubernetes_incubator/nfs-provisioner:latest ports: - name: nfs containerPort: 2049 diff --git a/nfs/docs/authorization.md b/nfs/docs/authorization.md deleted file mode 100644 index 2de7632de35..00000000000 --- a/nfs/docs/authorization.md +++ /dev/null @@ -1,103 +0,0 @@ -# Authorization - -If you chose to run the provisioner in Kubernetes you may need to grant it authorization to make the API requests and syscalls it needs to. Creating `PersistentVolumes` is normally an administrator's responsibility and the authorization policies of Kubernetes & OpenShift will by default deny a pod the authorization to make such API requests and syscalls. A Kubernetes RBAC API request denial looks like this: - ->E0124 20:10:01.475115 1 reflector.go:199] github.com/kubernetes-incubator/nfs-provisioner/vendor/k8s.io/client-go/tools/cache/reflector.go:94: Failed to list *v1beta1.StorageClass: the server does not allow access to the requested resource (get storageclasses.storage.k8s.io) - -Find out what authorization plugin or policy implementation your cluster uses, if any, and follow one of the below sections. - -* [PSP and/or RBAC](#rbac) -* [OpenShift](#openshift) - -## PSP and/or RBAC - -Your cluster may have [PSPs](https://kubernetes.io/docs/user-guide/pod-security-policy/) (Pod Security Policies) and/or [RBAC](https://kubernetes.io/docs/admin/authorization/) (Role-Based Access Control) enabled. You should probably [take advantage of both](https://github.com/kubernetes/kubernetes/tree/release-1.5/examples/podsecuritypolicy/rbac) if you want to use one or the other at all but if your cluster has only one enabled: -* PSP: you need only create the PSP - - ```console - $ kubectl create -f deploy/kubernetes/auth/psp.yaml - podsecuritypolicy "nfs-provisioner" created - ``` -* RBAC: ignore the step where you create the PSP, the `ClusterRole` will still work even if the PSP doesn't exist - -RBAC doesn't have a bootstrap `ClusterRole` with the permissions nfs-provisioner needs so you need to create a `ClusterRole` that lists the permissions plus a `ClusterRoleBinding` that grants the permissions to the service account the nfs-provisioner pod will be assigned. - -Create the service account. Later, you will have to ensure the pod template of the deployment/statefulset/daemonset specifies this service account. - -```console -$ kubectl create -f deploy/kubernetes/auth/serviceaccount.yaml -serviceaccounts/nfs-provisioner -``` - -Create the PSP. - -```console -$ kubectl create -f deploy/kubernetes/auth/psp.yaml -serviceaccounts/nfs-provisioner -``` - -`deploy/kubernetes/auth/clusterrole.yaml` lists all the permissions nfs-provisioner needs. - -Create the `ClusterRole`. - -```console -$ kubectl create -f deploy/kubernetes/auth/clusterrole.yaml -clusterrole "nfs-provisioner-runner" created -``` - -`deploy/kubernetes/auth/clusterrolebinding.yaml` binds the "nfs-provisioner" service account in namespace `default` to your `ClusterRole`. Edit the service account name and namespace accordingly if you are not in the namespace `default` or named the service account something other than "nfs-provisioner". - -Create the `ClusterRoleBinding`. 
-```console -$ kubectl create -f deploy/kubernetes/auth/clusterrolebinding.yaml -clusterrolebinding "run-nfs-provisioner" created -``` - -Remember: later, you will have to ensure the pod template of the deployment/statefulset/daemonset specifies the service account you created. - -## OpenShift - -OpenShift by default has both [authorization policies](https://docs.openshift.com/container-platform/latest/admin_guide/manage_authorization_policy.html) and [security context constraints](https://docs.openshift.com/container-platform/latest/admin_guide/manage_scc.html) that deny an nfs-provisioner pod its needed permissions, so you need to create a new `ClusterRole` and SCC for your pod to use. - -Create the service account. Later, you will have to ensure the pod template of the deployment/statefulset/daemonset specifies this service account. - -``` -$ oc create -f deploy/kubernetes/auth/serviceaccount.yaml -serviceaccount "nfs-provisioner" created -``` - -`deploy/kubernetes/auth/openshift-scc.yaml` defines an SCC for your nfs-provisioner pod to validate against. - -Create the SCC. - -```console -$ oc create -f deploy/kubernetes/auth/openshift-scc.yaml -securitycontextconstraints "nfs-provisioner" created -``` - -Add the `nfs-provisioner` service account to the SCC. Change the service account name and namespace accordingly if you are not in the namespace `default` or named the service account something other than "nfs-provisioner". - -```console -$ oadm policy add-scc-to-user nfs-provisioner system:serviceaccount:default:nfs-provisioner -``` - -`deploy/kubernetes/auth/openshift-clusterrole.yaml` lists all the permissions nfs-provisioner needs. - -Create the `ClusterRole`. - -```console -$ oc create -f deploy/kubernetes/auth/openshift-clusterrole.yaml -clusterrole "nfs-provisioner-runner" created -``` - -Add the `ClusterRole` to the `nfs-provisioner` service account. Change the service account name and namespace accordingly if you are not in the namespace `default` or named the service account something other than "nfs-provisioner". - -```console -$ oadm policy add-cluster-role-to-user nfs-provisioner-runner system:serviceaccount:default:nfs-provisioner -``` - -Remember: later, you will have to ensure the pod template of the deployment/statefulset/daemonset specifies the service account you created. - ---- - -Now that you have finished authorizing the provisioner, go to [Deployment](deployment.md) for info on how to deploy it. diff --git a/nfs/docs/demo/README.md b/nfs/docs/demo/README.md deleted file mode 100644 index c10bb6daaf2..00000000000 --- a/nfs/docs/demo/README.md +++ /dev/null @@ -1,176 +0,0 @@ -# Demo - -The [beta dynamic provisioning feature](http://blog.kubernetes.io/2016/10/dynamic-provisioning-and-storage-in-kubernetes.html) allows administrators to define `StorageClasses` to enable Kubernetes to create `PersistentVolumes` on-demand. Kubernetes includes many [provisioners](http://kubernetes.io/docs/user-guide/persistent-volumes/#provisioner) to specify in `StorageClasses` definitions and now, with Kubernetes 1.5, also includes support for [external or out-of-tree provisioners](https://github.com/kubernetes/kubernetes/pull/30285) like [nfs-provisioner](https://github.com/kubernetes-incubator/external-storage/nfs). - -nfs-provisioner creates NFS-backed PV's, leveraging the NFS volume plugin of Kubernetes, so given the ubiquity of NFS it will work almost anywhere. It's ideal for local clusters and dev work, any place a PV is wanted but not the manual work of creating one. 
We'll demonstrate how to get it quickly up and running, following a variation of the Kubernetes repo's [NFS example](https://github.com/kubernetes/kubernetes/tree/release-1.5/examples/volumes/nfs). - -If the cluster you intend to follow this demo with has RBAC and/or PSP enabled or it's an OpenShift cluster, you must first complete the [authorization guide](../authorization.md). - -The recommended way to run nfs-provisioner, which we'll demonstrate here, is as a [single-instance stateful app](http://kubernetes.io/docs/tutorials/stateful-application/run-stateful-application/), where we create a `Deployment` and back it with some persistent storage like a `hostPath` volume. We always create it in tandem with a matching service that has the necessary ports exposed. We'll see that when it's setup like so, the NFS server it runs to serve its PV's can maintain state and so survive pod restarts. The other ways to run are as a `DaemonSet`, standalone Docker container, or standalone binary, all documented [here](../deployment.md) - -There are two main things one can customize here before creating the deployment: the provisioner name and the backing volume. - -The provisioner name must follow the naming scheme `/`, like for example `kubernetes.io/gce-pd`. It's specified here in the `args` field. This is the `provisioner` a `StorageClass` will specify later. We'll use the name `example.com/nfs`. - -```yaml -... -args: - - "-provisioner=example.com/nfs" -... -``` - -The backing volume is the place mounted at `/export` where the nfs-provisioner instance stores its state and the data of every PV it provisions. So we can mount any volume there to specify that volume as the backing storage for provisioned PV's. We'll use a [`hostPath`](http://kubernetes.io/docs/user-guide/volumes/#hostpath) volume at `/tmp/nfs-provisioner`, so we need to make sure that the directory exists on the nodes our deployment's pod could be scheduled to and, if selinux is enforcing, that it is labelled appropriately. - -```yaml -... - volumeMounts: - - name: export-volume - mountPath: /export -volumes: - - name: export-volume - hostPath: - path: /tmp/nfs-provisioner -... -``` - -```console -$ mkdir -p /tmp/nfs-provisioner -$ sudo chcon -Rt svirt_sandbox_file_t /tmp/nfs-provisioner -``` - -If you completed the [authorization guide](../authorization.md) (because your cluster has RBAC and/or PSP enabled or it's an OpenShift cluster) and it told you to remember to ensure the pod template of the deployment specifies the service account you created, do that now as well by adding a `serviceAccount` line. -```yaml -... - spec: - serviceAccount: nfs-provisioner - containers: -... -``` - -We create the deployment and its service. - -```console -$ kubectl create -f deployment.yaml -service "nfs-provisioner" created -deployment "nfs-provisioner" created -``` - -Now, our instance of nfs-provisioner can be treated like any other provisioner: we specify its name in a `StorageClass` object and the provisioner will automatically create `PersistentVolumes` for `PersistentVolumeClaims` that ask for the `StorageClass`. We'll show all that. - -We create a `StorageClass` that specifies our provisioner. - -```console -$ kubectl create -f class.yaml -storageclass "example-nfs" created -``` - -We create a `PersistentVolumeClaim` asking for our `StorageClass`. - -```console -$ kubectl create -f claim.yaml -persistentvolumeclaim "nfs" created -``` - -And a `PersistentVolume` is provisioned automatically and already bound to our claim. 
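For reference, the object created behind the scenes at this point is roughly the NFS-backed `PersistentVolume` sketched below. This is a minimal sketch: the server address and export path are assumed placeholders, while the capacity, access mode and `Delete` reclaim policy correspond to the claim and the `kubectl get pv` output shown next.

```yaml
# Rough shape of the PV that nfs-provisioner creates for the claim above.
# The server address and export path are illustrative placeholders.
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pvc-dce84888-7a9d-11e6-b1ee-5254001e0c1b
spec:
  capacity:
    storage: 1Mi
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Delete   # deleting the PVC deletes the PV and its data
  nfs:
    server: 10.0.0.154                    # placeholder for the nfs-provisioner service/pod IP
    path: /export/pvc-dce84888-7a9d-11e6-b1ee-5254001e0c1b
```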
We didn't have to manually figure out the NFS server's IP, put that IP into a PV yaml, then create the yaml. We just had to deploy our nfs-provisioner and create a `StorageClass` for it, which are one-time steps. - -```console -$ kubectl get pv -NAME CAPACITY ACCESSMODES RECLAIMPOLICY STATUS CLAIM REASON AGE -pvc-dce84888-7a9d-11e6-b1ee-5254001e0c1b 1Mi RWX Delete Bound default/nfs -``` - -If you don't see a PV bound to your PVC, check the deployment's provisioner pod's logs using `kubectl logs` and look for events in the PVC using `kubectl describe`. - -Now we have an NFS-backed PVC & PV pair that is exactly like what is expected by the official Kubernetes NFS example, so we'll finish the [example](https://github.com/kubernetes/kubernetes/tree/release-1.5/examples/volumes/nfs#setup-the-fake-backend) to show our storage works, can be shared, and persists. If you don't need that proof, you can skip ahead to the part where we discuss deleting and cleaning up the provisioner and its storage. - -We setup the fake backend that updates `index.html` on the NFS server every 10 seconds. And check that our mounts are working. - -```console -$ kubectl create -f nfs-busybox-rc.yaml -$ kubectl get pod -l name=nfs-busybox -NAME READY STATUS RESTARTS AGE -nfs-busybox-h782l 1/1 Running 0 13m -nfs-busybox-nul47 1/1 Running 0 13m -$ kubectl exec nfs-busybox-h782l -- cat /mnt/index.html -Mon Dec 19 18:10:09 UTC 2016 -nfs-busybox-h782l -``` - -We setup the web server that reads from the NFS share and runs a simple web server on it. And check that `nginx` is serving the data, the `index.html` from above, appropriately. - -```console -$ kubectl create -f nfs-web-rc.yaml -$ kubectl create -f nfs-web-service.yaml -$ kubectl get pod -l name=nfs-busybox -NAME READY STATUS RESTARTS AGE -nfs-busybox-h782l 1/1 Running 0 13m -nfs-busybox-nul47 1/1 Running 0 13m -$ kubectl get services nfs-web -NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE -nfs-web 10.0.0.187 80/TCP 7s -$ kubectl exec nfs-busybox-h782l -- wget -qO- http://10.0.0.187 -Mon Dec 19 18:11:51 UTC 2016 -nfs-busybox-nul47 -``` - -We see that the PV created by our nfs-provisioner works, let's now show that it will continue to work even after our nfs-provisioner pod restarts. Because of how NFS works, anything that has shares mounted will hang as it tries to access or unmount them while the NFS server is down. Recall that all our nfs-provisioner instance's state and data persists in the volume we mounted at `/export`, so it should recover and its shares become accessible again when it, and the NFS server it runs, restarts. We'll simulate this situation. - -We scale the deployment down to 0 replicas. - -```console -$ kubectl scale --replicas=0 deployment/nfs-provisioner -deployment "nfs-provisioner" scaled -``` - -We try the same check from before that `nginx` is serving the data, and we see it hangs indefinitely as it tries to read the share. - -```console -$ kubectl exec nfs-busybox-h782l -- wget -qO- http://10.0.0.187 -... -^C -``` - -We scale the deployment back up to 1 replica. - -```console -$ kubectl scale --replicas=1 deployment/nfs-provisioner -deployment "nfs-provisioner" scaled -``` - -And after a brief delay all should be working again. - -```console -$ kubectl exec nfs-busybox-h782l -- wget -qO- http://10.0.0.187 -Mon Dec 19 18:21:49 UTC 2016 -nfs-busybox-nul47 -``` - -Now we'll show how to delete the storage provisioned by our nfs-provisioner once we're done with it. Let's first delete the fake backend and web server that are using the PVC. 
- -```console -$ kubectl delete rc nfs-busybox nfs-web -replicationcontroller "nfs-busybox" deleted -replicationcontroller "nfs-web" deleted -$ kubectl delete service nfs-web -service "nfs-web" deleted -``` - -Once all those pods have disappeared and so we are confident they have unmounted the NFS share, we can safely delete the PVC. The provisioned PV the PVC is bound to has the `ReclaimPolicy` `Delete`, so when we delete the PVC, the PV and its data will be automatically deleted by our nfs-provisioner. - -```console -$ kubectl delete pvc nfs -persistentvolumeclaim "nfs" deleted -$ kubectl get pv -``` - -Note that deleting an nfs-provisioner instance won't delete the PV's it created, so before we do so we need to make sure none still exist as they would be useless for as long as the provisioner is gone. - -```console -$ kubectl delete deployment nfs-provisioner -deployment "nfs-provisioner" deleted -$ kubectl delete service nfs-provisioner -service "nfs-provisioner" deleted -``` - -Thanks for following along. If at any point things didn't work correctly, check the provisioner pod's logs using `kubectl logs` and look for events in the PV's and PVC's using `kubectl describe`. If you are interested in Kubernetes storage-related things like this, head to the [Storage SIG](http://blog.kubernetes.io/2016/10/dynamic-provisioning-and-storage-in-kubernetes.html). If you are interested in writing your own external provisioner, all the code is available for you to read or fork, and better documentation on how to do it is in the works. - diff --git a/nfs/docs/demo/claim.yaml b/nfs/docs/demo/claim.yaml deleted file mode 100644 index 6c0eae8c9eb..00000000000 --- a/nfs/docs/demo/claim.yaml +++ /dev/null @@ -1,12 +0,0 @@ -kind: PersistentVolumeClaim -apiVersion: v1 -metadata: - name: nfs - annotations: - volume.beta.kubernetes.io/storage-class: "example-nfs" -spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Mi diff --git a/nfs/docs/demo/class.yaml b/nfs/docs/demo/class.yaml deleted file mode 100644 index 7cf1ac935b7..00000000000 --- a/nfs/docs/demo/class.yaml +++ /dev/null @@ -1,5 +0,0 @@ -kind: StorageClass -apiVersion: storage.k8s.io/v1 -metadata: - name: example-nfs -provisioner: example.com/nfs diff --git a/nfs/docs/demo/deployment.yaml b/nfs/docs/demo/deployment.yaml deleted file mode 100644 index 70486917208..00000000000 --- a/nfs/docs/demo/deployment.yaml +++ /dev/null @@ -1,73 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: nfs-provisioner - labels: - app: nfs-provisioner -spec: - ports: - - name: nfs - port: 2049 - - name: mountd - port: 20048 - - name: rpcbind - port: 111 - - name: rpcbind-udp - port: 111 - protocol: UDP - selector: - app: nfs-provisioner ---- -kind: Deployment -apiVersion: extensions/v1beta1 -metadata: - name: nfs-provisioner -spec: - replicas: 1 - strategy: - type: Recreate - template: - metadata: - labels: - app: nfs-provisioner - spec: - containers: - - name: nfs-provisioner - image: quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9 - ports: - - name: nfs - containerPort: 2049 - - name: mountd - containerPort: 20048 - - name: rpcbind - containerPort: 111 - - name: rpcbind-udp - containerPort: 111 - protocol: UDP - securityContext: - capabilities: - add: - - DAC_READ_SEARCH - - SYS_RESOURCE - args: - - "-provisioner=example.com/nfs" - - "-grace-period=10" - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: SERVICE_NAME - value: nfs-provisioner - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: 
metadata.namespace - imagePullPolicy: "IfNotPresent" - volumeMounts: - - name: export-volume - mountPath: /export - volumes: - - name: export-volume - hostPath: - path: /tmp/nfs-provisioner diff --git a/nfs/docs/demo/nfs-busybox-rc.yaml b/nfs/docs/demo/nfs-busybox-rc.yaml deleted file mode 100644 index 617d0275585..00000000000 --- a/nfs/docs/demo/nfs-busybox-rc.yaml +++ /dev/null @@ -1,32 +0,0 @@ -# This mounts the nfs volume claim into /mnt and continuously -# overwrites /mnt/index.html with the time and hostname of the pod. - -apiVersion: v1 -kind: ReplicationController -metadata: - name: nfs-busybox -spec: - replicas: 2 - selector: - name: nfs-busybox - template: - metadata: - labels: - name: nfs-busybox - spec: - containers: - - image: busybox - command: - - sh - - -c - - 'while true; do date > /mnt/index.html; hostname >> /mnt/index.html; sleep $(($RANDOM % 5 + 5)); done' - imagePullPolicy: IfNotPresent - name: busybox - volumeMounts: - # name must match the volume name below - - name: nfs - mountPath: "/mnt" - volumes: - - name: nfs - persistentVolumeClaim: - claimName: nfs diff --git a/nfs/docs/demo/nfs-web-rc.yaml b/nfs/docs/demo/nfs-web-rc.yaml deleted file mode 100644 index 6c96682cb18..00000000000 --- a/nfs/docs/demo/nfs-web-rc.yaml +++ /dev/null @@ -1,30 +0,0 @@ -# This pod mounts the nfs volume claim into /usr/share/nginx/html and -# serves a simple web page. - -apiVersion: v1 -kind: ReplicationController -metadata: - name: nfs-web -spec: - replicas: 2 - selector: - role: web-frontend - template: - metadata: - labels: - role: web-frontend - spec: - containers: - - name: web - image: nginx - ports: - - name: web - containerPort: 80 - volumeMounts: - # name must match the volume name below - - name: nfs - mountPath: "/usr/share/nginx/html" - volumes: - - name: nfs - persistentVolumeClaim: - claimName: nfs diff --git a/nfs/docs/demo/nfs-web-service.yaml b/nfs/docs/demo/nfs-web-service.yaml deleted file mode 100644 index b73cac2bc94..00000000000 --- a/nfs/docs/demo/nfs-web-service.yaml +++ /dev/null @@ -1,9 +0,0 @@ -kind: Service -apiVersion: v1 -metadata: - name: nfs-web -spec: - ports: - - port: 80 - selector: - role: web-frontend diff --git a/nfs/docs/deployment.md b/nfs/docs/deployment.md index 52017db9565..3693ef817b9 100644 --- a/nfs/docs/deployment.md +++ b/nfs/docs/deployment.md @@ -22,7 +22,7 @@ $ make container If you are running in Kubernetes, it will pull the image from Quay for you. Or you can do it yourself. ``` -$ docker pull quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9 +$ docker pull quay.io/kubernetes_incubator/nfs-provisioner:latest ``` ## Deploying the provisioner @@ -34,13 +34,10 @@ $ ALLOW_SECURITY_CONTEXT=true API_HOST_IP=0.0.0.0 $GOPATH/src/k8s.io/kubernetes/ Decide on a unique name to give the provisioner that follows the naming scheme `/` where `` cannot be "kubernetes.io." The provisioner will only provision volumes for claims that request a `StorageClass` with a `provisioner` field set equal to this name. For example, the names of the in-tree GCE and AWS provisioners are `kubernetes.io/gce-pd` and `kubernetes.io/aws-ebs`. -Decide how to run nfs-provisioner and follow one of the below sections. The recommended way is running it as a [single-instance stateful app](http://kubernetes.io/docs/tutorials/stateful-application/run-stateful-application/), where you create a `Deployment`/`StatefulSet` and back it with some persistent storage like a `hostPath` volume. Running as a `DaemonSet` is for exposing & "pooling" multiple nodes' `hostPath` volumes. 
Running outside of Kubernetes as a standalone container or binary is for when you want greater control over the app's lifecycle and/or the ability to set per-PV quotas. - -**Note**: if you went through the [Authorization](authorization.md) guide, you should use the yaml specs in `deploy/kubernetes/auth` which have the spec.template.spec.serviceAccount set to "nfs-provisioner" for your convenience, instead of using the ones referred to here: e.g. `deploy/kubernetes/auth/deployment-sa.yaml` instead of `deploy/kubernetes/deployment.yaml`. +Decide how to run nfs-provisioner and follow one of the below sections. The recommended way is running it as a [single-instance stateful app](http://kubernetes.io/docs/tutorials/stateful-application/run-stateful-application/), where you create a `Deployment`/`StatefulSet` and back it with some persistent storage like a `hostPath` volume. Running outside of Kubernetes as a standalone container or binary is for when you want greater control over the app's lifecycle and/or the ability to set per-PV quotas. * [In Kubernetes - Deployment](#in-kubernetes---deployment-of-1-replica) * [In Kubernetes - StatefulSet](#in-kubernetes---statefulset-of-1-replica) -* [In Kubernetes - DaemonSet](#in-kubernetes---daemonset) * [Outside of Kubernetes - container](#outside-of-kubernetes---container) * [Outside of Kubernetes - binary](#outside-of-kubernetes---binary) @@ -57,6 +54,8 @@ Note that if you continue with the `hostPath` volume, its path must exist on the Create the deployment and its service. ``` +$ kubectl create -f deploy/kubernetes/psp.yaml +$ kubectl create -f deploy/kubernetes/rbac.yaml $ kubectl create -f deploy/kubernetes/deployment.yaml service "nfs-provisioner" created deployment "nfs-provisioner" created @@ -66,31 +65,6 @@ deployment "nfs-provisioner" created The procedure for running a stateful set is identical to [that for a deployment, above,](#in-kubernetes---deployment-of-1-replica) so wherever you see `deployment` there, replace it with `statefulset`. The benefit is that you get a stable hostname. But note that stateful sets are in beta. Note that the service cannot be headless, unlike in most examples of stateful sets. - -### In Kubernetes - DaemonSet - -Edit the `provisioner` argument in the `args` field in `deploy/kubernetes/daemonset.yaml` to be the provisioner's name you decided on. - -`deploy/kubernetes/daemonset.yaml` specifies a `hostPath` volume `/srv` mounted at `/export`. The `/export` directory is where the provisioner stores its state and provisioned `PersistentVolumes'` data, so by mounting a volume there, you specify it as the backing storage for provisioned PVs. Each pod in the daemon set does this, effectively creating a "pool" of their nodes' local storage. - -`deploy/kubernetes/daemonset.yaml` also specifies a `nodeSelector` to target nodes/hosts. Choose nodes to deploy nfs-provisioner on and be sure that the `hostPath` directory exists on each node: `mkdir -p /srv`. If SELinux is enforcing on the nodes, you may need to make the container [privileged](http://kubernetes.io/docs/user-guide/security-context/) or change the security context of the `hostPath` directory on the node: `sudo chcon -Rt svirt_sandbox_file_t /srv`. - -`deploy/kubernetes/daemonset.yaml` specifies a `hostPort` for NFS, TCP 2049, to expose on the node, so be sure that this port is available on each node. The daemon set's pods will use their node's name as the NFS server IP to put on their `PersistentVolumes`. - -Label the chosen nodes to match the `nodeSelector`. 
- -``` -$ kubectl label node 127.0.0.1 app=nfs-provisioner -node "127.0.0.1" labeled -``` - -Create the daemon set. - -``` -$ kubectl create -f deploy/kubernetes/daemonset.yaml -daemonset "nfs-provisioner" created -``` - ### Outside of Kubernetes - container The container is going to need to run with one of `master` or `kubeconfig` set. For the `kubeconfig` argument to work, the config file, and any certificate files it references by path like `certificate-authority: /var/run/kubernetes/apiserver.crt`, need to be inside the container somehow. This can be done by creating Docker volumes, or copying the files into the folder where the Dockerfile is and adding lines like `COPY config /.kube/config` to the Dockerfile before building the image. @@ -103,7 +77,7 @@ You may want to specify the hostname the NFS server exports from, i.e. the serve $ docker run --cap-add DAC_READ_SEARCH --cap-add SYS_RESOURCE \ --security-opt seccomp:deploy/docker/nfs-provisioner-seccomp.json \ -v $HOME/.kube:/.kube:Z \ -quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9 \ +quay.io/kubernetes_incubator/nfs-provisioner:latest \ -provisioner=example.com/nfs \ -kubeconfig=/.kube/config ``` @@ -111,7 +85,7 @@ or ``` $ docker run --cap-add DAC_READ_SEARCH --cap-add SYS_RESOURCE \ --security-opt seccomp:deploy/docker/nfs-provisioner-seccomp.json \ -quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9 \ +quay.io/kubernetes_incubator/nfs-provisioner:latest \ -provisioner=example.com/nfs \ -master=http://172.17.0.1:8080 ``` @@ -126,7 +100,7 @@ With the two above options, the run command will look something like this. $ docker run --privileged \ -v $HOME/.kube:/.kube:Z \ -v /xfs:/export:Z \ -quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9 \ +quay.io/kubernetes_incubator/nfs-provisioner:latest \ -provisioner=example.com/nfs \ -kubeconfig=/.kube/config \ -enable-xfs-quota=true diff --git a/nfs/docs/multiple.md b/nfs/docs/multiple.md deleted file mode 100644 index ddb238f38e6..00000000000 --- a/nfs/docs/multiple.md +++ /dev/null @@ -1,13 +0,0 @@ -## Running Multiple Provisioners - -### Single StorageClass - -Multiple nfs-provisioner instances can have the same name, i.e. the same value for the `provisioner` argument. They will watch for the same class of claims. When a claim is added, they will race to acquire a lock on it, and only the winner may actually attempt to provision a volume while the others must wait for success or failure. By default, the winner has up to 30 seconds to succeed or fail to provision a volume, after which the other provisioners again race for the lock. This minimizes the number of calls to `Provision`. - -### Multiple StorageClasses - -Multiple nfs-provisioner with different names can be running at the same time. They won't conflict because they'll try to provision storage for their own classes of claims. - -### Scaling - -Given that multiple instances can have the same name, to scale up or down a set of provisioner pods, you simply create or delete pods with the same provisioner name. This can mean adding nodes/pods to a DaemonSet or creating more Deployments, as described in the [Deployment](deployment.md) doc. Each additional instance would back its PVs with different storage, effectively creating & adding to a "pool" of storage. 
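To make the name matching described above concrete, the sketch below shows the wiring using the demo's `example.com/nfs` name: the `StorageClass` carries the `provisioner` field, and a claim selects that class, so only provisioner instances started with `-provisioner=example.com/nfs` will act on it. This is a minimal sketch that reuses the demo's class and claim names; adjust them for your own deployment.

```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: example-nfs
provisioner: example.com/nfs        # must equal the provisioner's -provisioner argument
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nfs
  annotations:
    # annotation form of class selection, as used by the demo claim
    volume.beta.kubernetes.io/storage-class: "example-nfs"
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
```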
diff --git a/snapshot/deploy/kubernetes/aws/snapshot-rbac.yaml b/snapshot/deploy/kubernetes/aws/snapshot-rbac.yaml index 0b9d9411441..245c61dbd94 100644 --- a/snapshot/deploy/kubernetes/aws/snapshot-rbac.yaml +++ b/snapshot/deploy/kubernetes/aws/snapshot-rbac.yaml @@ -19,7 +19,10 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] - apiGroups: ["apiextensions.k8s.io"] resources: ["customresourcedefinitions"] verbs: ["create", "list", "watch", "delete"] diff --git a/snapshot/deploy/kubernetes/hostpath/snapshot-rbac.yaml b/snapshot/deploy/kubernetes/hostpath/snapshot-rbac.yaml index 0b9d9411441..245c61dbd94 100644 --- a/snapshot/deploy/kubernetes/hostpath/snapshot-rbac.yaml +++ b/snapshot/deploy/kubernetes/hostpath/snapshot-rbac.yaml @@ -19,7 +19,10 @@ rules: verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] - verbs: ["list", "watch", "create", "update", "patch"] + verbs: ["create", "update", "patch"] + - apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "list", "watch", "create", "update", "patch"] - apiGroups: ["apiextensions.k8s.io"] resources: ["customresourcedefinitions"] verbs: ["create", "list", "watch", "delete"] diff --git a/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE new file mode 100644 index 00000000000..d511905c164 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. 
You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. 
+ + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. 
+ +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. diff --git a/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE new file mode 100644 index 00000000000..d511905c164 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. 
+ +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. 
(This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. 
+ +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. 
diff --git a/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/LICENSE b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/LICENSE new file mode 100644 index 00000000000..5b6e7c66c27 --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/selinux/docker-engine-selinux/LICENSE @@ -0,0 +1,340 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. + 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. 
The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. 
+ +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. 
Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. 
If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. 
+ +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. diff --git a/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE b/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE new file mode 100644 index 00000000000..e67cdabd22e --- /dev/null +++ b/vendor/github.com/docker/docker/contrib/syntax/vim/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 Honza Pokorny +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/docker/docs/static_files/contributors.png b/vendor/github.com/docker/docker/docs/static_files/contributors.png new file mode 100644 index 00000000000..63c0a0c09b5 Binary files /dev/null and b/vendor/github.com/docker/docker/docs/static_files/contributors.png differ diff --git a/vendor/github.com/docker/docker/hack/generate-authors.sh b/vendor/github.com/docker/docker/hack/generate-authors.sh new file mode 100755 index 00000000000..e78a97f9627 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/generate-authors.sh @@ -0,0 +1,15 @@ +#!/bin/bash +set -e + +cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." + +# see also ".mailmap" for how email addresses and names are deduplicated + +{ + cat <<-'EOH' + # This file lists all individuals having contributed content to the repository. + # For how it is generated, see `hack/generate-authors.sh`. 
+ EOH + echo + git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf +} > AUTHORS diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.default b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.default new file mode 120000 index 00000000000..4278533d659 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.default @@ -0,0 +1 @@ +../../../contrib/init/sysvinit-debian/docker.default \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.init b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.init new file mode 120000 index 00000000000..8cb89d30dde --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.init @@ -0,0 +1 @@ +../../../contrib/init/sysvinit-debian/docker \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.upstart b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.upstart new file mode 120000 index 00000000000..7e1b64a3e64 --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.docker.upstart @@ -0,0 +1 @@ +../../../contrib/init/upstart/docker.conf \ No newline at end of file diff --git a/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.udev b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.udev new file mode 120000 index 00000000000..914a361959d --- /dev/null +++ b/vendor/github.com/docker/docker/hack/make/.build-deb/docker-engine.udev @@ -0,0 +1 @@ +../../../contrib/udev/80-docker.rules \ No newline at end of file diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE new file mode 100644 index 00000000000..34c4ea7c505 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.APACHE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD new file mode 100644 index 00000000000..9b4f4a294ea --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/symlink/LICENSE.BSD @@ -0,0 +1,27 @@ +Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/docker/docker/project/CONTRIBUTORS.md b/vendor/github.com/docker/docker/project/CONTRIBUTORS.md new file mode 120000 index 00000000000..44fcc634393 --- /dev/null +++ b/vendor/github.com/docker/docker/project/CONTRIBUTORS.md @@ -0,0 +1 @@ +../CONTRIBUTING.md \ No newline at end of file diff --git a/vendor/github.com/imdario/mergo/testdata/license.yml b/vendor/github.com/imdario/mergo/testdata/license.yml new file mode 100644 index 00000000000..62fdb61ec32 --- /dev/null +++ b/vendor/github.com/imdario/mergo/testdata/license.yml @@ -0,0 +1,3 @@ +import: ../../../../fossene/db/schema/thing.yml +fields: + site: string diff --git a/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/LICENSE b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/LICENSE new file mode 100644 index 00000000000..545cf2d3311 --- /dev/null +++ b/vendor/github.com/magiconair/properties/_third_party/gopkg.in/check.v1/LICENSE @@ -0,0 +1,25 @@ +Gocheck - A rich testing framework for Go + +Copyright (c) 2010-2013 Gustavo Niemeyer + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/petar/GoLLRB/AUTHORS b/vendor/github.com/petar/GoLLRB/AUTHORS new file mode 100644 index 00000000000..78d1de49566 --- /dev/null +++ b/vendor/github.com/petar/GoLLRB/AUTHORS @@ -0,0 +1,4 @@ +Petar Maymounkov +Vadim Vygonets +Ian Smith +Martin Bruse diff --git a/vendor/github.com/petar/GoLLRB/LICENSE b/vendor/github.com/petar/GoLLRB/LICENSE new file mode 100644 index 00000000000..b75312c787d --- /dev/null +++ b/vendor/github.com/petar/GoLLRB/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2010, Petar Maymounkov +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +(*) Redistributions of source code must retain the above copyright notice, this list +of conditions and the following disclaimer. + +(*) Redistributions in binary form must reproduce the above copyright notice, this +list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. + +(*) Neither the name of Petar Maymounkov nor the names of its contributors may be +used to endorse or promote products derived from this software without specific +prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/petar/GoLLRB/llrb/avgvar.go b/vendor/github.com/petar/GoLLRB/llrb/avgvar.go new file mode 100644 index 00000000000..2d7e2a3262d --- /dev/null +++ b/vendor/github.com/petar/GoLLRB/llrb/avgvar.go @@ -0,0 +1,39 @@ +// Copyright 2010 Petar Maymounkov. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package llrb + +import "math" + +// avgVar maintains the average and variance of a stream of numbers +// in a space-efficient manner. 
+type avgVar struct { + count int64 + sum, sumsq float64 +} + +func (av *avgVar) Init() { + av.count = 0 + av.sum = 0.0 + av.sumsq = 0.0 +} + +func (av *avgVar) Add(sample float64) { + av.count++ + av.sum += sample + av.sumsq += sample * sample +} + +func (av *avgVar) GetCount() int64 { return av.count } + +func (av *avgVar) GetAvg() float64 { return av.sum / float64(av.count) } + +func (av *avgVar) GetTotal() float64 { return av.sum } + +func (av *avgVar) GetVar() float64 { + a := av.GetAvg() + return av.sumsq/float64(av.count) - a*a +} + +func (av *avgVar) GetStdDev() float64 { return math.Sqrt(av.GetVar()) } diff --git a/vendor/github.com/petar/GoLLRB/llrb/iterator.go b/vendor/github.com/petar/GoLLRB/llrb/iterator.go new file mode 100644 index 00000000000..ee7b27f442b --- /dev/null +++ b/vendor/github.com/petar/GoLLRB/llrb/iterator.go @@ -0,0 +1,93 @@ +package llrb + +type ItemIterator func(i Item) bool + +//func (t *Tree) Ascend(iterator ItemIterator) { +// t.AscendGreaterOrEqual(Inf(-1), iterator) +//} + +func (t *LLRB) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + t.ascendRange(t.root, greaterOrEqual, lessThan, iterator) +} + +func (t *LLRB) ascendRange(h *Node, inf, sup Item, iterator ItemIterator) bool { + if h == nil { + return true + } + if !less(h.Item, sup) { + return t.ascendRange(h.Left, inf, sup, iterator) + } + if less(h.Item, inf) { + return t.ascendRange(h.Right, inf, sup, iterator) + } + + if !t.ascendRange(h.Left, inf, sup, iterator) { + return false + } + if !iterator(h.Item) { + return false + } + return t.ascendRange(h.Right, inf, sup, iterator) +} + +// AscendGreaterOrEqual will call iterator once for each element greater or equal to +// pivot in ascending order. It will stop whenever the iterator returns false. +func (t *LLRB) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + t.ascendGreaterOrEqual(t.root, pivot, iterator) +} + +func (t *LLRB) ascendGreaterOrEqual(h *Node, pivot Item, iterator ItemIterator) bool { + if h == nil { + return true + } + if !less(h.Item, pivot) { + if !t.ascendGreaterOrEqual(h.Left, pivot, iterator) { + return false + } + if !iterator(h.Item) { + return false + } + } + return t.ascendGreaterOrEqual(h.Right, pivot, iterator) +} + +func (t *LLRB) AscendLessThan(pivot Item, iterator ItemIterator) { + t.ascendLessThan(t.root, pivot, iterator) +} + +func (t *LLRB) ascendLessThan(h *Node, pivot Item, iterator ItemIterator) bool { + if h == nil { + return true + } + if !t.ascendLessThan(h.Left, pivot, iterator) { + return false + } + if !iterator(h.Item) { + return false + } + if less(h.Item, pivot) { + return t.ascendLessThan(h.Left, pivot, iterator) + } + return true +} + +// DescendLessOrEqual will call iterator once for each element less than the +// pivot in descending order. It will stop whenever the iterator returns false. 
+func (t *LLRB) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + t.descendLessOrEqual(t.root, pivot, iterator) +} + +func (t *LLRB) descendLessOrEqual(h *Node, pivot Item, iterator ItemIterator) bool { + if h == nil { + return true + } + if less(h.Item, pivot) || !less(pivot, h.Item) { + if !t.descendLessOrEqual(h.Right, pivot, iterator) { + return false + } + if !iterator(h.Item) { + return false + } + } + return t.descendLessOrEqual(h.Left, pivot, iterator) +} diff --git a/vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go b/vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go new file mode 100644 index 00000000000..47126a3be96 --- /dev/null +++ b/vendor/github.com/petar/GoLLRB/llrb/llrb-stats.go @@ -0,0 +1,46 @@ +// Copyright 2010 Petar Maymounkov. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package llrb + +// GetHeight() returns an item in the tree with key @key, and it's height in the tree +func (t *LLRB) GetHeight(key Item) (result Item, depth int) { + return t.getHeight(t.root, key) +} + +func (t *LLRB) getHeight(h *Node, item Item) (Item, int) { + if h == nil { + return nil, 0 + } + if less(item, h.Item) { + result, depth := t.getHeight(h.Left, item) + return result, depth + 1 + } + if less(h.Item, item) { + result, depth := t.getHeight(h.Right, item) + return result, depth + 1 + } + return h.Item, 0 +} + +// HeightStats() returns the average and standard deviation of the height +// of elements in the tree +func (t *LLRB) HeightStats() (avg, stddev float64) { + av := &avgVar{} + heightStats(t.root, 0, av) + return av.GetAvg(), av.GetStdDev() +} + +func heightStats(h *Node, d int, av *avgVar) { + if h == nil { + return + } + av.Add(float64(d)) + if h.Left != nil { + heightStats(h.Left, d+1, av) + } + if h.Right != nil { + heightStats(h.Right, d+1, av) + } +} diff --git a/vendor/github.com/petar/GoLLRB/llrb/llrb.go b/vendor/github.com/petar/GoLLRB/llrb/llrb.go new file mode 100644 index 00000000000..81373fbfdf0 --- /dev/null +++ b/vendor/github.com/petar/GoLLRB/llrb/llrb.go @@ -0,0 +1,456 @@ +// Copyright 2010 Petar Maymounkov. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// A Left-Leaning Red-Black (LLRB) implementation of 2-3 balanced binary search trees, +// based on the following work: +// +// http://www.cs.princeton.edu/~rs/talks/LLRB/08Penn.pdf +// http://www.cs.princeton.edu/~rs/talks/LLRB/LLRB.pdf +// http://www.cs.princeton.edu/~rs/talks/LLRB/Java/RedBlackBST.java +// +// 2-3 trees (and the run-time equivalent 2-3-4 trees) are the de facto standard BST +// algoritms found in implementations of Python, Java, and other libraries. The LLRB +// implementation of 2-3 trees is a recent improvement on the traditional implementation, +// observed and documented by Robert Sedgewick. 
+// +package llrb + +// Tree is a Left-Leaning Red-Black (LLRB) implementation of 2-3 trees +type LLRB struct { + count int + root *Node +} + +type Node struct { + Item + Left, Right *Node // Pointers to left and right child nodes + Black bool // If set, the color of the link (incoming from the parent) is black + // In the LLRB, new nodes are always red, hence the zero-value for node +} + +type Item interface { + Less(than Item) bool +} + +// +func less(x, y Item) bool { + if x == pinf { + return false + } + if x == ninf { + return true + } + return x.Less(y) +} + +// Inf returns an Item that is "bigger than" any other item, if sign is positive. +// Otherwise it returns an Item that is "smaller than" any other item. +func Inf(sign int) Item { + if sign == 0 { + panic("sign") + } + if sign > 0 { + return pinf + } + return ninf +} + +var ( + ninf = nInf{} + pinf = pInf{} +) + +type nInf struct{} + +func (nInf) Less(Item) bool { + return true +} + +type pInf struct{} + +func (pInf) Less(Item) bool { + return false +} + +// New() allocates a new tree +func New() *LLRB { + return &LLRB{} +} + +// SetRoot sets the root node of the tree. +// It is intended to be used by functions that deserialize the tree. +func (t *LLRB) SetRoot(r *Node) { + t.root = r +} + +// Root returns the root node of the tree. +// It is intended to be used by functions that serialize the tree. +func (t *LLRB) Root() *Node { + return t.root +} + +// Len returns the number of nodes in the tree. +func (t *LLRB) Len() int { return t.count } + +// Has returns true if the tree contains an element whose order is the same as that of key. +func (t *LLRB) Has(key Item) bool { + return t.Get(key) != nil +} + +// Get retrieves an element from the tree whose order is the same as that of key. +func (t *LLRB) Get(key Item) Item { + h := t.root + for h != nil { + switch { + case less(key, h.Item): + h = h.Left + case less(h.Item, key): + h = h.Right + default: + return h.Item + } + } + return nil +} + +// Min returns the minimum element in the tree. +func (t *LLRB) Min() Item { + h := t.root + if h == nil { + return nil + } + for h.Left != nil { + h = h.Left + } + return h.Item +} + +// Max returns the maximum element in the tree. +func (t *LLRB) Max() Item { + h := t.root + if h == nil { + return nil + } + for h.Right != nil { + h = h.Right + } + return h.Item +} + +func (t *LLRB) ReplaceOrInsertBulk(items ...Item) { + for _, i := range items { + t.ReplaceOrInsert(i) + } +} + +func (t *LLRB) InsertNoReplaceBulk(items ...Item) { + for _, i := range items { + t.InsertNoReplace(i) + } +} + +// ReplaceOrInsert inserts item into the tree. If an existing +// element has the same order, it is removed from the tree and returned. +func (t *LLRB) ReplaceOrInsert(item Item) Item { + if item == nil { + panic("inserting nil item") + } + var replaced Item + t.root, replaced = t.replaceOrInsert(t.root, item) + t.root.Black = true + if replaced == nil { + t.count++ + } + return replaced +} + +func (t *LLRB) replaceOrInsert(h *Node, item Item) (*Node, Item) { + if h == nil { + return newNode(item), nil + } + + h = walkDownRot23(h) + + var replaced Item + if less(item, h.Item) { // BUG + h.Left, replaced = t.replaceOrInsert(h.Left, item) + } else if less(h.Item, item) { + h.Right, replaced = t.replaceOrInsert(h.Right, item) + } else { + replaced, h.Item = h.Item, item + } + + h = walkUpRot23(h) + + return h, replaced +} + +// InsertNoReplace inserts item into the tree. If an existing +// element has the same order, both elements remain in the tree. 
+func (t *LLRB) InsertNoReplace(item Item) { + if item == nil { + panic("inserting nil item") + } + t.root = t.insertNoReplace(t.root, item) + t.root.Black = true + t.count++ +} + +func (t *LLRB) insertNoReplace(h *Node, item Item) *Node { + if h == nil { + return newNode(item) + } + + h = walkDownRot23(h) + + if less(item, h.Item) { + h.Left = t.insertNoReplace(h.Left, item) + } else { + h.Right = t.insertNoReplace(h.Right, item) + } + + return walkUpRot23(h) +} + +// Rotation driver routines for 2-3 algorithm + +func walkDownRot23(h *Node) *Node { return h } + +func walkUpRot23(h *Node) *Node { + if isRed(h.Right) && !isRed(h.Left) { + h = rotateLeft(h) + } + + if isRed(h.Left) && isRed(h.Left.Left) { + h = rotateRight(h) + } + + if isRed(h.Left) && isRed(h.Right) { + flip(h) + } + + return h +} + +// Rotation driver routines for 2-3-4 algorithm + +func walkDownRot234(h *Node) *Node { + if isRed(h.Left) && isRed(h.Right) { + flip(h) + } + + return h +} + +func walkUpRot234(h *Node) *Node { + if isRed(h.Right) && !isRed(h.Left) { + h = rotateLeft(h) + } + + if isRed(h.Left) && isRed(h.Left.Left) { + h = rotateRight(h) + } + + return h +} + +// DeleteMin deletes the minimum element in the tree and returns the +// deleted item or nil otherwise. +func (t *LLRB) DeleteMin() Item { + var deleted Item + t.root, deleted = deleteMin(t.root) + if t.root != nil { + t.root.Black = true + } + if deleted != nil { + t.count-- + } + return deleted +} + +// deleteMin code for LLRB 2-3 trees +func deleteMin(h *Node) (*Node, Item) { + if h == nil { + return nil, nil + } + if h.Left == nil { + return nil, h.Item + } + + if !isRed(h.Left) && !isRed(h.Left.Left) { + h = moveRedLeft(h) + } + + var deleted Item + h.Left, deleted = deleteMin(h.Left) + + return fixUp(h), deleted +} + +// DeleteMax deletes the maximum element in the tree and returns +// the deleted item or nil otherwise +func (t *LLRB) DeleteMax() Item { + var deleted Item + t.root, deleted = deleteMax(t.root) + if t.root != nil { + t.root.Black = true + } + if deleted != nil { + t.count-- + } + return deleted +} + +func deleteMax(h *Node) (*Node, Item) { + if h == nil { + return nil, nil + } + if isRed(h.Left) { + h = rotateRight(h) + } + if h.Right == nil { + return nil, h.Item + } + if !isRed(h.Right) && !isRed(h.Right.Left) { + h = moveRedRight(h) + } + var deleted Item + h.Right, deleted = deleteMax(h.Right) + + return fixUp(h), deleted +} + +// Delete deletes an item from the tree whose key equals key. +// The deleted item is return, otherwise nil is returned. +func (t *LLRB) Delete(key Item) Item { + var deleted Item + t.root, deleted = t.delete(t.root, key) + if t.root != nil { + t.root.Black = true + } + if deleted != nil { + t.count-- + } + return deleted +} + +func (t *LLRB) delete(h *Node, item Item) (*Node, Item) { + var deleted Item + if h == nil { + return nil, nil + } + if less(item, h.Item) { + if h.Left == nil { // item not present. 
Nothing to delete + return h, nil + } + if !isRed(h.Left) && !isRed(h.Left.Left) { + h = moveRedLeft(h) + } + h.Left, deleted = t.delete(h.Left, item) + } else { + if isRed(h.Left) { + h = rotateRight(h) + } + // If @item equals @h.Item and no right children at @h + if !less(h.Item, item) && h.Right == nil { + return nil, h.Item + } + // PETAR: Added 'h.Right != nil' below + if h.Right != nil && !isRed(h.Right) && !isRed(h.Right.Left) { + h = moveRedRight(h) + } + // If @item equals @h.Item, and (from above) 'h.Right != nil' + if !less(h.Item, item) { + var subDeleted Item + h.Right, subDeleted = deleteMin(h.Right) + if subDeleted == nil { + panic("logic") + } + deleted, h.Item = h.Item, subDeleted + } else { // Else, @item is bigger than @h.Item + h.Right, deleted = t.delete(h.Right, item) + } + } + + return fixUp(h), deleted +} + +// Internal node manipulation routines + +func newNode(item Item) *Node { return &Node{Item: item} } + +func isRed(h *Node) bool { + if h == nil { + return false + } + return !h.Black +} + +func rotateLeft(h *Node) *Node { + x := h.Right + if x.Black { + panic("rotating a black link") + } + h.Right = x.Left + x.Left = h + x.Black = h.Black + h.Black = false + return x +} + +func rotateRight(h *Node) *Node { + x := h.Left + if x.Black { + panic("rotating a black link") + } + h.Left = x.Right + x.Right = h + x.Black = h.Black + h.Black = false + return x +} + +// REQUIRE: Left and Right children must be present +func flip(h *Node) { + h.Black = !h.Black + h.Left.Black = !h.Left.Black + h.Right.Black = !h.Right.Black +} + +// REQUIRE: Left and Right children must be present +func moveRedLeft(h *Node) *Node { + flip(h) + if isRed(h.Right.Left) { + h.Right = rotateRight(h.Right) + h = rotateLeft(h) + flip(h) + } + return h +} + +// REQUIRE: Left and Right children must be present +func moveRedRight(h *Node) *Node { + flip(h) + if isRed(h.Left.Left) { + h = rotateRight(h) + flip(h) + } + return h +} + +func fixUp(h *Node) *Node { + if isRed(h.Right) { + h = rotateLeft(h) + } + + if isRed(h.Left) && isRed(h.Left.Left) { + h = rotateRight(h) + } + + if isRed(h.Left) && isRed(h.Right) { + flip(h) + } + + return h +} diff --git a/vendor/github.com/petar/GoLLRB/llrb/util.go b/vendor/github.com/petar/GoLLRB/llrb/util.go new file mode 100644 index 00000000000..63dbdb2df0a --- /dev/null +++ b/vendor/github.com/petar/GoLLRB/llrb/util.go @@ -0,0 +1,17 @@ +// Copyright 2010 Petar Maymounkov. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package llrb + +type Int int + +func (x Int) Less(than Item) bool { + return x < than.(Int) +} + +type String string + +func (x String) Less(than Item) bool { + return x < than.(String) +} diff --git a/vendor/github.com/prometheus/client_model/ruby/LICENSE b/vendor/github.com/prometheus/client_model/ruby/LICENSE new file mode 100644 index 00000000000..11069edd790 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/ruby/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
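For orientation, a minimal usage sketch of the GoLLRB llrb package vendored above. This is illustrative only: the types and methods used are the ones defined in llrb.go, iterator.go and util.go, while the main function and its values are made up for the example.

package main

import (
	"fmt"

	"github.com/petar/GoLLRB/llrb"
)

func main() {
	t := llrb.New()

	// llrb.Int implements the Item interface from util.go.
	t.ReplaceOrInsert(llrb.Int(3))
	t.ReplaceOrInsert(llrb.Int(1))
	t.ReplaceOrInsert(llrb.Int(2))

	fmt.Println(t.Len())            // 3
	fmt.Println(t.Has(llrb.Int(2))) // true

	// Visit items >= 2 in ascending order; returning false would stop early.
	t.AscendGreaterOrEqual(llrb.Int(2), func(i llrb.Item) bool {
		fmt.Println(i)
		return true
	})

	// Delete returns the removed item, or nil if it was not present.
	t.Delete(llrb.Int(1))
}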
diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/exe b/vendor/github.com/prometheus/procfs/fixtures/26231/exe new file mode 120000 index 00000000000..a91bec4dac3 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fixtures/26231/exe @@ -0,0 +1 @@ +/usr/bin/vim \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/0 b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/0 new file mode 120000 index 00000000000..da9c5dff3e0 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/0 @@ -0,0 +1 @@ +../../symlinktargets/abc \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/1 b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/1 new file mode 120000 index 00000000000..ca47b50ca5e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/1 @@ -0,0 +1 @@ +../../symlinktargets/def \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/10 b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/10 new file mode 120000 index 00000000000..c086831683e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/10 @@ -0,0 +1 @@ +../../symlinktargets/xyz \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/2 b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/2 new file mode 120000 index 00000000000..66731c06890 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/2 @@ -0,0 +1 @@ +../../symlinktargets/ghi \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26231/fd/3 b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/3 new file mode 120000 index 00000000000..0135dce35f9 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fixtures/26231/fd/3 @@ -0,0 +1 @@ +../../symlinktargets/uvw \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/0 b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/0 new file mode 120000 index 00000000000..da9c5dff3e0 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/0 @@ -0,0 +1 @@ +../../symlinktargets/abc \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/1 b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/1 new file mode 120000 index 00000000000..ca47b50ca5e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/1 @@ -0,0 +1 @@ +../../symlinktargets/def \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/2 b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/2 new file mode 120000 index 00000000000..66731c06890 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/2 @@ -0,0 +1 @@ +../../symlinktargets/ghi \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/3 b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/3 new file mode 120000 index 00000000000..0135dce35f9 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/3 @@ -0,0 +1 @@ +../../symlinktargets/uvw \ No newline at end of file diff --git a/vendor/github.com/prometheus/procfs/fixtures/26232/fd/4 b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/4 new file mode 120000 index 00000000000..c086831683e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fixtures/26232/fd/4 @@ -0,0 +1 @@ +../../symlinktargets/xyz \ No newline at end of file diff --git 
a/vendor/github.com/prometheus/procfs/fixtures/self b/vendor/github.com/prometheus/procfs/fixtures/self new file mode 120000 index 00000000000..1eeedea3d22 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fixtures/self @@ -0,0 +1 @@ +26231 \ No newline at end of file diff --git a/vendor/golang.org/x/text/internal/gen/code.go b/vendor/golang.org/x/text/internal/gen/code.go new file mode 100644 index 00000000000..d7031b6945a --- /dev/null +++ b/vendor/golang.org/x/text/internal/gen/code.go @@ -0,0 +1,351 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gen + +import ( + "bytes" + "encoding/gob" + "fmt" + "hash" + "hash/fnv" + "io" + "log" + "os" + "reflect" + "strings" + "unicode" + "unicode/utf8" +) + +// This file contains utilities for generating code. + +// TODO: other write methods like: +// - slices, maps, types, etc. + +// CodeWriter is a utility for writing structured code. It computes the content +// hash and size of written content. It ensures there are newlines between +// written code blocks. +type CodeWriter struct { + buf bytes.Buffer + Size int + Hash hash.Hash32 // content hash + gob *gob.Encoder + // For comments we skip the usual one-line separator if they are followed by + // a code block. + skipSep bool +} + +func (w *CodeWriter) Write(p []byte) (n int, err error) { + return w.buf.Write(p) +} + +// NewCodeWriter returns a new CodeWriter. +func NewCodeWriter() *CodeWriter { + h := fnv.New32() + return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)} +} + +// WriteGoFile appends the buffer with the total size of all created structures +// and writes it as a Go file to the the given file with the given package name. +func (w *CodeWriter) WriteGoFile(filename, pkg string) { + f, err := os.Create(filename) + if err != nil { + log.Fatalf("Could not create file %s: %v", filename, err) + } + defer f.Close() + if _, err = w.WriteGo(f, pkg); err != nil { + log.Fatalf("Error writing file %s: %v", filename, err) + } +} + +// WriteGo appends the buffer with the total size of all created structures and +// writes it as a Go file to the the given writer with the given package name. +func (w *CodeWriter) WriteGo(out io.Writer, pkg string) (n int, err error) { + sz := w.Size + w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32()) + defer w.buf.Reset() + return WriteGo(out, pkg, w.buf.Bytes()) +} + +func (w *CodeWriter) printf(f string, x ...interface{}) { + fmt.Fprintf(w, f, x...) +} + +func (w *CodeWriter) insertSep() { + if w.skipSep { + w.skipSep = false + return + } + // Use at least two newlines to ensure a blank space between the previous + // block. WriteGoFile will remove extraneous newlines. + w.printf("\n\n") +} + +// WriteComment writes a comment block. All line starts are prefixed with "//". +// Initial empty lines are gobbled. The indentation for the first line is +// stripped from consecutive lines. +func (w *CodeWriter) WriteComment(comment string, args ...interface{}) { + s := fmt.Sprintf(comment, args...) + s = strings.Trim(s, "\n") + + // Use at least two newlines to ensure a blank space between the previous + // block. WriteGoFile will remove extraneous newlines. + w.printf("\n\n// ") + w.skipSep = true + + // strip first indent level. 
+ sep := "\n" + for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] { + sep += s[:1] + } + + strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s) + + w.printf("\n") +} + +func (w *CodeWriter) writeSizeInfo(size int) { + w.printf("// Size: %d bytes\n", size) +} + +// WriteConst writes a constant of the given name and value. +func (w *CodeWriter) WriteConst(name string, x interface{}) { + w.insertSep() + v := reflect.ValueOf(x) + + switch v.Type().Kind() { + case reflect.String: + w.printf("const %s %s = ", name, typeName(x)) + w.WriteString(v.String()) + w.printf("\n") + default: + w.printf("const %s = %#v\n", name, x) + } +} + +// WriteVar writes a variable of the given name and value. +func (w *CodeWriter) WriteVar(name string, x interface{}) { + w.insertSep() + v := reflect.ValueOf(x) + oldSize := w.Size + sz := int(v.Type().Size()) + w.Size += sz + + switch v.Type().Kind() { + case reflect.String: + w.printf("var %s %s = ", name, typeName(x)) + w.WriteString(v.String()) + case reflect.Struct: + w.gob.Encode(x) + fallthrough + case reflect.Slice, reflect.Array: + w.printf("var %s = ", name) + w.writeValue(v) + w.writeSizeInfo(w.Size - oldSize) + default: + w.printf("var %s %s = ", name, typeName(x)) + w.gob.Encode(x) + w.writeValue(v) + w.writeSizeInfo(w.Size - oldSize) + } + w.printf("\n") +} + +func (w *CodeWriter) writeValue(v reflect.Value) { + x := v.Interface() + switch v.Kind() { + case reflect.String: + w.WriteString(v.String()) + case reflect.Array: + // Don't double count: callers of WriteArray count on the size being + // added, so we need to discount it here. + w.Size -= int(v.Type().Size()) + w.writeSlice(x, true) + case reflect.Slice: + w.writeSlice(x, false) + case reflect.Struct: + w.printf("%s{\n", typeName(v.Interface())) + t := v.Type() + for i := 0; i < v.NumField(); i++ { + w.printf("%s: ", t.Field(i).Name) + w.writeValue(v.Field(i)) + w.printf(",\n") + } + w.printf("}") + default: + w.printf("%#v", x) + } +} + +// WriteString writes a string literal. +func (w *CodeWriter) WriteString(s string) { + s = strings.Replace(s, `\`, `\\`, -1) + io.WriteString(w.Hash, s) // content hash + w.Size += len(s) + + const maxInline = 40 + if len(s) <= maxInline { + w.printf("%q", s) + return + } + + // We will render the string as a multi-line string. + const maxWidth = 80 - 4 - len(`"`) - len(`" +`) + + // When starting on its own line, go fmt indents line 2+ an extra level. + n, max := maxWidth, maxWidth-4 + + // As per https://golang.org/issue/18078, the compiler has trouble + // compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN, + // for large N. We insert redundant, explicit parentheses to work around + // that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 + + // ... + s127) + etc + (etc + ... + sN). + explicitParens, extraComment := len(s) > 128*1024, "" + if explicitParens { + w.printf(`(`) + extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078" + } + + // Print "" +\n, if a string does not start on its own line. 
+ b := w.buf.Bytes() + if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' { + w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment) + n, max = maxWidth, maxWidth + } + + w.printf(`"`) + + for sz, p, nLines := 0, 0, 0; p < len(s); { + var r rune + r, sz = utf8.DecodeRuneInString(s[p:]) + out := s[p : p+sz] + chars := 1 + if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' { + switch sz { + case 1: + out = fmt.Sprintf("\\x%02x", s[p]) + case 2, 3: + out = fmt.Sprintf("\\u%04x", r) + case 4: + out = fmt.Sprintf("\\U%08x", r) + } + chars = len(out) + } + if n -= chars; n < 0 { + nLines++ + if explicitParens && nLines&63 == 63 { + w.printf("\") + (\"") + } + w.printf("\" +\n\"") + n = max - len(out) + } + w.printf("%s", out) + p += sz + } + w.printf(`"`) + if explicitParens { + w.printf(`)`) + } +} + +// WriteSlice writes a slice value. +func (w *CodeWriter) WriteSlice(x interface{}) { + w.writeSlice(x, false) +} + +// WriteArray writes an array value. +func (w *CodeWriter) WriteArray(x interface{}) { + w.writeSlice(x, true) +} + +func (w *CodeWriter) writeSlice(x interface{}, isArray bool) { + v := reflect.ValueOf(x) + w.gob.Encode(v.Len()) + w.Size += v.Len() * int(v.Type().Elem().Size()) + name := typeName(x) + if isArray { + name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:]) + } + if isArray { + w.printf("%s{\n", name) + } else { + w.printf("%s{ // %d elements\n", name, v.Len()) + } + + switch kind := v.Type().Elem().Kind(); kind { + case reflect.String: + for _, s := range x.([]string) { + w.WriteString(s) + w.printf(",\n") + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // nLine and nBlock are the number of elements per line and block. + nLine, nBlock, format := 8, 64, "%d," + switch kind { + case reflect.Uint8: + format = "%#02x," + case reflect.Uint16: + format = "%#04x," + case reflect.Uint32: + nLine, nBlock, format = 4, 32, "%#08x," + case reflect.Uint, reflect.Uint64: + nLine, nBlock, format = 4, 32, "%#016x," + case reflect.Int8: + nLine = 16 + } + n := nLine + for i := 0; i < v.Len(); i++ { + if i%nBlock == 0 && v.Len() > nBlock { + w.printf("// Entry %X - %X\n", i, i+nBlock-1) + } + x := v.Index(i).Interface() + w.gob.Encode(x) + w.printf(format, x) + if n--; n == 0 { + n = nLine + w.printf("\n") + } + } + w.printf("\n") + case reflect.Struct: + zero := reflect.Zero(v.Type().Elem()).Interface() + for i := 0; i < v.Len(); i++ { + x := v.Index(i).Interface() + w.gob.EncodeValue(v) + if !reflect.DeepEqual(zero, x) { + line := fmt.Sprintf("%#v,\n", x) + line = line[strings.IndexByte(line, '{'):] + w.printf("%d: ", i) + w.printf(line) + } + } + case reflect.Array: + for i := 0; i < v.Len(); i++ { + w.printf("%d: %#v,\n", i, v.Index(i).Interface()) + } + default: + panic("gen: slice elem type not supported") + } + w.printf("}") +} + +// WriteType writes a definition of the type of the given value and returns the +// type name. +func (w *CodeWriter) WriteType(x interface{}) string { + t := reflect.TypeOf(x) + w.printf("type %s struct {\n", t.Name()) + for i := 0; i < t.NumField(); i++ { + w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type) + } + w.printf("}\n") + return t.Name() +} + +// typeName returns the name of the go type of x. 
+func typeName(x interface{}) string { + t := reflect.ValueOf(x).Type() + return strings.Replace(fmt.Sprint(t), "main.", "", 1) +} diff --git a/vendor/golang.org/x/text/internal/gen/gen.go b/vendor/golang.org/x/text/internal/gen/gen.go new file mode 100644 index 00000000000..2acb0355a26 --- /dev/null +++ b/vendor/golang.org/x/text/internal/gen/gen.go @@ -0,0 +1,281 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package gen contains common code for the various code generation tools in the +// text repository. Its usage ensures consistency between tools. +// +// This package defines command line flags that are common to most generation +// tools. The flags allow for specifying specific Unicode and CLDR versions +// in the public Unicode data repository (http://www.unicode.org/Public). +// +// A local Unicode data mirror can be set through the flag -local or the +// environment variable UNICODE_DIR. The former takes precedence. The local +// directory should follow the same structure as the public repository. +// +// IANA data can also optionally be mirrored by putting it in the iana directory +// rooted at the top of the local mirror. Beware, though, that IANA data is not +// versioned. So it is up to the developer to use the right version. +package gen // import "golang.org/x/text/internal/gen" + +import ( + "bytes" + "flag" + "fmt" + "go/build" + "go/format" + "io" + "io/ioutil" + "log" + "net/http" + "os" + "path" + "path/filepath" + "sync" + "unicode" + + "golang.org/x/text/unicode/cldr" +) + +var ( + url = flag.String("url", + "http://www.unicode.org/Public", + "URL of Unicode database directory") + iana = flag.String("iana", + "http://www.iana.org", + "URL of the IANA repository") + unicodeVersion = flag.String("unicode", + getEnv("UNICODE_VERSION", unicode.Version), + "unicode version to use") + cldrVersion = flag.String("cldr", + getEnv("CLDR_VERSION", cldr.Version), + "cldr version to use") +) + +func getEnv(name, def string) string { + if v := os.Getenv(name); v != "" { + return v + } + return def +} + +// Init performs common initialization for a gen command. It parses the flags +// and sets up the standard logging parameters. +func Init() { + log.SetPrefix("") + log.SetFlags(log.Lshortfile) + flag.Parse() +} + +const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package %s + +` + +// UnicodeVersion reports the requested Unicode version. +func UnicodeVersion() string { + return *unicodeVersion +} + +// UnicodeVersion reports the requested CLDR version. +func CLDRVersion() string { + return *cldrVersion +} + +// IsLocal reports whether data files are available locally. +func IsLocal() bool { + dir, err := localReadmeFile() + if err != nil { + return false + } + if _, err = os.Stat(dir); err != nil { + return false + } + return true +} + +// OpenUCDFile opens the requested UCD file. The file is specified relative to +// the public Unicode root directory. It will call log.Fatal if there are any +// errors. +func OpenUCDFile(file string) io.ReadCloser { + return openUnicode(path.Join(*unicodeVersion, "ucd", file)) +} + +// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there +// are any errors. 
+func OpenCLDRCoreZip() io.ReadCloser { + return OpenUnicodeFile("cldr", *cldrVersion, "core.zip") +} + +// OpenUnicodeFile opens the requested file of the requested category from the +// root of the Unicode data archive. The file is specified relative to the +// public Unicode root directory. If version is "", it will use the default +// Unicode version. It will call log.Fatal if there are any errors. +func OpenUnicodeFile(category, version, file string) io.ReadCloser { + if version == "" { + version = UnicodeVersion() + } + return openUnicode(path.Join(category, version, file)) +} + +// OpenIANAFile opens the requested IANA file. The file is specified relative +// to the IANA root, which is typically either http://www.iana.org or the +// iana directory in the local mirror. It will call log.Fatal if there are any +// errors. +func OpenIANAFile(path string) io.ReadCloser { + return Open(*iana, "iana", path) +} + +var ( + dirMutex sync.Mutex + localDir string +) + +const permissions = 0755 + +func localReadmeFile() (string, error) { + p, err := build.Import("golang.org/x/text", "", build.FindOnly) + if err != nil { + return "", fmt.Errorf("Could not locate package: %v", err) + } + return filepath.Join(p.Dir, "DATA", "README"), nil +} + +func getLocalDir() string { + dirMutex.Lock() + defer dirMutex.Unlock() + + readme, err := localReadmeFile() + if err != nil { + log.Fatal(err) + } + dir := filepath.Dir(readme) + if _, err := os.Stat(readme); err != nil { + if err := os.MkdirAll(dir, permissions); err != nil { + log.Fatalf("Could not create directory: %v", err) + } + ioutil.WriteFile(readme, []byte(readmeTxt), permissions) + } + return dir +} + +const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT. + +This directory contains downloaded files used to generate the various tables +in the golang.org/x/text subrepo. + +Note that the language subtag repo (iana/assignments/language-subtag-registry) +and all other times in the iana subdirectory are not versioned and will need +to be periodically manually updated. The easiest way to do this is to remove +the entire iana directory. This is mostly of concern when updating the language +package. +` + +// Open opens subdir/path if a local directory is specified and the file exists, +// where subdir is a directory relative to the local root, or fetches it from +// urlRoot/path otherwise. It will call log.Fatal if there are any errors. +func Open(urlRoot, subdir, path string) io.ReadCloser { + file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path)) + return open(file, urlRoot, path) +} + +func openUnicode(path string) io.ReadCloser { + file := filepath.Join(getLocalDir(), filepath.FromSlash(path)) + return open(file, *url, path) +} + +// TODO: automatically periodically update non-versioned files. 
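+// Illustrative sketch of how a generator typically uses this package (the
+// file name, variable and output package below are made-up examples, not
+// part of this package):
+//
+//	gen.Init()                              // parse flags, set up logging
+//	r := gen.OpenUCDFile("UnicodeData.txt") // local mirror or download
+//	defer r.Close()
+//	// ... derive `table` from r ...
+//	w := gen.NewCodeWriter()
+//	w.WriteVar("table", table)
+//	w.WriteGoFile("tables.go", "mypkg")     // gofmt'ed output with header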
+ +func open(file, urlRoot, path string) io.ReadCloser { + if f, err := os.Open(file); err == nil { + return f + } + r := get(urlRoot, path) + defer r.Close() + b, err := ioutil.ReadAll(r) + if err != nil { + log.Fatalf("Could not download file: %v", err) + } + os.MkdirAll(filepath.Dir(file), permissions) + if err := ioutil.WriteFile(file, b, permissions); err != nil { + log.Fatalf("Could not create file: %v", err) + } + return ioutil.NopCloser(bytes.NewReader(b)) +} + +func get(root, path string) io.ReadCloser { + url := root + "/" + path + fmt.Printf("Fetching %s...", url) + defer fmt.Println(" done.") + resp, err := http.Get(url) + if err != nil { + log.Fatalf("HTTP GET: %v", err) + } + if resp.StatusCode != 200 { + log.Fatalf("Bad GET status for %q: %q", url, resp.Status) + } + return resp.Body +} + +// TODO: use Write*Version in all applicable packages. + +// WriteUnicodeVersion writes a constant for the Unicode version from which the +// tables are generated. +func WriteUnicodeVersion(w io.Writer) { + fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n") + fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion()) +} + +// WriteCLDRVersion writes a constant for the CLDR version from which the +// tables are generated. +func WriteCLDRVersion(w io.Writer) { + fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n") + fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion()) +} + +// WriteGoFile prepends a standard file comment and package statement to the +// given bytes, applies gofmt, and writes them to a file with the given name. +// It will call log.Fatal if there are any errors. +func WriteGoFile(filename, pkg string, b []byte) { + w, err := os.Create(filename) + if err != nil { + log.Fatalf("Could not create file %s: %v", filename, err) + } + defer w.Close() + if _, err = WriteGo(w, pkg, b); err != nil { + log.Fatalf("Error writing file %s: %v", filename, err) + } +} + +// WriteGo prepends a standard file comment and package statement to the given +// bytes, applies gofmt, and writes them to w. +func WriteGo(w io.Writer, pkg string, b []byte) (n int, err error) { + src := []byte(fmt.Sprintf(header, pkg)) + src = append(src, b...) + formatted, err := format.Source(src) + if err != nil { + // Print the generated code even in case of an error so that the + // returned error can be meaningfully interpreted. + n, _ = w.Write(src) + return n, err + } + return w.Write(formatted) +} + +// Repackage rewrites a Go file from belonging to package main to belonging to +// the given package. +func Repackage(inFile, outFile, pkg string) { + src, err := ioutil.ReadFile(inFile) + if err != nil { + log.Fatalf("reading %s: %v", inFile, err) + } + const toDelete = "package main\n\n" + i := bytes.Index(src, []byte(toDelete)) + if i < 0 { + log.Fatalf("Could not find %q in %s.", toDelete, inFile) + } + w := &bytes.Buffer{} + w.Write(src[i+len(toDelete):]) + WriteGoFile(outFile, pkg, w.Bytes()) +} diff --git a/vendor/golang.org/x/text/internal/triegen/compact.go b/vendor/golang.org/x/text/internal/triegen/compact.go new file mode 100644 index 00000000000..397b975c1b7 --- /dev/null +++ b/vendor/golang.org/x/text/internal/triegen/compact.go @@ -0,0 +1,58 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package triegen + +// This file defines Compacter and its implementations. + +import "io" + +// A Compacter generates an alternative, more space-efficient way to store a +// trie value block. A trie value block holds all possible values for the last +// byte of a UTF-8 encoded rune. Excluding ASCII characters, a trie value block +// always has 64 values, as a UTF-8 encoding ends with a byte in [0x80, 0xC0). +type Compacter interface { + // Size returns whether the Compacter could encode the given block as well + // as its size in case it can. len(v) is always 64. + Size(v []uint64) (sz int, ok bool) + + // Store stores the block using the Compacter's compression method. + // It returns a handle with which the block can be retrieved. + // len(v) is always 64. + Store(v []uint64) uint32 + + // Print writes the data structures associated to the given store to w. + Print(w io.Writer) error + + // Handler returns the name of a function that gets called during trie + // lookup for blocks generated by the Compacter. The function should be of + // the form func (n uint32, b byte) uint64, where n is the index returned by + // the Compacter's Store method and b is the last byte of the UTF-8 + // encoding, where 0x80 <= b < 0xC0, for which to do the lookup in the + // block. + Handler() string +} + +// simpleCompacter is the default Compacter used by builder. It implements a +// normal trie block. +type simpleCompacter builder + +func (b *simpleCompacter) Size([]uint64) (sz int, ok bool) { + return blockSize * b.ValueSize, true +} + +func (b *simpleCompacter) Store(v []uint64) uint32 { + h := uint32(len(b.ValueBlocks) - blockOffset) + b.ValueBlocks = append(b.ValueBlocks, v) + return h +} + +func (b *simpleCompacter) Print(io.Writer) error { + // Structures are printed in print.go. + return nil +} + +func (b *simpleCompacter) Handler() string { + panic("Handler should be special-cased for this Compacter") +} diff --git a/vendor/golang.org/x/text/internal/triegen/print.go b/vendor/golang.org/x/text/internal/triegen/print.go new file mode 100644 index 00000000000..8d9f120bcdf --- /dev/null +++ b/vendor/golang.org/x/text/internal/triegen/print.go @@ -0,0 +1,251 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package triegen + +import ( + "bytes" + "fmt" + "io" + "strings" + "text/template" +) + +// print writes all the data structures as well as the code necessary to use the +// trie to w. +func (b *builder) print(w io.Writer) error { + b.Stats.NValueEntries = len(b.ValueBlocks) * blockSize + b.Stats.NValueBytes = len(b.ValueBlocks) * blockSize * b.ValueSize + b.Stats.NIndexEntries = len(b.IndexBlocks) * blockSize + b.Stats.NIndexBytes = len(b.IndexBlocks) * blockSize * b.IndexSize + b.Stats.NHandleBytes = len(b.Trie) * 2 * b.IndexSize + + // If we only have one root trie, all starter blocks are at position 0 and + // we can access the arrays directly. + if len(b.Trie) == 1 { + // At this point we cannot refer to the generated tables directly. + b.ASCIIBlock = b.Name + "Values" + b.StarterBlock = b.Name + "Index" + } else { + // Otherwise we need to have explicit starter indexes in the trie + // structure. 
+ b.ASCIIBlock = "t.ascii" + b.StarterBlock = "t.utf8Start" + } + + b.SourceType = "[]byte" + if err := lookupGen.Execute(w, b); err != nil { + return err + } + + b.SourceType = "string" + if err := lookupGen.Execute(w, b); err != nil { + return err + } + + if err := trieGen.Execute(w, b); err != nil { + return err + } + + for _, c := range b.Compactions { + if err := c.c.Print(w); err != nil { + return err + } + } + + return nil +} + +func printValues(n int, values []uint64) string { + w := &bytes.Buffer{} + boff := n * blockSize + fmt.Fprintf(w, "\t// Block %#x, offset %#x", n, boff) + var newline bool + for i, v := range values { + if i%6 == 0 { + newline = true + } + if v != 0 { + if newline { + fmt.Fprintf(w, "\n") + newline = false + } + fmt.Fprintf(w, "\t%#02x:%#04x, ", boff+i, v) + } + } + return w.String() +} + +func printIndex(b *builder, nr int, n *node) string { + w := &bytes.Buffer{} + boff := nr * blockSize + fmt.Fprintf(w, "\t// Block %#x, offset %#x", nr, boff) + var newline bool + for i, c := range n.children { + if i%8 == 0 { + newline = true + } + if c != nil { + v := b.Compactions[c.index.compaction].Offset + uint32(c.index.index) + if v != 0 { + if newline { + fmt.Fprintf(w, "\n") + newline = false + } + fmt.Fprintf(w, "\t%#02x:%#02x, ", boff+i, v) + } + } + } + return w.String() +} + +var ( + trieGen = template.Must(template.New("trie").Funcs(template.FuncMap{ + "printValues": printValues, + "printIndex": printIndex, + "title": strings.Title, + "dec": func(x int) int { return x - 1 }, + "psize": func(n int) string { + return fmt.Sprintf("%d bytes (%.2f KiB)", n, float64(n)/1024) + }, + }).Parse(trieTemplate)) + lookupGen = template.Must(template.New("lookup").Parse(lookupTemplate)) +) + +// TODO: consider the return type of lookup. It could be uint64, even if the +// internal value type is smaller. We will have to verify this with the +// performance of unicode/norm, which is very sensitive to such changes. +const trieTemplate = `{{$b := .}}{{$multi := gt (len .Trie) 1}} +// {{.Name}}Trie. Total size: {{psize .Size}}. Checksum: {{printf "%08x" .Checksum}}. +type {{.Name}}Trie struct { {{if $multi}} + ascii []{{.ValueType}} // index for ASCII bytes + utf8Start []{{.IndexType}} // index for UTF-8 bytes >= 0xC0 +{{end}}} + +func new{{title .Name}}Trie(i int) *{{.Name}}Trie { {{if $multi}} + h := {{.Name}}TrieHandles[i] + return &{{.Name}}Trie{ {{.Name}}Values[uint32(h.ascii)<<6:], {{.Name}}Index[uint32(h.multi)<<6:] } +} + +type {{.Name}}TrieHandle struct { + ascii, multi {{.IndexType}} +} + +// {{.Name}}TrieHandles: {{len .Trie}} handles, {{.Stats.NHandleBytes}} bytes +var {{.Name}}TrieHandles = [{{len .Trie}}]{{.Name}}TrieHandle{ +{{range .Trie}} { {{.ASCIIIndex}}, {{.StarterIndex}} }, // {{printf "%08x" .Checksum}}: {{.Name}} +{{end}}}{{else}} + return &{{.Name}}Trie{} +} +{{end}} +// lookupValue determines the type of block n and looks up the value for b. +func (t *{{.Name}}Trie) lookupValue(n uint32, b byte) {{.ValueType}}{{$last := dec (len .Compactions)}} { + switch { {{range $i, $c := .Compactions}} + {{if eq $i $last}}default{{else}}case n < {{$c.Cutoff}}{{end}}:{{if ne $i 0}} + n -= {{$c.Offset}}{{end}} + return {{print $b.ValueType}}({{$c.Handler}}){{end}} + } +} + +// {{.Name}}Values: {{len .ValueBlocks}} blocks, {{.Stats.NValueEntries}} entries, {{.Stats.NValueBytes}} bytes +// The third block is the zero block. 
+var {{.Name}}Values = [{{.Stats.NValueEntries}}]{{.ValueType}} { +{{range $i, $v := .ValueBlocks}}{{printValues $i $v}} +{{end}}} + +// {{.Name}}Index: {{len .IndexBlocks}} blocks, {{.Stats.NIndexEntries}} entries, {{.Stats.NIndexBytes}} bytes +// Block 0 is the zero block. +var {{.Name}}Index = [{{.Stats.NIndexEntries}}]{{.IndexType}} { +{{range $i, $v := .IndexBlocks}}{{printIndex $b $i $v}} +{{end}}} +` + +// TODO: consider allowing zero-length strings after evaluating performance with +// unicode/norm. +const lookupTemplate = ` +// lookup{{if eq .SourceType "string"}}String{{end}} returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}(s {{.SourceType}}) (v {{.ValueType}}, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return {{.ASCIIBlock}}[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = {{.Name}}Index[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := {{.StarterBlock}}[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = {{.Name}}Index[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = {{.Name}}Index[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookup{{if eq .SourceType "string"}}String{{end}}Unsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}Unsafe(s {{.SourceType}}) {{.ValueType}} { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return {{.ASCIIBlock}}[c0] + } + i := {{.StarterBlock}}[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = {{.Name}}Index[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = {{.Name}}Index[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} +` diff --git a/vendor/golang.org/x/text/internal/triegen/triegen.go b/vendor/golang.org/x/text/internal/triegen/triegen.go new file mode 100644 index 00000000000..adb01081247 --- /dev/null +++ b/vendor/golang.org/x/text/internal/triegen/triegen.go @@ -0,0 +1,494 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// Package triegen implements a code generator for a trie for associating +// unsigned integer values with UTF-8 encoded runes. +// +// Many of the go.text packages use tries for storing per-rune information. A +// trie is especially useful if many of the runes have the same value. If this +// is the case, many blocks can be expected to be shared allowing for +// information on many runes to be stored in little space. +// +// As most of the lookups are done directly on []byte slices, the tries use the +// UTF-8 bytes directly for the lookup. This saves a conversion from UTF-8 to +// runes and contributes a little bit to better performance. It also naturally +// provides a fast path for ASCII. +// +// Space is also an issue. There are many code points defined in Unicode and as +// a result tables can get quite large. So every byte counts. The triegen +// package automatically chooses the smallest integer values to represent the +// tables. Compacters allow further compression of the trie by allowing for +// alternative representations of individual trie blocks. +// +// triegen allows generating multiple tries as a single structure. This is +// useful when, for example, one wants to generate tries for several languages +// that have a lot of values in common. Some existing libraries for +// internationalization store all per-language data as a dynamically loadable +// chunk. The go.text packages are designed with the assumption that the user +// typically wants to compile in support for all supported languages, in line +// with the approach common to Go to create a single standalone binary. The +// multi-root trie approach can give significant storage savings in this +// scenario. +// +// triegen generates both tables and code. The code is optimized to use the +// automatically chosen data types. The following code is generated for a Trie +// or multiple Tries named "foo": +// - type fooTrie +// The trie type. +// +// - func newFooTrie(x int) *fooTrie +// Trie constructor, where x is the index of the trie passed to Gen. +// +// - func (t *fooTrie) lookup(s []byte) (v uintX, sz int) +// The lookup method, where uintX is automatically chosen. +// +// - func lookupString, lookupUnsafe and lookupStringUnsafe +// Variants of the above. +// +// - var fooValues and fooIndex and any tables generated by Compacters. +// The core trie data. +// +// - var fooTrieHandles +// Indexes of starter blocks in case of multiple trie roots. +// +// It is recommended that users test the generated trie by checking the returned +// value for every rune. Such exhaustive tests are possible as the the number of +// runes in Unicode is limited. +package triegen // import "golang.org/x/text/internal/triegen" + +// TODO: Arguably, the internally optimized data types would not have to be +// exposed in the generated API. We could also investigate not generating the +// code, but using it through a package. We would have to investigate the impact +// on performance of making such change, though. For packages like unicode/norm, +// small changes like this could tank performance. + +import ( + "encoding/binary" + "fmt" + "hash/crc64" + "io" + "log" + "unicode/utf8" +) + +// builder builds a set of tries for associating values with runes. The set of +// tries can share common index and value blocks. +type builder struct { + Name string + + // ValueType is the type of the trie values looked up. + ValueType string + + // ValueSize is the byte size of the ValueType. 
+ ValueSize int + + // IndexType is the type of trie index values used for all UTF-8 bytes of + // a rune except the last one. + IndexType string + + // IndexSize is the byte size of the IndexType. + IndexSize int + + // SourceType is used when generating the lookup functions. If the user + // requests StringSupport, all lookup functions will be generated for + // string input as well. + SourceType string + + Trie []*Trie + + IndexBlocks []*node + ValueBlocks [][]uint64 + Compactions []compaction + Checksum uint64 + + ASCIIBlock string + StarterBlock string + + indexBlockIdx map[uint64]int + valueBlockIdx map[uint64]nodeIndex + asciiBlockIdx map[uint64]int + + // Stats are used to fill out the template. + Stats struct { + NValueEntries int + NValueBytes int + NIndexEntries int + NIndexBytes int + NHandleBytes int + } + + err error +} + +// A nodeIndex encodes the index of a node, which is defined by the compaction +// which stores it and an index within the compaction. For internal nodes, the +// compaction is always 0. +type nodeIndex struct { + compaction int + index int +} + +// compaction keeps track of stats used for the compaction. +type compaction struct { + c Compacter + blocks []*node + maxHandle uint32 + totalSize int + + // Used by template-based generator and thus exported. + Cutoff uint32 + Offset uint32 + Handler string +} + +func (b *builder) setError(err error) { + if b.err == nil { + b.err = err + } +} + +// An Option can be passed to Gen. +type Option func(b *builder) error + +// Compact configures the trie generator to use the given Compacter. +func Compact(c Compacter) Option { + return func(b *builder) error { + b.Compactions = append(b.Compactions, compaction{ + c: c, + Handler: c.Handler() + "(n, b)"}) + return nil + } +} + +// Gen writes Go code for a shared trie lookup structure to w for the given +// Tries. The generated trie type will be called nameTrie. newNameTrie(x) will +// return the *nameTrie for tries[x]. A value can be looked up by using one of +// the various lookup methods defined on nameTrie. It returns the table size of +// the generated trie. +func Gen(w io.Writer, name string, tries []*Trie, opts ...Option) (sz int, err error) { + // The index contains two dummy blocks, followed by the zero block. The zero + // block is at offset 0x80, so that the offset for the zero block for + // continuation bytes is 0. + b := &builder{ + Name: name, + Trie: tries, + IndexBlocks: []*node{{}, {}, {}}, + Compactions: []compaction{{ + Handler: name + "Values[n<<6+uint32(b)]", + }}, + // The 0 key in indexBlockIdx and valueBlockIdx is the hash of the zero + // block. + indexBlockIdx: map[uint64]int{0: 0}, + valueBlockIdx: map[uint64]nodeIndex{0: {}}, + asciiBlockIdx: map[uint64]int{}, + } + b.Compactions[0].c = (*simpleCompacter)(b) + + for _, f := range opts { + if err := f(b); err != nil { + return 0, err + } + } + b.build() + if b.err != nil { + return 0, b.err + } + if err = b.print(w); err != nil { + return 0, err + } + return b.Size(), nil +} + +// A Trie represents a single root node of a trie. A builder may build several +// overlapping tries at once. +type Trie struct { + root *node + + hiddenTrie +} + +// hiddenTrie contains values we want to be visible to the template generator, +// but hidden from the API documentation. +type hiddenTrie struct { + Name string + Checksum uint64 + ASCIIIndex int + StarterIndex int +} + +// NewTrie returns a new trie root. 
+func NewTrie(name string) *Trie { + return &Trie{ + &node{ + children: make([]*node, blockSize), + values: make([]uint64, utf8.RuneSelf), + }, + hiddenTrie{Name: name}, + } +} + +// Gen is a convenience wrapper around the Gen func passing t as the only trie +// and uses the name passed to NewTrie. It returns the size of the generated +// tables. +func (t *Trie) Gen(w io.Writer, opts ...Option) (sz int, err error) { + return Gen(w, t.Name, []*Trie{t}, opts...) +} + +// node is a node of the intermediate trie structure. +type node struct { + // children holds this node's children. It is always of length 64. + // A child node may be nil. + children []*node + + // values contains the values of this node. If it is non-nil, this node is + // either a root or leaf node: + // For root nodes, len(values) == 128 and it maps the bytes in [0x00, 0x7F]. + // For leaf nodes, len(values) == 64 and it maps the bytes in [0x80, 0xBF]. + values []uint64 + + index nodeIndex +} + +// Insert associates value with the given rune. Insert will panic if a non-zero +// value is passed for an invalid rune. +func (t *Trie) Insert(r rune, value uint64) { + if value == 0 { + return + } + s := string(r) + if []rune(s)[0] != r && value != 0 { + // Note: The UCD tables will always assign what amounts to a zero value + // to a surrogate. Allowing a zero value for an illegal rune allows + // users to iterate over [0..MaxRune] without having to explicitly + // exclude surrogates, which would be tedious. + panic(fmt.Sprintf("triegen: non-zero value for invalid rune %U", r)) + } + if len(s) == 1 { + // It is a root node value (ASCII). + t.root.values[s[0]] = value + return + } + + n := t.root + for ; len(s) > 1; s = s[1:] { + if n.children == nil { + n.children = make([]*node, blockSize) + } + p := s[0] % blockSize + c := n.children[p] + if c == nil { + c = &node{} + n.children[p] = c + } + if len(s) > 2 && c.values != nil { + log.Fatalf("triegen: insert(%U): found internal node with values", r) + } + n = c + } + if n.values == nil { + n.values = make([]uint64, blockSize) + } + if n.children != nil { + log.Fatalf("triegen: insert(%U): found leaf node that also has child nodes", r) + } + n.values[s[0]-0x80] = value +} + +// Size returns the number of bytes the generated trie will take to store. It +// needs to be exported as it is used in the templates. +func (b *builder) Size() int { + // Index blocks. + sz := len(b.IndexBlocks) * blockSize * b.IndexSize + + // Skip the first compaction, which represents the normal value blocks, as + // its totalSize does not account for the ASCII blocks, which are managed + // separately. + sz += len(b.ValueBlocks) * blockSize * b.ValueSize + for _, c := range b.Compactions[1:] { + sz += c.totalSize + } + + // TODO: this computation does not account for the fixed overhead of a using + // a compaction, either code or data. As for data, though, the typical + // overhead of data is in the order of bytes (2 bytes for cases). Further, + // the savings of using a compaction should anyway be substantial for it to + // be worth it. + + // For multi-root tries, we also need to account for the handles. + if len(b.Trie) > 1 { + sz += 2 * b.IndexSize * len(b.Trie) + } + return sz +} + +func (b *builder) build() { + // Compute the sizes of the values. + var vmax uint64 + for _, t := range b.Trie { + vmax = maxValue(t.root, vmax) + } + b.ValueType, b.ValueSize = getIntType(vmax) + + // Compute all block allocations. + // TODO: first compute the ASCII blocks for all tries and then the other + // nodes. 
ASCII blocks are more restricted in placement, as they require two + // blocks to be placed consecutively. Processing them first may improve + // sharing (at least one zero block can be expected to be saved.) + for _, t := range b.Trie { + b.Checksum += b.buildTrie(t) + } + + // Compute the offsets for all the Compacters. + offset := uint32(0) + for i := range b.Compactions { + c := &b.Compactions[i] + c.Offset = offset + offset += c.maxHandle + 1 + c.Cutoff = offset + } + + // Compute the sizes of indexes. + // TODO: different byte positions could have different sizes. So far we have + // not found a case where this is beneficial. + imax := uint64(b.Compactions[len(b.Compactions)-1].Cutoff) + for _, ib := range b.IndexBlocks { + if x := uint64(ib.index.index); x > imax { + imax = x + } + } + b.IndexType, b.IndexSize = getIntType(imax) +} + +func maxValue(n *node, max uint64) uint64 { + if n == nil { + return max + } + for _, c := range n.children { + max = maxValue(c, max) + } + for _, v := range n.values { + if max < v { + max = v + } + } + return max +} + +func getIntType(v uint64) (string, int) { + switch { + case v < 1<<8: + return "uint8", 1 + case v < 1<<16: + return "uint16", 2 + case v < 1<<32: + return "uint32", 4 + } + return "uint64", 8 +} + +const ( + blockSize = 64 + + // Subtract two blocks to offset 0x80, the first continuation byte. + blockOffset = 2 + + // Subtract three blocks to offset 0xC0, the first non-ASCII starter. + rootBlockOffset = 3 +) + +var crcTable = crc64.MakeTable(crc64.ISO) + +func (b *builder) buildTrie(t *Trie) uint64 { + n := t.root + + // Get the ASCII offset. For the first trie, the ASCII block will be at + // position 0. + hasher := crc64.New(crcTable) + binary.Write(hasher, binary.BigEndian, n.values) + hash := hasher.Sum64() + + v, ok := b.asciiBlockIdx[hash] + if !ok { + v = len(b.ValueBlocks) + b.asciiBlockIdx[hash] = v + + b.ValueBlocks = append(b.ValueBlocks, n.values[:blockSize], n.values[blockSize:]) + if v == 0 { + // Add the zero block at position 2 so that it will be assigned a + // zero reference in the lookup blocks. + // TODO: always do this? This would allow us to remove a check from + // the trie lookup, but at the expense of extra space. Analyze + // performance for unicode/norm. + b.ValueBlocks = append(b.ValueBlocks, make([]uint64, blockSize)) + } + } + t.ASCIIIndex = v + + // Compute remaining offsets. + t.Checksum = b.computeOffsets(n, true) + // We already subtracted the normal blockOffset from the index. Subtract the + // difference for starter bytes. + t.StarterIndex = n.index.index - (rootBlockOffset - blockOffset) + return t.Checksum +} + +func (b *builder) computeOffsets(n *node, root bool) uint64 { + // For the first trie, the root lookup block will be at position 3, which is + // the offset for UTF-8 non-ASCII starter bytes. + first := len(b.IndexBlocks) == rootBlockOffset + if first { + b.IndexBlocks = append(b.IndexBlocks, n) + } + + // We special-case the cases where all values recursively are 0. This allows + // for the use of a zero block to which all such values can be directed. 
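// Illustration (not part of the vendored source): a minimal sketch of how a
// table-generation program typically drives this package, using only NewTrie,
// Insert and Gen as defined above; the trie name "width" and the inserted
// values are made up.
package main

import (
	"bytes"
	"log"

	"golang.org/x/text/internal/triegen"
)

func main() {
	t := triegen.NewTrie("width")
	t.Insert('A', 1) // associate non-zero values with some runes
	t.Insert('世', 2)

	var buf bytes.Buffer
	sz, err := t.Gen(&buf) // emits the widthTrie type, tables and lookup methods
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("generated %d bytes of tables", sz)
	// buf now holds Go source that a generator would write to a tables file.
}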
+ hash := uint64(0) + if n.children != nil || n.values != nil { + hasher := crc64.New(crcTable) + for _, c := range n.children { + var v uint64 + if c != nil { + v = b.computeOffsets(c, false) + } + binary.Write(hasher, binary.BigEndian, v) + } + binary.Write(hasher, binary.BigEndian, n.values) + hash = hasher.Sum64() + } + + if first { + b.indexBlockIdx[hash] = rootBlockOffset - blockOffset + } + + // Compacters don't apply to internal nodes. + if n.children != nil { + v, ok := b.indexBlockIdx[hash] + if !ok { + v = len(b.IndexBlocks) - blockOffset + b.IndexBlocks = append(b.IndexBlocks, n) + b.indexBlockIdx[hash] = v + } + n.index = nodeIndex{0, v} + } else { + h, ok := b.valueBlockIdx[hash] + if !ok { + bestI, bestSize := 0, blockSize*b.ValueSize + for i, c := range b.Compactions[1:] { + if sz, ok := c.c.Size(n.values); ok && bestSize > sz { + bestI, bestSize = i+1, sz + } + } + c := &b.Compactions[bestI] + c.totalSize += bestSize + v := c.c.Store(n.values) + if c.maxHandle < v { + c.maxHandle = v + } + h = nodeIndex{bestI, int(v)} + b.valueBlockIdx[hash] = h + } + n.index = h + } + return hash +} diff --git a/vendor/golang.org/x/text/internal/ucd/ucd.go b/vendor/golang.org/x/text/internal/ucd/ucd.go new file mode 100644 index 00000000000..309e8d8b16e --- /dev/null +++ b/vendor/golang.org/x/text/internal/ucd/ucd.go @@ -0,0 +1,376 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ucd provides a parser for Unicode Character Database files, the +// format of which is defined in http://www.unicode.org/reports/tr44/. See +// http://www.unicode.org/Public/UCD/latest/ucd/ for example files. +// +// It currently does not support substitutions of missing fields. +package ucd // import "golang.org/x/text/internal/ucd" + +import ( + "bufio" + "bytes" + "errors" + "io" + "log" + "regexp" + "strconv" + "strings" +) + +// UnicodeData.txt fields. +const ( + CodePoint = iota + Name + GeneralCategory + CanonicalCombiningClass + BidiClass + DecompMapping + DecimalValue + DigitValue + NumericValue + BidiMirrored + Unicode1Name + ISOComment + SimpleUppercaseMapping + SimpleLowercaseMapping + SimpleTitlecaseMapping +) + +// Parse calls f for each entry in the given reader of a UCD file. It will close +// the reader upon return. It will call log.Fatal if any error occurred. +// +// This implements the most common usage pattern of using Parser. +func Parse(r io.ReadCloser, f func(p *Parser)) { + defer r.Close() + + p := New(r) + for p.Next() { + f(p) + } + if err := p.Err(); err != nil { + r.Close() // os.Exit will cause defers not to be called. + log.Fatal(err) + } +} + +// An Option is used to configure a Parser. +type Option func(p *Parser) + +func keepRanges(p *Parser) { + p.keepRanges = true +} + +var ( + // KeepRanges prevents the expansion of ranges. The raw ranges can be + // obtained by calling Range(0) on the parser. + KeepRanges Option = keepRanges +) + +// The Part option register a handler for lines starting with a '@'. The text +// after a '@' is available as the first field. Comments are handled as usual. +func Part(f func(p *Parser)) Option { + return func(p *Parser) { + p.partHandler = f + } +} + +// The CommentHandler option passes comments that are on a line by itself to +// a given handler. +func CommentHandler(f func(s string)) Option { + return func(p *Parser) { + p.commentHandler = f + } +} + +// A Parser parses Unicode Character Database (UCD) files. 
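// Illustration (not part of the vendored source): a minimal sketch of the
// usage pattern Parse is designed for; the UnicodeData.txt path is
// illustrative only.
package main

import (
	"log"
	"os"

	"golang.org/x/text/internal/ucd"
)

func main() {
	f, err := os.Open("UnicodeData.txt")
	if err != nil {
		log.Fatal(err)
	}
	ucd.Parse(f, func(p *ucd.Parser) {
		r := p.Rune(ucd.CodePoint)                // field 0, with ranges expanded
		name := p.String(ucd.Name)                // field 1
		ccc := p.Int(ucd.CanonicalCombiningClass) // field 3
		log.Printf("%U %s ccc=%d", r, name, ccc)
	})
}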
+type Parser struct { + scanner *bufio.Scanner + + keepRanges bool // Don't expand rune ranges in field 0. + + err error + comment []byte + field [][]byte + // parsedRange is needed in case Range(0) is called more than once for one + // field. In some cases this requires scanning ahead. + parsedRange bool + rangeStart, rangeEnd rune + + partHandler func(p *Parser) + commentHandler func(s string) +} + +func (p *Parser) setError(err error) { + if p.err == nil { + p.err = err + } +} + +func (p *Parser) getField(i int) []byte { + if i >= len(p.field) { + return nil + } + return p.field[i] +} + +// Err returns a non-nil error if any error occurred during parsing. +func (p *Parser) Err() error { + return p.err +} + +// New returns a Parser for the given Reader. +func New(r io.Reader, o ...Option) *Parser { + p := &Parser{ + scanner: bufio.NewScanner(r), + } + for _, f := range o { + f(p) + } + return p +} + +// Next parses the next line in the file. It returns true if a line was parsed +// and false if it reached the end of the file. +func (p *Parser) Next() bool { + if !p.keepRanges && p.rangeStart < p.rangeEnd { + p.rangeStart++ + return true + } + p.comment = nil + p.field = p.field[:0] + p.parsedRange = false + + for p.scanner.Scan() { + b := p.scanner.Bytes() + if len(b) == 0 { + continue + } + if b[0] == '#' { + if p.commentHandler != nil { + p.commentHandler(strings.TrimSpace(string(b[1:]))) + } + continue + } + + // Parse line + if i := bytes.IndexByte(b, '#'); i != -1 { + p.comment = bytes.TrimSpace(b[i+1:]) + b = b[:i] + } + if b[0] == '@' { + if p.partHandler != nil { + p.field = append(p.field, bytes.TrimSpace(b[1:])) + p.partHandler(p) + p.field = p.field[:0] + } + p.comment = nil + continue + } + for { + i := bytes.IndexByte(b, ';') + if i == -1 { + p.field = append(p.field, bytes.TrimSpace(b)) + break + } + p.field = append(p.field, bytes.TrimSpace(b[:i])) + b = b[i+1:] + } + if !p.keepRanges { + p.rangeStart, p.rangeEnd = p.getRange(0) + } + return true + } + p.setError(p.scanner.Err()) + return false +} + +func parseRune(b []byte) (rune, error) { + if len(b) > 2 && b[0] == 'U' && b[1] == '+' { + b = b[2:] + } + x, err := strconv.ParseUint(string(b), 16, 32) + return rune(x), err +} + +func (p *Parser) parseRune(b []byte) rune { + x, err := parseRune(b) + p.setError(err) + return x +} + +// Rune parses and returns field i as a rune. +func (p *Parser) Rune(i int) rune { + if i > 0 || p.keepRanges { + return p.parseRune(p.getField(i)) + } + return p.rangeStart +} + +// Runes interprets and returns field i as a sequence of runes. +func (p *Parser) Runes(i int) (runes []rune) { + add := func(b []byte) { + if b = bytes.TrimSpace(b); len(b) > 0 { + runes = append(runes, p.parseRune(b)) + } + } + for b := p.getField(i); ; { + i := bytes.IndexByte(b, ' ') + if i == -1 { + add(b) + break + } + add(b[:i]) + b = b[i+1:] + } + return +} + +var ( + errIncorrectLegacyRange = errors.New("ucd: unmatched <* First>") + + // reRange matches one line of a legacy rune range. + reRange = regexp.MustCompile("^([0-9A-F]*);<([^,]*), ([^>]*)>(.*)$") +) + +// Range parses and returns field i as a rune range. A range is inclusive at +// both ends. If the field only has one rune, first and last will be identical. +// It supports the legacy format for ranges used in UnicodeData.txt. 
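// Illustration (not part of the vendored source): a small sketch exercising
// the line handling implemented by Next above ('#' comments, '@' part lines
// and ';'-separated fields); the input data is made up.
package main

import (
	"fmt"
	"strings"

	"golang.org/x/text/internal/ucd"
)

func main() {
	const data = "# a comment\n" +
		"@Part0\n" +
		"0041;LATIN CAPITAL LETTER A;Lu\n"

	p := ucd.New(strings.NewReader(data),
		ucd.Part(func(p *ucd.Parser) {
			fmt.Println("part:", p.String(0)) // part: Part0
		}),
		ucd.CommentHandler(func(s string) {
			fmt.Println("comment:", s) // comment: a comment
		}),
	)
	for p.Next() {
		fmt.Printf("%U %s\n", p.Rune(0), p.String(1)) // U+0041 LATIN CAPITAL LETTER A
	}
	if err := p.Err(); err != nil {
		fmt.Println("error:", err)
	}
}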
+func (p *Parser) Range(i int) (first, last rune) { + if !p.keepRanges { + return p.rangeStart, p.rangeStart + } + return p.getRange(i) +} + +func (p *Parser) getRange(i int) (first, last rune) { + b := p.getField(i) + if k := bytes.Index(b, []byte("..")); k != -1 { + return p.parseRune(b[:k]), p.parseRune(b[k+2:]) + } + // The first field may not be a rune, in which case we may ignore any error + // and set the range as 0..0. + x, err := parseRune(b) + if err != nil { + // Disable range parsing henceforth. This ensures that an error will be + // returned if the user subsequently will try to parse this field as + // a Rune. + p.keepRanges = true + } + // Special case for UnicodeData that was retained for backwards compatibility. + if i == 0 && len(p.field) > 1 && bytes.HasSuffix(p.field[1], []byte("First>")) { + if p.parsedRange { + return p.rangeStart, p.rangeEnd + } + mf := reRange.FindStringSubmatch(p.scanner.Text()) + if mf == nil || !p.scanner.Scan() { + p.setError(errIncorrectLegacyRange) + return x, x + } + // Using Bytes would be more efficient here, but Text is a lot easier + // and this is not a frequent case. + ml := reRange.FindStringSubmatch(p.scanner.Text()) + if ml == nil || mf[2] != ml[2] || ml[3] != "Last" || mf[4] != ml[4] { + p.setError(errIncorrectLegacyRange) + return x, x + } + p.rangeStart, p.rangeEnd = x, p.parseRune(p.scanner.Bytes()[:len(ml[1])]) + p.parsedRange = true + return p.rangeStart, p.rangeEnd + } + return x, x +} + +// bools recognizes all valid UCD boolean values. +var bools = map[string]bool{ + "": false, + "N": false, + "No": false, + "F": false, + "False": false, + "Y": true, + "Yes": true, + "T": true, + "True": true, +} + +// Bool parses and returns field i as a boolean value. +func (p *Parser) Bool(i int) bool { + b := p.getField(i) + for s, v := range bools { + if bstrEq(b, s) { + return v + } + } + p.setError(strconv.ErrSyntax) + return false +} + +// Int parses and returns field i as an integer value. +func (p *Parser) Int(i int) int { + x, err := strconv.ParseInt(string(p.getField(i)), 10, 64) + p.setError(err) + return int(x) +} + +// Uint parses and returns field i as an unsigned integer value. +func (p *Parser) Uint(i int) uint { + x, err := strconv.ParseUint(string(p.getField(i)), 10, 64) + p.setError(err) + return uint(x) +} + +// Float parses and returns field i as a decimal value. +func (p *Parser) Float(i int) float64 { + x, err := strconv.ParseFloat(string(p.getField(i)), 64) + p.setError(err) + return x +} + +// String parses and returns field i as a string value. +func (p *Parser) String(i int) string { + return string(p.getField(i)) +} + +// Strings parses and returns field i as a space-separated list of strings. +func (p *Parser) Strings(i int) []string { + ss := strings.Split(string(p.getField(i)), " ") + for i, s := range ss { + ss[i] = strings.TrimSpace(s) + } + return ss +} + +// Comment returns the comments for the current line. +func (p *Parser) Comment() string { + return string(p.comment) +} + +var errUndefinedEnum = errors.New("ucd: undefined enum value") + +// Enum interprets and returns field i as a value that must be one of the values +// in enum. 
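// Illustration (not part of the vendored source): a brief sketch of reading a
// raw rune range with the KeepRanges option instead of letting Next expand it
// rune by rune; the input line is made up.
package main

import (
	"fmt"
	"strings"

	"golang.org/x/text/internal/ucd"
)

func main() {
	p := ucd.New(strings.NewReader("0000..001F;Cc\n"), ucd.KeepRanges)
	for p.Next() {
		first, last := p.Range(0)
		fmt.Printf("%U..%U %s\n", first, last, p.String(1)) // U+0000..U+001F Cc
	}
}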
+func (p *Parser) Enum(i int, enum ...string) string { + b := p.getField(i) + for _, s := range enum { + if bstrEq(b, s) { + return s + } + } + p.setError(errUndefinedEnum) + return "" +} + +func bstrEq(b []byte, s string) bool { + if len(b) != len(s) { + return false + } + for i, c := range b { + if c != s[i] { + return false + } + } + return true +} diff --git a/vendor/golang.org/x/text/unicode/cldr/base.go b/vendor/golang.org/x/text/unicode/cldr/base.go new file mode 100644 index 00000000000..2382f4d6da1 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/cldr/base.go @@ -0,0 +1,100 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cldr + +import ( + "encoding/xml" + "regexp" + "strconv" +) + +// Elem is implemented by every XML element. +type Elem interface { + setEnclosing(Elem) + setName(string) + enclosing() Elem + + GetCommon() *Common +} + +type hidden struct { + CharData string `xml:",chardata"` + Alias *struct { + Common + Source string `xml:"source,attr"` + Path string `xml:"path,attr"` + } `xml:"alias"` + Def *struct { + Common + Choice string `xml:"choice,attr,omitempty"` + Type string `xml:"type,attr,omitempty"` + } `xml:"default"` +} + +// Common holds several of the most common attributes and sub elements +// of an XML element. +type Common struct { + XMLName xml.Name + name string + enclElem Elem + Type string `xml:"type,attr,omitempty"` + Reference string `xml:"reference,attr,omitempty"` + Alt string `xml:"alt,attr,omitempty"` + ValidSubLocales string `xml:"validSubLocales,attr,omitempty"` + Draft string `xml:"draft,attr,omitempty"` + hidden +} + +// Default returns the default type to select from the enclosed list +// or "" if no default value is specified. +func (e *Common) Default() string { + if e.Def == nil { + return "" + } + if e.Def.Choice != "" { + return e.Def.Choice + } else if e.Def.Type != "" { + // Type is still used by the default element in collation. + return e.Def.Type + } + return "" +} + +// GetCommon returns e. It is provided such that Common implements Elem. +func (e *Common) GetCommon() *Common { + return e +} + +// Data returns the character data accumulated for this element. +func (e *Common) Data() string { + e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode) + return e.CharData +} + +func (e *Common) setName(s string) { + e.name = s +} + +func (e *Common) enclosing() Elem { + return e.enclElem +} + +func (e *Common) setEnclosing(en Elem) { + e.enclElem = en +} + +// Escape characters that can be escaped without further escaping the string. +var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`) + +// replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string. +// It assumes the input string is correctly formatted. +func replaceUnicode(s string) string { + if s[1] == '#' { + r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32) + return string(r) + } + r, _, _, _ := strconv.UnquoteChar(s, 0) + return string(r) +} diff --git a/vendor/golang.org/x/text/unicode/cldr/cldr.go b/vendor/golang.org/x/text/unicode/cldr/cldr.go new file mode 100644 index 00000000000..2197f8ac268 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/cldr/cldr.go @@ -0,0 +1,130 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:generate go run makexml.go -output xml.go + +// Package cldr provides a parser for LDML and related XML formats. +// This package is intended to be used by the table generation tools +// for the various internationalization-related packages. +// As the XML types are generated from the CLDR DTD, and as the CLDR standard +// is periodically amended, this package may change considerably over time. +// This mostly means that data may appear and disappear between versions. +// That is, old code should keep compiling for newer versions, but data +// may have moved or changed. +// CLDR version 22 is the first version supported by this package. +// Older versions may not work. +package cldr // import "golang.org/x/text/unicode/cldr" + +import ( + "fmt" + "sort" +) + +// CLDR provides access to parsed data of the Unicode Common Locale Data Repository. +type CLDR struct { + parent map[string][]string + locale map[string]*LDML + resolved map[string]*LDML + bcp47 *LDMLBCP47 + supp *SupplementalData +} + +func makeCLDR() *CLDR { + return &CLDR{ + parent: make(map[string][]string), + locale: make(map[string]*LDML), + resolved: make(map[string]*LDML), + bcp47: &LDMLBCP47{}, + supp: &SupplementalData{}, + } +} + +// BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned. +func (cldr *CLDR) BCP47() *LDMLBCP47 { + return nil +} + +// Draft indicates the draft level of an element. +type Draft int + +const ( + Approved Draft = iota + Contributed + Provisional + Unconfirmed +) + +var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""} + +// ParseDraft returns the Draft value corresponding to the given string. The +// empty string corresponds to Approved. +func ParseDraft(level string) (Draft, error) { + if level == "" { + return Approved, nil + } + for i, s := range drafts { + if level == s { + return Unconfirmed - Draft(i), nil + } + } + return Approved, fmt.Errorf("cldr: unknown draft level %q", level) +} + +func (d Draft) String() string { + return drafts[len(drafts)-1-int(d)] +} + +// SetDraftLevel sets which draft levels to include in the evaluated LDML. +// Any draft element for which the draft level is higher than lev will be excluded. +// If multiple draft levels are available for a single element, the one with the +// lowest draft level will be selected, unless preferDraft is true, in which case +// the highest draft will be chosen. +// It is assumed that the underlying LDML is canonicalized. +func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) { + // TODO: implement + cldr.resolved = make(map[string]*LDML) +} + +// RawLDML returns the LDML XML for id in unresolved form. +// id must be one of the strings returned by Locales. +func (cldr *CLDR) RawLDML(loc string) *LDML { + return cldr.locale[loc] +} + +// LDML returns the fully resolved LDML XML for loc, which must be one of +// the strings returned by Locales. +func (cldr *CLDR) LDML(loc string) (*LDML, error) { + return cldr.resolve(loc) +} + +// Supplemental returns the parsed supplemental data. If no such data was parsed, +// nil is returned. +func (cldr *CLDR) Supplemental() *SupplementalData { + return cldr.supp +} + +// Locales returns the locales for which there exist files. +// Valid sublocales for which there is no file are not included. +// The root locale is always sorted first. 
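// Illustration (not part of the vendored source): a minimal sketch that
// assumes a *cldr.CLDR already produced by this package's decoder (defined in
// a file outside this hunk) and uses only the accessors shown above; the
// package name cldrutil is hypothetical.
package cldrutil

import (
	"fmt"
	"log"

	"golang.org/x/text/unicode/cldr"
)

// inspect resolves every parsed locale after excluding elements whose draft
// level is above "contributed" (per SetDraftLevel's documentation).
func inspect(data *cldr.CLDR) {
	lev, err := cldr.ParseDraft("contributed")
	if err != nil {
		log.Fatal(err)
	}
	data.SetDraftLevel(lev, false)

	for _, loc := range data.Locales() { // "root" is always sorted first
		if _, err := data.LDML(loc); err != nil { // fully resolved document
			log.Fatal(err)
		}
		fmt.Println("resolved locale:", loc)
	}
}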
+func (cldr *CLDR) Locales() []string { + loc := []string{"root"} + hasRoot := false + for l, _ := range cldr.locale { + if l == "root" { + hasRoot = true + continue + } + loc = append(loc, l) + } + sort.Strings(loc[1:]) + if !hasRoot { + return loc[1:] + } + return loc +} + +// Get fills in the fields of x based on the XPath path. +func Get(e Elem, path string) (res Elem, err error) { + return walkXPath(e, path) +} diff --git a/vendor/golang.org/x/text/unicode/cldr/collate.go b/vendor/golang.org/x/text/unicode/cldr/collate.go new file mode 100644 index 00000000000..80ee28d795e --- /dev/null +++ b/vendor/golang.org/x/text/unicode/cldr/collate.go @@ -0,0 +1,359 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cldr + +import ( + "bufio" + "encoding/xml" + "errors" + "fmt" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// RuleProcessor can be passed to Collator's Process method, which +// parses the rules and calls the respective method for each rule found. +type RuleProcessor interface { + Reset(anchor string, before int) error + Insert(level int, str, context, extend string) error + Index(id string) +} + +const ( + // cldrIndex is a Unicode-reserved sentinel value used to mark the start + // of a grouping within an index. + // We ignore any rule that starts with this rune. + // See http://unicode.org/reports/tr35/#Collation_Elements for details. + cldrIndex = "\uFDD0" + + // specialAnchor is the format in which to represent logical reset positions, + // such as "first tertiary ignorable". + specialAnchor = "<%s/>" +) + +// Process parses the rules for the tailorings of this collation +// and calls the respective methods of p for each rule found. +func (c Collation) Process(p RuleProcessor) (err error) { + if len(c.Cr) > 0 { + if len(c.Cr) > 1 { + return fmt.Errorf("multiple cr elements, want 0 or 1") + } + return processRules(p, c.Cr[0].Data()) + } + if c.Rules.Any != nil { + return c.processXML(p) + } + return errors.New("no tailoring data") +} + +// processRules parses rules in the Collation Rule Syntax defined in +// http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings. +func processRules(p RuleProcessor, s string) (err error) { + chk := func(s string, e error) string { + if err == nil { + err = e + } + return s + } + i := 0 // Save the line number for use after the loop. + scanner := bufio.NewScanner(strings.NewReader(s)) + for ; scanner.Scan() && err == nil; i++ { + for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) { + level := 5 + var ch byte + switch ch, s = s[0], s[1:]; ch { + case '&': // followed by or '[' ']' + if s = skipSpace(s); consume(&s, '[') { + s = chk(parseSpecialAnchor(p, s)) + } else { + s = chk(parseAnchor(p, 0, s)) + } + case '<': // sort relation '<'{1,4}, optionally followed by '*'. + for level = 1; consume(&s, '<'); level++ { + } + if level > 4 { + err = fmt.Errorf("level %d > 4", level) + } + fallthrough + case '=': // identity relation, optionally followed by *. + if consume(&s, '*') { + s = chk(parseSequence(p, level, s)) + } else { + s = chk(parseOrder(p, level, s)) + } + default: + chk("", fmt.Errorf("illegal operator %q", ch)) + break + } + } + } + if chk("", scanner.Err()); err != nil { + return fmt.Errorf("%d: %v", i, err) + } + return nil +} + +// parseSpecialAnchor parses the anchor syntax which is either of the form +// ['before' ] +// or +// [